more cleanup

Tanishq Dubey
2023-02-16 19:34:13 -05:00
parent 12bba40f0a
commit b49b05c4d3
3 changed files with 34 additions and 20 deletions


@@ -1,4 +1,7 @@
 import whisper
+import json
+from pathlib import Path
 import tempfile
 import numpy as np
 import structlog
@@ -19,10 +22,22 @@ class TextGlob:
 class SentimentEditor:
     def __init__(self, video_path, audio_path, params):
         self.logger = structlog.get_logger("sentiment")
-        self.logger.info("loading whisper model", size=params["model_size"])
-        self.model = whisper.load_model(params["model_size"])
-        self.logger.info("transcribing audio", path=audio_path)
-        self.result = self.model.transcribe(audio_path)
+        tempdir = tempfile.gettempdir()
+        dest_location = f"{tempdir}/{params['temp_file_name']}-{params['model_size']}-sentiment.json"
+        if not Path(dest_location).is_file():
+            self.logger.info("loading whisper model", size=params["model_size"])
+            self.model = whisper.load_model(params["model_size"])
+            self.logger.info("transcribing audio", path=audio_path)
+            self.result = self.model.transcribe(audio_path)
+            with open(dest_location, 'w') as fp:
+                json.dump(self.result, fp)
+        else:
+            self.logger.info("cached transcription found", path=dest_location)
+            with open(dest_location, 'r') as f:
+                self.result = json.load(f)
         self.segments = []
         for segment in self.result['segments']:
             self.segments.append(TextGlob(segment['start'], segment['end'], segment['text'], 0))
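For orientation, a minimal sketch of how this constructor might be called. The params keys match what the new caching code reads; the file names and the "base" model size are hypothetical values, not taken from this repository:

# Hypothetical usage; paths and values are illustrative only.
params = {
    "model_size": "base",          # forwarded to whisper.load_model
    "temp_file_name": "episode01", # keys the cached transcription file
}
editor = SentimentEditor("episode01.mp4", "episode01.wav", params)
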
@@ -42,7 +57,7 @@ class SentimentEditor:
     def edit(self, large_window, small_window, params):
         end_time = self.segments[-1].stop
         window_factor = len(self.sentiments) / end_time
-        long_ma = np_moving_average(self.squared_subsample, large_window * window_factor)
-        short_ma = np_moving_average(self.squared_subsample, small_window * window_factor)
+        long_ma = np_moving_average(self.sentiments, large_window)
+        short_ma = np_moving_average(self.sentiments, small_window)
         highlights = find_moving_average_highlights(short_ma, long_ma, 1.0 / window_factor)
         return highlights, large_window, small_window
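
The helpers np_moving_average and find_moving_average_highlights are defined elsewhere in the project and are not shown in this diff. As a rough sketch of what the edit step relies on, assuming a uniform-kernel moving average and a simple crossover scan (an approximation, not the project's actual implementation):

import numpy as np

def np_moving_average(data, window):
    # Uniform moving average; the window is clamped to an integer >= 1.
    window = max(int(window), 1)
    return np.convolve(data, np.ones(window) / window, mode="valid")

def find_moving_average_highlights(short_ma, long_ma, scale):
    # Collect spans where the short-term average rises above the
    # long-term one; scale converts an index back into seconds, which
    # is why the caller passes 1.0 / window_factor.
    n = min(len(short_ma), len(long_ma))
    spans, start = [], None
    for i in range(n):
        if short_ma[i] > long_ma[i] and start is None:
            start = i * scale
        elif short_ma[i] <= long_ma[i] and start is not None:
            spans.append((start, i * scale))
            start = None
    if start is not None:
        spans.append((start, n * scale))
    return spans

Under these assumptions, the hunk above swaps the averaged signal from what appears to be a leftover squared_subsample attribute to self.sentiments and drops the seconds-to-samples scaling of the window sizes.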


@@ -9,5 +9,8 @@ def get_subclips(source_video_path, moments):
 def render_moments(moments, input_video_path, output_path):
     clips, vid = get_subclips(input_video_path, moments)
-    to_render = mp.concatenate_videoclips(clips, logger=None)
+    to_render = mp.concatenate_videoclips(clips)
     to_render.write_videofile(output_path, logger=None)
 
+
+def filter_moments(moments, min_length, max_length):
+    return [m for m in moments if m.get_duration() > min_length and m.get_duration() < max_length]
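
The new filter_moments helper uses strict inequalities, so moments lasting exactly min_length or max_length are excluded. A small usage sketch, with a hypothetical stand-in for the moment type since only its get_duration() method is visible in this diff:

# Hypothetical stand-in; the project's real moment type is not shown here.
class Moment:
    def __init__(self, start, stop):
        self.start, self.stop = start, stop

    def get_duration(self):
        return self.stop - self.start

moments = [Moment(0, 2), Moment(5, 40), Moment(50, 58)]
keep = filter_moments(moments, min_length=3, max_length=15)
# keep holds only the 8-second moment (50s-58s); the 2s and 35s
# moments fall outside the (3, 15) duration bounds.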