import base64
import glob
import os
import textwrap
from io import BytesIO, StringIO
from typing import Iterator, TextIO

import cv2
import torch
import yt_dlp
from PIL import Image
from pytubefix import YouTube, Stream
from tqdm import tqdm
from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import WebVTTFormatter


def encode_image(image_path_or_PIL_img):
    """Return the base64-encoded bytes of an image given either a PIL image or a file path."""
    if isinstance(image_path_or_PIL_img, Image.Image):
        # Serialize the in-memory PIL image to JPEG before encoding.
        buffered = BytesIO()
        image_path_or_PIL_img.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode('utf-8')
    else:
        # Treat the argument as a file path and encode the raw file bytes.
        with open(image_path_or_PIL_img, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')


def isBase64(sb):
    """Return True if the given string or bytes object is valid base64, False otherwise."""
    try:
        if isinstance(sb, str):
            # base64 text must be pure ASCII; this raises if it is not.
            sb_bytes = bytes(sb, 'ascii')
        elif isinstance(sb, bytes):
            sb_bytes = sb
        else:
            raise ValueError("Argument must be string or bytes")
        # A decode/encode round trip reproduces the input only for valid base64.
        return base64.b64encode(base64.b64decode(sb_bytes)) == sb_bytes
    except Exception:
        return False


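# A minimal sketch of how encode_image and isBase64 are expected to compose: a PIL image
# is serialized to base64 text, which then passes the isBase64 round-trip check. The
# variable names below are illustrative only.
#
#     thumbnail = Image.new("RGB", (64, 64), (0, 0, 0))
#     encoded = encode_image(thumbnail)
#     assert isBase64(encoded)

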
def create_dummy_image(size=(224, 224)):
    """Creates a blank white image to be used as a dummy input when no real image is provided."""
    return Image.new("RGB", size, (255, 255, 255))


def bt_embeddings(prompt, base64_image=None):
    """Embed a text prompt, optionally paired with a base64-encoded image, using BridgeTower.

    Note: the processor and model are reloaded on every call; cache them at module level
    if this function is called in a loop.
    """
    processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
    model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")

    if base64_image:
        if not isBase64(base64_image):
            raise TypeError("Image input must be in base64 encoding!")
        try:
            image_data = base64.b64decode(base64_image)
            image = Image.open(BytesIO(image_data)).convert("RGB")
        except Exception as e:
            raise ValueError("Invalid image data!") from e
    else:
        # BridgeTower always expects an image input, so fall back to a blank placeholder.
        image = create_dummy_image()

    texts = [prompt]
    images = [image]

    inputs = processor(images=images, text=texts, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Use the joint image-text embedding when a real image was supplied,
    # otherwise fall back to the text-only embedding.
    if base64_image:
        embeddings = outputs.cross_embeds
    else:
        embeddings = outputs.text_embeds

    return embeddings.squeeze().tolist()


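# A minimal sketch of calling bt_embeddings, assuming a local file "frame.jpg" exists;
# the path and variable names are illustrative, not part of this module.
#
#     frame_b64 = encode_image("frame.jpg")
#     multimodal_vec = bt_embeddings("a person riding a bicycle", frame_b64)
#     text_only_vec = bt_embeddings("a person riding a bicycle")
#     print(len(multimodal_vec), len(text_only_vec))

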
def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize an OpenCV image to the given width or height while preserving its aspect ratio."""
    dim = None
    (h, w) = image.shape[:2]

    # Nothing to do if neither target dimension is given.
    if width is None and height is None:
        return image

    if width is None:
        # Scale to the requested height and derive the width from the aspect ratio.
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        # Scale to the requested width and derive the height from the aspect ratio.
        r = width / float(w)
        dim = (width, int(h * r))

    return cv2.resize(image, dim, interpolation=inter)


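# A minimal sketch of resizing a decoded video frame to a fixed width, assuming a
# readable video at the illustrative path "video.mp4".
#
#     cap = cv2.VideoCapture("video.mp4")
#     ok, frame = cap.read()
#     if ok:
#         small = maintain_aspect_ratio_resize(frame, width=224)
#         print(small.shape)
#     cap.release()

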
def str2time(strtime):
    """Convert an 'HH:MM:SS[.mmm]' timestamp string into milliseconds."""
    # Strip surrounding quotes that sometimes appear in caption files.
    strtime = strtime.strip('"')

    hrs, mins, seconds = [float(c) for c in strtime.split(':')]

    total_seconds = hrs * 60**2 + mins * 60 + seconds
    total_milliseconds = total_seconds * 1000
    return total_milliseconds


def format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeparator: str = '.'):
    """Format a duration in seconds as an 'HH:MM:SS.mmm' (VTT) or 'HH:MM:SS,mmm' (SRT) timestamp."""
    assert seconds >= 0, "non-negative timestamp expected"
    milliseconds = round(seconds * 1000.0)

    hours = milliseconds // 3_600_000
    milliseconds -= hours * 3_600_000

    minutes = milliseconds // 60_000
    milliseconds -= minutes * 60_000

    seconds = milliseconds // 1_000
    milliseconds -= seconds * 1_000

    hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
    return f"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeparator}{milliseconds:03d}"


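# A quick round-trip sketch: str2time parses a caption timestamp into milliseconds, and
# format_timestamp renders a duration in seconds back into caption form.
#
#     ms = str2time("00:01:05.500")                                 # 65500.0
#     stamp = format_timestamp(65.5, always_include_hours=True)     # "00:01:05.500"

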
def _processText(text: str, maxLineWidth=None):
    """Wrap caption text to maxLineWidth characters; pass it through unchanged if no width is set."""
    if (maxLineWidth is None or maxLineWidth < 0):
        return text

    lines = textwrap.wrap(text, width=maxLineWidth, tabsize=4)
    return '\n'.join(lines)


def write_vtt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None):
    """Write a transcript (segments with 'start', 'end', and 'text') to a file in WebVTT format."""
    print("WEBVTT\n", file=file)
    for segment in transcript:
        # Escape the cue separator so caption text cannot be mistaken for a timing line.
        text = _processText(segment['text'], maxLineWidth).replace('-->', '->')

        print(
            f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
            f"{text}\n",
            file=file,
            flush=True,
        )


def write_srt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None):
    """
    Write a transcript to a file in SRT format.

    Example usage:
        from pathlib import Path
        from whisper.utils import write_srt

        result = transcribe(model, audio_path, temperature=temperature, **args)

        # save SRT
        audio_basename = Path(audio_path).stem
        with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
            write_srt(result["segments"], file=srt)
    """
    for i, segment in enumerate(transcript, start=1):
        text = _processText(segment['text'].strip(), maxLineWidth).replace('-->', '->')

        print(
            f"{i}\n"
            f"{format_timestamp(segment['start'], always_include_hours=True, fractionalSeparator=',')} --> "
            f"{format_timestamp(segment['end'], always_include_hours=True, fractionalSeparator=',')}\n"
            f"{text}\n",
            file=file,
            flush=True,
        )


def getSubs(segments: Iterator[dict], format: str, maxLineWidth: int = -1) -> str:
    """Render transcript segments as a subtitle string in either 'vtt' or 'srt' format."""
    segmentStream = StringIO()

    if format == 'vtt':
        write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
    elif format == 'srt':
        write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
    else:
        raise ValueError("Unknown format " + format)

    segmentStream.seek(0)
    return segmentStream.read()


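# A minimal sketch of turning whisper-style segments into a WebVTT string with getSubs.
# The segment dicts are illustrative data, not the output of any function in this module.
#
#     segments = [
#         {'start': 0.0, 'end': 2.5, 'text': 'Hello there.'},
#         {'start': 2.5, 'end': 5.0, 'text': 'Welcome to the video.'},
#     ]
#     vtt_text = getSubs(segments, 'vtt', maxLineWidth=40)
#     print(vtt_text)

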
def download_video(video_url, path='/tmp/'):
    """Download a YouTube video as mp4 into `path` and return the local file path.

    If `video_url` is not an http(s) URL it is treated as a local filename, and if an
    mp4 already exists in `path` that file is reused instead of downloading again.
    """
    print(f'Getting video information for {video_url}')
    if not video_url.startswith('http'):
        return os.path.join(path, video_url)

    filepath = glob.glob(os.path.join(path, '*.mp4'))
    if len(filepath) > 0:
        return filepath[0]

    def progress_callback(stream: Stream, data_chunk: bytes, bytes_remaining: int) -> None:
        # `pbar` is created below, before stream.download() triggers this callback.
        pbar.update(len(data_chunk))

    yt = YouTube(video_url, on_progress_callback=progress_callback)
    # Prefer a progressive 720p mp4 stream; fall back to the best available progressive mp4.
    stream = yt.streams.filter(progressive=True, file_extension='mp4', res='720p').desc().first()
    if stream is None:
        stream = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
    if not os.path.exists(path):
        os.makedirs(path)
    filepath = os.path.join(path, stream.default_filename)
    if not os.path.exists(filepath):
        print('Downloading video from YouTube...')
        pbar = tqdm(desc='Downloading video from YouTube', total=stream.filesize, unit="bytes")
        stream.download(path)
        pbar.close()
    return filepath


def get_video_id_from_url(video_url):
    """
    Extract the video id from common YouTube URL formats.

    Examples:
    - http://youtu.be/SA2iWivDJiE
    - http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
    - http://www.youtube.com/embed/SA2iWivDJiE
    - http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
    """
    import urllib.parse
    url = urllib.parse.urlparse(video_url)
    if url.hostname == 'youtu.be':
        return url.path[1:]
    if url.hostname in ('www.youtube.com', 'youtube.com'):
        if url.path == '/watch':
            p = urllib.parse.parse_qs(url.query)
            return p['v'][0]
        if url.path[:7] == '/embed/':
            return url.path.split('/')[2]
        if url.path[:3] == '/v/':
            return url.path.split('/')[2]

    # Fall back to returning the input unchanged (e.g. when it is already a bare video id).
    return video_url


def get_transcript_vtt(video_url, path='/tmp'):
    """Fetch the English transcript of a YouTube video and save it as captions.vtt in `path`."""
    video_id = get_video_id_from_url(video_url)
    filepath = os.path.join(path, 'captions.vtt')
    if os.path.exists(filepath):
        return filepath

    transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['en-GB', 'en'])
    formatter = WebVTTFormatter()
    webvtt_formatted = formatter.format_transcript(transcript)

    with open(filepath, 'w', encoding='utf-8') as webvtt_file:
        webvtt_file.write(webvtt_formatted)

    return filepath


def download_youtube_subtitle(video_url, path='./shared_data/videos/video1'):
    """Download the English subtitles of a YouTube video as a .vtt file using yt-dlp."""
    video_id = get_video_id_from_url(video_url)
    output_path = os.path.join(path, f"{video_id}.en.vtt")

    if os.path.exists(output_path):
        return output_path

    os.makedirs(path, exist_ok=True)

    ydl_opts = {
        'skip_download': True,       # fetch only the subtitles, not the video
        'writesubtitles': True,
        'subtitleslangs': ['en'],
        'subtitlesformat': 'vtt',
        'outtmpl': os.path.join(path, '%(id)s.%(ext)s'),
    }

    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        ydl.download([video_url])

    return output_path


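# A minimal sketch of the intended end-to-end flow: download a video, fetch its transcript
# as WebVTT, then embed a caption/image pair. The URL below is illustrative only.
#
#     url = 'https://www.youtube.com/watch?v=SA2iWivDJiE'
#     video_file = download_video(url, path='/tmp/')
#     vtt_file = get_transcript_vtt(url, path='/tmp')
#     emb = bt_embeddings('first caption of the video', encode_image(create_dummy_image()))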