abdur0000 committed
Commit 5662b4c · 1 Parent(s): f6a7e32
Files changed (3)
  1. Dockerfile +1 -1
  2. README.md +19 -9
  3. app.py → main.py +5 -1
Dockerfile CHANGED
@@ -23,4 +23,4 @@ ENV WHISPER_DEVICE=cpu
 ENV WHISPER_COMPUTE_TYPE=int8
 
 # Run FastAPI
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,10 +1,20 @@
----
-title: Video Processor Api
-emoji: 🌖
-colorFrom: indigo
-colorTo: pink
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# 🎧 Video Processor API (Whisper + Gemini)
+
+FastAPI-based video processor that:
+- Extracts audio from video using FFmpeg
+- Transcribes with Faster Whisper
+- Summarizes using Google Gemini API
+- Returns transcript, summary, and subtitles
+
+### 🔑 Environment Variables
+| Name | Description |
+|------|--------------|
+| `GEMINI_API_KEY` | Your Google Generative AI API key |
+| `WHISPER_MODEL` | Whisper model size (e.g., tiny, base, small) |
+| `WHISPER_DEVICE` | cpu or cuda |
+| `MAX_UPLOAD_MB` | Max upload file size |
+
+### 🚀 Run Locally
+```bash
+pip install -r requirements.txt
+python app.py
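The new README describes a three-stage pipeline: FFmpeg pulls the audio track, Faster Whisper transcribes it, and Gemini writes the summary. A rough sketch of how those stages can fit together, assuming the `faster-whisper` and `google-generativeai` packages and an `ffmpeg` binary on PATH; the function names and the `gemini-1.5-flash` model are illustrative, not taken from main.py:

```python
import os
import subprocess
import tempfile

import google.generativeai as genai
from faster_whisper import WhisperModel

def extract_audio(video_path: str) -> str:
    """Strip the audio track to 16 kHz mono WAV with FFmpeg."""
    fd, audio_path = tempfile.mkstemp(suffix=".wav")
    os.close(fd)
    subprocess.run(
        ["ffmpeg", "-y", "-i", video_path, "-vn", "-ac", "1", "-ar", "16000", audio_path],
        check=True,
    )
    return audio_path

def transcribe(audio_path: str) -> str:
    """Transcribe with Faster Whisper, configured from the documented env vars."""
    model = WhisperModel(
        os.getenv("WHISPER_MODEL", "tiny"),
        device=os.getenv("WHISPER_DEVICE", "cpu"),
        compute_type=os.getenv("WHISPER_COMPUTE_TYPE", "int8"),
    )
    segments, _info = model.transcribe(audio_path)
    return " ".join(segment.text.strip() for segment in segments)

def summarize(transcript: str) -> str:
    """Summarize the transcript with Gemini; the key comes from the environment only."""
    genai.configure(api_key=os.environ["GEMINI_API_KEY"])
    gemini = genai.GenerativeModel("gemini-1.5-flash")  # model name is an assumption
    return gemini.generate_content(f"Summarize this transcript:\n\n{transcript}").text
```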
app.py → main.py RENAMED
@@ -35,7 +35,7 @@ MAX_UPLOAD_MB = int(os.getenv("MAX_UPLOAD_MB", "200"))
 WHISPER_MODEL_SIZE = os.getenv("WHISPER_MODEL", "tiny")
 WHISPER_DEVICE = os.getenv("WHISPER_DEVICE", "cpu")  # or "cuda" if GPU available
 WHISPER_COMPUTE_TYPE = os.getenv("WHISPER_COMPUTE_TYPE", "int8")
-GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "AIzaSyD1EEbH_ybbRh_hemkxMyuaerIaLfXHWlQ")
+GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
 
 # Global variables
 running_jobs = 0
@@ -276,6 +276,10 @@ async def health_check():
         "memory_usage": psutil.Process().memory_info().rss // 1024 // 1024
     }
 
+@app.get("/")
+def root():
+    return JSONResponse({"message": "Video Processor API is running"})
+
 @app.get("/metrics")
 async def metrics():
     return {
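Two behavioral changes land in main.py: the Gemini key no longer has a hard-coded default (so it must be supplied as a Space secret or environment variable), and a root route now answers `GET /` so the Space responds at its base URL. A quick way to probe the deployed service, with a placeholder URL; the `/health` path is an assumption inferred from the `health_check()` hunk header, while `/metrics` is decorated explicitly in the diff:

```python
import requests

BASE_URL = "https://<your-space>.hf.space"  # placeholder, not the real Space URL

# Root route added in this commit.
print(requests.get(f"{BASE_URL}/").json())  # {"message": "Video Processor API is running"}

# /metrics is decorated in the diff; /health is assumed from health_check().
print(requests.get(f"{BASE_URL}/metrics").json())
print(requests.get(f"{BASE_URL}/health").json())
```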