Merge pull request #725 from jnkstr/no-internet-whisper

fix: no internet connection for whisper if you use docker
Timothy Jaeryang Baek 1 year ago
parent
commit
1def55cf09
3 changed files with 13 additions and 5 deletions
  1. Dockerfile (+6, -0)
  2. backend/apps/audio/main.py (+4, -4)
  3. backend/config.py (+3, -1)

Dockerfile (+6, -0)

@@ -30,6 +30,10 @@ ENV WEBUI_SECRET_KEY ""
 ENV SCARF_NO_ANALYTICS true
 ENV DO_NOT_TRACK true
 
+#Whisper TTS Settings
+ENV WHISPER_MODEL="base"
+ENV WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
+
 WORKDIR /app/backend
 
 # install python dependencies
@@ -45,6 +49,8 @@ RUN apt-get update \
     && rm -rf /var/lib/apt/lists/*
 
 # RUN python -c "from sentence_transformers import SentenceTransformer; model = SentenceTransformer('all-MiniLM-L6-v2')"
+RUN python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"
+
 
 # copy embedding weight from build
 RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
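As a readability aid (not part of the diff itself), the one-line RUN python -c command above is equivalent to the following sketch: it pre-downloads the Whisper model into the image at build time, so the container needs no internet connection when it later transcribes audio.

```python
# Sketch of what the new Dockerfile RUN line does at build time.
# Values mirror the ENV defaults added above; nothing here is new API.
import os

from faster_whisper import WhisperModel

model_name = os.environ.get("WHISPER_MODEL", "base")
model_dir = os.environ.get("WHISPER_MODEL_DIR", "/app/backend/data/cache/whisper/models")

# Constructing the model downloads the weights into download_root,
# baking them into the image layer so runtime loads are purely local.
WhisperModel(model_name, device="cpu", compute_type="int8", download_root=model_dir)
```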

backend/apps/audio/main.py (+4, -4)

@@ -1,3 +1,4 @@
+import os
 from fastapi import (
     FastAPI,
     Request,
@@ -20,7 +21,7 @@ from utils.utils import (
 )
 from utils.misc import calculate_sha256
 
-from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL_NAME
+from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL, WHISPER_MODEL_DIR
 
 app = FastAPI()
 app.add_middleware(
@@ -53,12 +54,11 @@ def transcribe(
             f.write(contents)
             f.close()
 
-        model_name = WHISPER_MODEL_NAME
         model = WhisperModel(
-            model_name,
+            WHISPER_MODEL,
             device="cpu",
             compute_type="int8",
-            download_root=f"{CACHE_DIR}/whisper/models",
+            download_root=WHISPER_MODEL_DIR,
         )
 
         segments, info = model.transcribe(file_path, beam_size=5)
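Put together, the handler now resolves both the model name and the cache directory from config, which matches the location populated during the Docker build. A minimal sketch of that flow (simplified from the handler above, not verbatim; the helper name and return value are illustrative):

```python
# Minimal sketch of the updated transcription flow; the WhisperModel call
# mirrors the diff, the surrounding function is illustrative only.
from faster_whisper import WhisperModel

from config import WHISPER_MODEL, WHISPER_MODEL_DIR  # env-driven, see backend/config.py


def transcribe_file(file_path: str) -> str:
    # WHISPER_MODEL_DIR points at the directory filled during the image build,
    # so this load finds the cached weights instead of reaching the network.
    model = WhisperModel(
        WHISPER_MODEL,
        device="cpu",
        compute_type="int8",
        download_root=WHISPER_MODEL_DIR,
    )
    segments, _info = model.transcribe(file_path, beam_size=5)
    return " ".join(segment.text.strip() for segment in segments)
```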

backend/config.py (+3, -1)

@@ -139,4 +139,6 @@ CHUNK_OVERLAP = 100
 ####################################
 # Transcribe
 ####################################
-WHISPER_MODEL_NAME = "base"
+
+WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
+WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models")
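Since both settings now come from the environment, they can be overridden without code changes. A hypothetical sanity check (not part of the PR) to run inside the built image and confirm the weights were cached at build time:

```python
# Hypothetical check: list the files under WHISPER_MODEL_DIR to verify the
# build-time download succeeded and no runtime network access is required.
import os

model_dir = os.getenv("WHISPER_MODEL_DIR", "/app/backend/data/cache/whisper/models")

cached = [
    os.path.join(root, name)
    for root, _dirs, names in os.walk(model_dir)
    for name in names
]
print(f"{len(cached)} cached file(s) under {model_dir}")
```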