@@ -13,7 +13,7 @@ ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
 ARG USE_RERANKING_MODEL=""
 
 # Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
-ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"
+ARG USE_TIKTOKEN_ENCODING_MODEL_NAME="cl100k_base"
 
 ARG BUILD_HASH=dev-build
 # Override at your own risk - non-root configurations are untested
@@ -77,7 +77,7 @@ ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
     SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
 
 ## Tiktoken model settings ##
-ENV TIKTOKEN_ENCODING_NAME="$USE_TIKTOKEN_ENCODING_NAME" \
+ENV TIKTOKEN_ENCODING_MODEL_NAME="$USE_TIKTOKEN_ENCODING_MODEL_NAME" \
     TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"
 
 ## Hugging Face download cache ##
@@ -139,13 +139,13 @@ RUN pip3 install uv && \
     uv pip install --system -r requirements.txt --no-cache-dir && \
     python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
     python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
-    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
+    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_MODEL_NAME'])"; \
     else \
     pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
     uv pip install --system -r requirements.txt --no-cache-dir && \
     python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
     python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
-    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
+    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_MODEL_NAME'])"; \
     fi; \
     chown -R $UID:$GID /app/backend/data/
 