瀏覽代碼

Guard integration tests with a tag

This should help CI avoid running the integration test logic in a
container where it's not currently possible.
Daniel Hiltgen 1 年之前
父節點
當前提交
697bea6939
共有 4 個文件被更改,包括 15 次插入和 5 次刪除
  1. 7 4
      scripts/setup_integration_tests.sh
  2. 2 0
      server/llm_image_test.go
  3. 2 0
      server/llm_test.go
  4. 4 1
      server/llm_utils_test.go

+ 7 - 4
scripts/setup_integration_tests.sh

@@ -2,6 +2,9 @@
 
 # This script sets up integration tests which run the full stack to verify
 # inference locally
+#
+# To run the relevant tests use
+# go test -tags=integration ./server
 set -e
 set -o pipefail
 
@@ -21,15 +24,15 @@ for model in ${TEST_MODELS[@]}; do
     echo "Pulling manifest for ${TEST_MODEL}:${TEST_MODEL_TAG}"
     curl -s --header "${ACCEPT_HEADER}" \
         -o ${OLLAMA_MODELS}/manifests/${REGISTRY}/${TEST_MODEL}/${TEST_MODEL_TAG} \
-        ${REGISTRY_SCHEME}://${REGISTRY}/v2/${TEST_MODEL}/manifests/${TEST_MODEL_TAG} 
+        ${REGISTRY_SCHEME}://${REGISTRY}/v2/${TEST_MODEL}/manifests/${TEST_MODEL_TAG}
 
     CFG_HASH=$(cat ${OLLAMA_MODELS}/manifests/${REGISTRY}/${TEST_MODEL}/${TEST_MODEL_TAG} | jq -r ".config.digest")
     echo "Pulling config blob ${CFG_HASH}"
     curl -L -C - --header "${ACCEPT_HEADER}" \
-            -o ${OLLAMA_MODELS}/blobs/${CFG_HASH} \
-            ${REGISTRY_SCHEME}://${REGISTRY}/v2/${TEST_MODEL}/blobs/${CFG_HASH}
+        -o ${OLLAMA_MODELS}/blobs/${CFG_HASH} \
+        ${REGISTRY_SCHEME}://${REGISTRY}/v2/${TEST_MODEL}/blobs/${CFG_HASH}
 
-    for LAYER in $(cat ${OLLAMA_MODELS}/manifests/${REGISTRY}/${TEST_MODEL}/${TEST_MODEL_TAG} | jq -r ".layers[].digest" ) ; do
+    for LAYER in $(cat ${OLLAMA_MODELS}/manifests/${REGISTRY}/${TEST_MODEL}/${TEST_MODEL_TAG} | jq -r ".layers[].digest"); do
         echo "Pulling blob ${LAYER}"
         curl -L -C - --header "${ACCEPT_HEADER}" \
             -o ${OLLAMA_MODELS}/blobs/${LAYER} \

+ 2 - 0
server/llm_image_test.go

@@ -1,3 +1,5 @@
+//go:build integration
+
 package server
 
 import (

+ 2 - 0
server/llm_test.go

@@ -1,3 +1,5 @@
+//go:build integration
+
 package server
 
 import (

+ 4 - 1
server/llm_utils_test.go

@@ -1,3 +1,5 @@
+//go:build integration
+
 package server
 
 import (
@@ -38,7 +40,7 @@ func PrepareModelForPrompts(t *testing.T, modelName string, opts api.Options) (*
 }
 
 func OneShotPromptResponse(t *testing.T, ctx context.Context, req api.GenerateRequest, model *Model, runner llm.LLM) string {
-	prompt, err := model.Prompt(PromptVars{
+	prompt, err := model.PreResponsePrompt(PromptVars{
 		System: req.System,
 		Prompt: req.Prompt,
 		First:  len(req.Context) == 0,
@@ -54,6 +56,7 @@ func OneShotPromptResponse(t *testing.T, ctx context.Context, req api.GenerateRe
 			success <- true
 		}
 	}
+
 	predictReq := llm.PredictOpts{
 		Prompt: prompt,
 		Format: req.Format,