
change `github.com/jmorganca/ollama` to `github.com/ollama/ollama` (#3347)

Patrick Devine 1 year ago
parent
commit
1b272d5bcd
54 changed files with 115 additions and 115 deletions
  1. .golangci.yaml (+1 -1)
  2. Dockerfile (+25 -25)
  3. README.md (+3 -3)
  4. api/client.go (+2 -2)
  5. app/lifecycle/lifecycle.go (+2 -2)
  6. app/lifecycle/server.go (+1 -1)
  7. app/lifecycle/updater.go (+2 -2)
  8. app/main.go (+1 -1)
  9. app/tray/tray.go (+2 -2)
  10. app/tray/tray_nonwindows.go (+1 -1)
  11. app/tray/tray_windows.go (+2 -2)
  12. app/tray/wintray/tray.go (+1 -1)
  13. cmd/cmd.go (+6 -6)
  14. cmd/interactive.go (+3 -3)
  15. cmd/interactive_test.go (+1 -1)
  16. cmd/start_darwin.go (+1 -1)
  17. cmd/start_default.go (+1 -1)
  18. cmd/start_windows.go (+1 -1)
  19. convert/convert.go (+2 -2)
  20. docs/faq.md (+1 -1)
  21. docs/modelfile.md (+1 -1)
  22. docs/openai.md (+1 -1)
  23. examples/langchain-python-rag-privategpt/README.md (+1 -1)
  24. examples/modelfile-mario/readme.md (+1 -1)
  25. examples/python-json-datagenerator/readme.md (+1 -1)
  26. examples/python-loganalysis/readme.md (+1 -1)
  27. examples/typescript-functioncalling/readme.md (+1 -1)
  28. go.mod (+1 -1)
  29. integration/basic_test.go (+1 -1)
  30. integration/llm_image_test.go (+1 -1)
  31. integration/llm_test.go (+1 -1)
  32. integration/utils_test.go (+2 -2)
  33. llm/dyn_ext_server.go (+2 -2)
  34. llm/gguf.go (+1 -1)
  35. llm/llama.go (+1 -1)
  36. llm/llm.go (+2 -2)
  37. llm/payload_common.go (+1 -1)
  38. llm/payload_test.go (+1 -1)
  39. main.go (+1 -1)
  40. openai/openai.go (+1 -1)
  41. progress/bar.go (+1 -1)
  42. scripts/build_darwin.sh (+1 -1)
  43. scripts/build_docker.sh (+2 -2)
  44. scripts/build_linux.sh (+3 -3)
  45. scripts/build_windows.ps1 (+2 -2)
  46. scripts/push_docker.sh (+1 -1)
  47. server/auth.go (+2 -2)
  48. server/download.go (+2 -2)
  49. server/images.go (+5 -5)
  50. server/prompt.go (+1 -1)
  51. server/prompt_test.go (+1 -1)
  52. server/routes.go (+6 -6)
  53. server/routes_test.go (+4 -4)
  54. server/upload.go (+2 -2)
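
Since the Go module path itself is what changed, any downstream project that imports Ollama as a library has to make the same substitution in its own imports. A minimal sketch of a consumer on the new path, assuming the `api` package keeps its `ClientFromEnvironment`/`Generate` surface (the model name and prompt are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	// Previously imported as "github.com/jmorganca/ollama/api".
	"github.com/ollama/ollama/api"
)

func main() {
	// Reads OLLAMA_HOST, defaulting to http://127.0.0.1:11434.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := &api.GenerateRequest{
		Model:  "llama2", // illustrative model name
		Prompt: "Why is the sky blue?",
	}

	// Generate streams partial responses to the callback until the request finishes.
	err = client.Generate(context.Background(), req, func(resp api.GenerateResponse) error {
		fmt.Print(resp.Response)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println()
}
```

Mechanically, the rest of this commit is that same textual substitution applied across the tree, plus the `module` line in go.mod, which `go mod edit -module github.com/ollama/ollama` can rewrite.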

+ 1 - 1
.golangci.yaml

@@ -24,4 +24,4 @@ linters-settings:
       - (*os.File).Seek
       - (*bufio.Writer).WriteString
       - (*github.com/spf13/pflag.FlagSet).Set
-      - (*github.com/jmorganca/ollama/llm.readSeekOffset).Seek
+      - (*github.com/ollama/ollama/llm.readSeekOffset).Seek

+ 25 - 25
Dockerfile

@@ -15,8 +15,8 @@ ARG CMAKE_VERSION
 COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 ARG CGO_CFLAGS
 RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
 
@@ -25,8 +25,8 @@ ARG CMAKE_VERSION
 COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 ARG CGO_CFLAGS
 RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
 
@@ -36,18 +36,18 @@ COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
 ENV LIBRARY_PATH /opt/amdgpu/lib64
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 ARG CGO_CFLAGS
 ARG AMDGPU_TARGETS
 RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
 RUN mkdir /tmp/scratch && \
-    for dep in $(cat /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/x86_64/rocm*/lib/deps.txt) ; do \
+    for dep in $(cat /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/x86_64/rocm*/lib/deps.txt) ; do \
         cp ${dep} /tmp/scratch/ || exit 1 ; \
     done && \
     (cd /opt/rocm/lib && tar cf - rocblas/library) | (cd /tmp/scratch/ && tar xf - ) && \
-    mkdir -p /go/src/github.com/jmorganca/ollama/dist/deps/ && \
-    (cd /tmp/scratch/ && tar czvf /go/src/github.com/jmorganca/ollama/dist/deps/ollama-linux-amd64-rocm.tgz . )
+    mkdir -p /go/src/github.com/ollama/ollama/dist/deps/ && \
+    (cd /tmp/scratch/ && tar czvf /go/src/github.com/ollama/ollama/dist/deps/ollama-linux-amd64-rocm.tgz . )
 
 
 FROM --platform=linux/amd64 centos:7 AS cpu-builder-amd64
@@ -56,10 +56,10 @@ ARG GOLANG_VERSION
 COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
 ARG OLLAMA_CUSTOM_CPU_DEFS
 ARG CGO_CFLAGS
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 
 FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu-build-amd64
 RUN OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
@@ -74,8 +74,8 @@ ARG GOLANG_VERSION
 COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 # Note, we only build the "base" CPU variant on arm since avx/avx2 are x86 features
 ARG OLLAMA_CUSTOM_CPU_DEFS
 ARG CGO_CFLAGS
@@ -84,13 +84,13 @@ RUN OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
 # Intermediate stage used for ./scripts/build_linux.sh
 FROM --platform=linux/amd64 cpu-build-amd64 AS build-amd64
 ENV CGO_ENABLED 1
-WORKDIR /go/src/github.com/jmorganca/ollama
+WORKDIR /go/src/github.com/ollama/ollama
 COPY . .
-COPY --from=cpu_avx-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=cpu_avx2-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=cuda-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=rocm-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=rocm-build-amd64 /go/src/github.com/jmorganca/ollama/dist/deps/ ./dist/deps/
+COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+COPY --from=cuda-build-amd64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/deps/ ./dist/deps/
 ARG GOFLAGS
 ARG CGO_CFLAGS
 RUN go build -trimpath .
@@ -99,10 +99,10 @@ RUN go build -trimpath .
 FROM --platform=linux/arm64 cpu-build-arm64 AS build-arm64
 ENV CGO_ENABLED 1
 ARG GOLANG_VERSION
-WORKDIR /go/src/github.com/jmorganca/ollama
+WORKDIR /go/src/github.com/ollama/ollama
 COPY . .
-COPY --from=cuda-build-arm64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-RUN mkdir -p /go/src/github.com/jmorganca/ollama/dist/deps/
+COPY --from=cuda-build-arm64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+RUN mkdir -p /go/src/github.com/ollama/ollama/dist/deps/
 ARG GOFLAGS
 ARG CGO_CFLAGS
 RUN go build -trimpath .
@@ -110,15 +110,15 @@ RUN go build -trimpath .
 # Runtime stages
 FROM --platform=linux/amd64 ubuntu:22.04 as runtime-amd64
 RUN apt-get update && apt-get install -y ca-certificates
-COPY --from=build-amd64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
+COPY --from=build-amd64 /go/src/github.com/ollama/ollama/ollama /bin/ollama
 FROM --platform=linux/arm64 ubuntu:22.04 as runtime-arm64
 RUN apt-get update && apt-get install -y ca-certificates
-COPY --from=build-arm64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
+COPY --from=build-arm64 /go/src/github.com/ollama/ollama/ollama /bin/ollama
 
 # Radeon images are much larger so we keep it distinct from the CPU/CUDA image
 FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete as runtime-rocm
 RUN update-pciids
-COPY --from=build-amd64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
+COPY --from=build-amd64 /go/src/github.com/ollama/ollama/ollama /bin/ollama
 EXPOSE 11434
 ENV OLLAMA_HOST 0.0.0.0
 

+ 3 - 3
README.md

@@ -1,5 +1,5 @@
 <div align="center">
-  <img alt="ollama" height="200px" src="https://github.com/jmorganca/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
+  <img alt="ollama" height="200px" src="https://github.com/ollama/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
 </div>
 
 # Ollama
@@ -22,7 +22,7 @@ Get up and running with large language models locally.
 curl -fsSL https://ollama.com/install.sh | sh
 ```
 
-[Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)
+[Manual install instructions](https://github.com/ollama/ollama/blob/main/docs/linux.md)
 
 ### Docker
 
@@ -213,7 +213,7 @@ Then build the binary:
 go build .
 ```
 
-More detailed instructions can be found in the [developer guide](https://github.com/jmorganca/ollama/blob/main/docs/development.md)
+More detailed instructions can be found in the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
 
 ### Running local builds
 

+ 2 - 2
api/client.go

@@ -15,8 +15,8 @@ import (
 	"runtime"
 	"strings"
 
-	"github.com/jmorganca/ollama/format"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/format"
+	"github.com/ollama/ollama/version"
 )
 
 type Client struct {

+ 2 - 2
app/lifecycle/lifecycle.go

@@ -9,8 +9,8 @@ import (
 	"os/signal"
 	"syscall"
 
-	"github.com/jmorganca/ollama/app/store"
-	"github.com/jmorganca/ollama/app/tray"
+	"github.com/ollama/ollama/app/store"
+	"github.com/ollama/ollama/app/tray"
 )
 
 func Run() {

+ 1 - 1
app/lifecycle/server.go

@@ -11,7 +11,7 @@ import (
 	"path/filepath"
 	"time"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 func getCLIFullPath(command string) string {

+ 2 - 2
app/lifecycle/updater.go

@@ -18,8 +18,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/jmorganca/ollama/auth"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/auth"
+	"github.com/ollama/ollama/version"
 )
 
 var (

+ 1 - 1
app/main.go

@@ -4,7 +4,7 @@ package main
 // go build -ldflags="-H windowsgui" .
 
 import (
-	"github.com/jmorganca/ollama/app/lifecycle"
+	"github.com/ollama/ollama/app/lifecycle"
 )
 
 func main() {

+ 2 - 2
app/tray/tray.go

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"runtime"
 
-	"github.com/jmorganca/ollama/app/assets"
-	"github.com/jmorganca/ollama/app/tray/commontray"
+	"github.com/ollama/ollama/app/assets"
+	"github.com/ollama/ollama/app/tray/commontray"
 )
 
 func NewTray() (commontray.OllamaTray, error) {

+ 1 - 1
app/tray/tray_nonwindows.go

@@ -5,7 +5,7 @@ package tray
 import (
 	"fmt"
 
-	"github.com/jmorganca/ollama/app/tray/commontray"
+	"github.com/ollama/ollama/app/tray/commontray"
 )
 
 func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {

+ 2 - 2
app/tray/tray_windows.go

@@ -1,8 +1,8 @@
 package tray
 
 import (
-	"github.com/jmorganca/ollama/app/tray/commontray"
-	"github.com/jmorganca/ollama/app/tray/wintray"
+	"github.com/ollama/ollama/app/tray/commontray"
+	"github.com/ollama/ollama/app/tray/wintray"
 )
 
 func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {

+ 1 - 1
app/tray/wintray/tray.go

@@ -13,7 +13,7 @@ import (
 	"sync"
 	"unsafe"
 
-	"github.com/jmorganca/ollama/app/tray/commontray"
+	"github.com/ollama/ollama/app/tray/commontray"
 	"golang.org/x/sys/windows"
 )
 

+ 6 - 6
cmd/cmd.go

@@ -30,12 +30,12 @@ import (
 	"golang.org/x/exp/slices"
 	"golang.org/x/term"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/format"
-	"github.com/jmorganca/ollama/parser"
-	"github.com/jmorganca/ollama/progress"
-	"github.com/jmorganca/ollama/server"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/format"
+	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/progress"
+	"github.com/ollama/ollama/server"
+	"github.com/ollama/ollama/version"
 )
 
 func CreateHandler(cmd *cobra.Command, args []string) error {

+ 3 - 3
cmd/interactive.go

@@ -14,9 +14,9 @@ import (
 	"github.com/spf13/cobra"
 	"golang.org/x/exp/slices"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/progress"
-	"github.com/jmorganca/ollama/readline"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/progress"
+	"github.com/ollama/ollama/readline"
 )
 
 type MultilineState int

+ 1 - 1
cmd/interactive_test.go

@@ -7,7 +7,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 func TestExtractFilenames(t *testing.T) {

+ 1 - 1
cmd/start_darwin.go

@@ -7,7 +7,7 @@ import (
 	"os/exec"
 	"strings"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 func startApp(ctx context.Context, client *api.Client) error {

+ 1 - 1
cmd/start_default.go

@@ -6,7 +6,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 func startApp(ctx context.Context, client *api.Client) error {

+ 1 - 1
cmd/start_windows.go

@@ -10,7 +10,7 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 func startApp(ctx context.Context, client *api.Client) error {

+ 2 - 2
convert/convert.go

@@ -16,8 +16,8 @@ import (
 	"github.com/mitchellh/mapstructure"
 	"google.golang.org/protobuf/proto"
 
-	"github.com/jmorganca/ollama/convert/sentencepiece"
-	"github.com/jmorganca/ollama/llm"
+	"github.com/ollama/ollama/convert/sentencepiece"
+	"github.com/ollama/ollama/llm"
 )
 
 type Params struct {

+ 1 - 1
docs/faq.md

@@ -154,7 +154,7 @@ No. Ollama runs locally, and conversation data does not leave your machine.
 
 ## How can I use Ollama in Visual Studio Code?
 
-There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/jmorganca/ollama#extensions--plugins) at the bottom of the main repository readme.
+There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.
 
 ## How do I use Ollama behind a proxy?
 

+ 1 - 1
docs/modelfile.md

@@ -113,7 +113,7 @@ FROM llama2
 ```
 
 A list of available base models:
-<https://github.com/jmorganca/ollama#model-library>
+<https://github.com/ollama/ollama#model-library>
 
 #### Build from a `bin` file
 

+ 1 - 1
docs/openai.md

@@ -1,6 +1,6 @@
 # OpenAI compatibility
 
-> **Note:** OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/jmorganca/ollama/blob/main/docs/api.md).
+> **Note:** OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/ollama/ollama/blob/main/docs/api.md).
 
 Ollama provides experimental compatibility with parts of the [OpenAI API](https://platform.openai.com/docs/api-reference) to help connect existing applications to Ollama.
 

+ 1 - 1
examples/langchain-python-rag-privategpt/README.md

@@ -1,6 +1,6 @@
 # PrivateGPT with Llama 2 uncensored
 
-https://github.com/jmorganca/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b
+https://github.com/ollama/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b
 
 > Note: this example is a slightly modified version of PrivateGPT using models such as Llama 2 Uncensored. All credit for PrivateGPT goes to Iván Martínez who is the creator of it, and you can find his GitHub repo [here](https://github.com/imartinez/privateGPT).
 

+ 1 - 1
examples/modelfile-mario/readme.md

@@ -28,7 +28,7 @@ You are Mario from Super Mario Bros, acting as an assistant.
 What if you want to change its behaviour?
 
 - Try changing the prompt
-- Try changing the parameters [Docs](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md)
+- Try changing the parameters [Docs](https://github.com/ollama/ollama/blob/main/docs/modelfile.md)
 - Try changing the model (e.g. An uncensored model by `FROM wizard-vicuna` this is the wizard-vicuna uncensored model )
 
 Once the changes are made,

+ 1 - 1
examples/python-json-datagenerator/readme.md

@@ -1,6 +1,6 @@
 # JSON Output Example
 
-![llmjson 2023-11-10 15_31_31](https://github.com/jmorganca/ollama/assets/633681/e599d986-9b4a-4118-81a4-4cfe7e22da25)
+![llmjson 2023-11-10 15_31_31](https://github.com/ollama/ollama/assets/633681/e599d986-9b4a-4118-81a4-4cfe7e22da25)
 
 There are two python scripts in this example. `randomaddresses.py` generates random addresses from different countries. `predefinedschema.py` sets a template for the model to fill in.
 

+ 1 - 1
examples/python-loganalysis/readme.md

@@ -1,6 +1,6 @@
 # Log Analysis example
 
-![loganalyzer 2023-11-10 08_53_29](https://github.com/jmorganca/ollama/assets/633681/ad30f1fc-321f-4953-8914-e30e24db9921)
+![loganalyzer 2023-11-10 08_53_29](https://github.com/ollama/ollama/assets/633681/ad30f1fc-321f-4953-8914-e30e24db9921)
 
 This example shows one possible way to create a log file analyzer. It uses the model **mattw/loganalyzer** which is based on **codebooga**, a 34b parameter model.
 

+ 1 - 1
examples/typescript-functioncalling/readme.md

@@ -1,6 +1,6 @@
 # Function calling
 
-![function calling 2023-11-16 16_12_58](https://github.com/jmorganca/ollama/assets/633681/a0acc247-9746-45ab-b325-b65dfbbee4fb)
+![function calling 2023-11-16 16_12_58](https://github.com/ollama/ollama/assets/633681/a0acc247-9746-45ab-b325-b65dfbbee4fb)
 
 One of the features added to some models is 'function calling'. It's a bit of a confusing name. It's understandable if you think that means the model can call functions, but that's not what it means. Function calling simply means that the output of the model is formatted in JSON, using a preconfigured schema, and uses the expected types. Then your code can use the output of the model and call functions with it. Using the JSON format in Ollama, you can use any model for function calling. 
 

+ 1 - 1
go.mod

@@ -1,4 +1,4 @@
-module github.com/jmorganca/ollama
+module github.com/ollama/ollama
 
 go 1.22
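
The `module` directive is the root of every import path in the repository, so this single line is what drives the matching import rewrites throughout this commit. For an external project that depends on Ollama, the corresponding update would look roughly like the sketch below; the module name and version are hypothetical:

```
// go.mod of a hypothetical downstream project (version shown is illustrative).
module example.com/myapp

go 1.22

require github.com/ollama/ollama v0.1.30
```

Imports in that project's source change the same way, e.g. `github.com/jmorganca/ollama/api` becomes `github.com/ollama/ollama/api`.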
 

+ 1 - 1
integration/basic_test.go

@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 func TestOrcaMiniBlueSky(t *testing.T) {

+ 1 - 1
integration/llm_image_test.go

@@ -9,7 +9,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 	"github.com/stretchr/testify/require"
 )
 

+ 1 - 1
integration/llm_test.go

@@ -9,7 +9,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 // TODO - this would ideally be in the llm package, but that would require some refactoring of interfaces in the server

+ 2 - 2
integration/utils_test.go

@@ -21,8 +21,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/app/lifecycle"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/app/lifecycle"
 	"github.com/stretchr/testify/assert"
 )
 

+ 2 - 2
llm/dyn_ext_server.go

@@ -33,8 +33,8 @@ import (
 	"time"
 	"unsafe"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/gpu"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/gpu"
 )
 
 type dynExtServer struct {

+ 1 - 1
llm/gguf.go

@@ -15,7 +15,7 @@ import (
 	"github.com/pdevine/tensor/native"
 	"github.com/x448/float16"
 
-	"github.com/jmorganca/ollama/format"
+	"github.com/ollama/ollama/format"
 )
 
 type ContainerGGUF struct {

+ 1 - 1
llm/llama.go

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 const jsonGrammar = `

+ 2 - 2
llm/llm.go

@@ -8,8 +8,8 @@ import (
 	"runtime"
 	"slices"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/gpu"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/gpu"
 )
 
 type LLM interface {

+ 1 - 1
llm/payload_common.go

@@ -16,7 +16,7 @@ import (
 	"golang.org/x/exp/slices"
 	"golang.org/x/sync/errgroup"
 
-	"github.com/jmorganca/ollama/gpu"
+	"github.com/ollama/ollama/gpu"
 )
 
 // Libraries names may contain an optional variant separated by '_'

+ 1 - 1
llm/payload_test.go

@@ -3,7 +3,7 @@ package llm
 import (
 	"testing"
 
-	"github.com/jmorganca/ollama/gpu"
+	"github.com/ollama/ollama/gpu"
 	"github.com/stretchr/testify/assert"
 )
 

+ 1 - 1
main.go

@@ -3,7 +3,7 @@ package main
 import (
 	"context"
 
-	"github.com/jmorganca/ollama/cmd"
+	"github.com/ollama/ollama/cmd"
 	"github.com/spf13/cobra"
 )
 

+ 1 - 1
openai/openai.go

@@ -11,7 +11,7 @@ import (
 	"time"
 
 	"github.com/gin-gonic/gin"
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 type Error struct {

+ 1 - 1
progress/bar.go

@@ -6,7 +6,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/jmorganca/ollama/format"
+	"github.com/ollama/ollama/format"
 	"golang.org/x/term"
 )
 

+ 1 - 1
scripts/build_darwin.sh

@@ -3,7 +3,7 @@
 set -e
 
 export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")}
-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version=$VERSION\" \"-X=github.com/jmorganca/ollama/server.mode=release\"'"
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"
 
 mkdir -p dist
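
The `-X` linker flag injects a string into a package-level variable at link time and addresses that variable by its fully qualified package path, which is why the build scripts have to track the rename as well. A minimal sketch of the kind of variable these flags target, assuming a `version` package laid out like Ollama's (the version value is illustrative):

```go
// version/version.go: the default is compiled in and overridden at release time.
package version

// Version is replaced at build time, e.g.:
//   go build -ldflags "-X=github.com/ollama/ollama/version.Version=0.1.30"
var Version = "0.0.0"
```

build_docker.sh, build_linux.sh, push_docker.sh, and build_windows.ps1 below pass the same pair of `-X` flags; only the module prefix changes in each of them.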
 

+ 2 - 2
scripts/build_docker.sh

@@ -3,7 +3,7 @@
 set -eu
 
 export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")}
-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version=$VERSION\" \"-X=github.com/jmorganca/ollama/server.mode=release\"'"
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"
 
 # We use 2 different image repositories to handle combining architecture images into multiarch manifest
 # (The ROCm image is x86 only and is not a multiarch manifest)
@@ -74,4 +74,4 @@ if [ -z "${OLLAMA_SKIP_MANIFEST_CREATE}" ]; then
         echo "  ${ARCH_IMAGE_REPO}:$VERSION-arm64"
         echo "  ${ARCH_IMAGE_REPO}:$VERSION-rocm"
     fi
-fi
+fi

+ 3 - 3
scripts/build_linux.sh

@@ -3,7 +3,7 @@
 set -eu
 
 export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")}
-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version=$VERSION\" \"-X=github.com/jmorganca/ollama/server.mode=release\"'"
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"
 
 BUILD_ARCH=${BUILD_ARCH:-"amd64 arm64"}
 export AMDGPU_TARGETS=${AMDGPU_TARGETS:=""}
@@ -21,10 +21,10 @@ for TARGETARCH in ${BUILD_ARCH}; do
         -t builder:$TARGETARCH \
         .
     docker create --platform linux/$TARGETARCH --name builder-$TARGETARCH builder:$TARGETARCH
-    docker cp builder-$TARGETARCH:/go/src/github.com/jmorganca/ollama/ollama ./dist/ollama-linux-$TARGETARCH
+    docker cp builder-$TARGETARCH:/go/src/github.com/ollama/ollama/ollama ./dist/ollama-linux-$TARGETARCH
 
     if [ "$TARGETARCH" = "amd64" ]; then
-        docker cp builder-$TARGETARCH:/go/src/github.com/jmorganca/ollama/dist/deps/ ./dist/
+        docker cp builder-$TARGETARCH:/go/src/github.com/ollama/ollama/dist/deps/ ./dist/
     fi
 
     docker rm builder-$TARGETARCH

+ 2 - 2
scripts/build_windows.ps1

@@ -74,7 +74,7 @@ function buildOllama() {
     } else {
         write-host "Skipping generate step with OLLAMA_SKIP_GENERATE set"
     }
-    & go build -trimpath -ldflags "-s -w -X=github.com/jmorganca/ollama/version.Version=$script:VERSION -X=github.com/jmorganca/ollama/server.mode=release" .
+    & go build -trimpath -ldflags "-s -w -X=github.com/ollama/ollama/version.Version=$script:VERSION -X=github.com/ollama/ollama/server.mode=release" .
     if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
     if ("${env:KEY_CONTAINER}") {
         & "${script:SignTool}" sign /v /fd sha256 /t http://timestamp.digicert.com /f "${script:OLLAMA_CERT}" `
@@ -89,7 +89,7 @@ function buildApp() {
     write-host "Building Ollama App"
     cd "${script:SRC_DIR}\app"
     & windres -l 0 -o ollama.syso ollama.rc
-    & go build -trimpath -ldflags "-s -w -H windowsgui -X=github.com/jmorganca/ollama/version.Version=$script:VERSION -X=github.com/jmorganca/ollama/server.mode=release" .
+    & go build -trimpath -ldflags "-s -w -H windowsgui -X=github.com/ollama/ollama/version.Version=$script:VERSION -X=github.com/ollama/ollama/server.mode=release" .
     if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
     if ("${env:KEY_CONTAINER}") {
         & "${script:SignTool}" sign /v /fd sha256 /t http://timestamp.digicert.com /f "${script:OLLAMA_CERT}" `

+ 1 - 1
scripts/push_docker.sh

@@ -3,7 +3,7 @@
 set -eu
 
 export VERSION=${VERSION:-0.0.0}
-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version=$VERSION\" \"-X=github.com/jmorganca/ollama/server.mode=release\"'"
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"
 
 docker build \
     --push \

+ 2 - 2
server/auth.go

@@ -15,8 +15,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/auth"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/auth"
 )
 
 type registryChallenge struct {

+ 2 - 2
server/download.go

@@ -21,8 +21,8 @@ import (
 
 	"golang.org/x/sync/errgroup"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/format"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/format"
 )
 
 const maxRetries = 6

+ 5 - 5
server/images.go

@@ -24,11 +24,11 @@ import (
 
 	"golang.org/x/exp/slices"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/convert"
-	"github.com/jmorganca/ollama/llm"
-	"github.com/jmorganca/ollama/parser"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/convert"
+	"github.com/ollama/ollama/llm"
+	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/version"
 )
 
 type registryOptions struct {

+ 1 - 1
server/prompt.go

@@ -7,7 +7,7 @@ import (
 	"text/template"
 	"text/template/parse"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 // isResponseNode checks if the node contains .Response

+ 1 - 1
server/prompt_test.go

@@ -4,7 +4,7 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )
 
 func TestPrompt(t *testing.T) {

+ 6 - 6
server/routes.go

@@ -27,12 +27,12 @@ import (
 	"github.com/gin-gonic/gin"
 	"golang.org/x/exp/slices"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/gpu"
-	"github.com/jmorganca/ollama/llm"
-	"github.com/jmorganca/ollama/openai"
-	"github.com/jmorganca/ollama/parser"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/gpu"
+	"github.com/ollama/ollama/llm"
+	"github.com/ollama/ollama/openai"
+	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/version"
 )
 
 var mode string = gin.DebugMode

+ 4 - 4
server/routes_test.go

@@ -15,10 +15,10 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/llm"
-	"github.com/jmorganca/ollama/parser"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/llm"
+	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/version"
 )
 
 func Test_Routes(t *testing.T) {

+ 2 - 2
server/upload.go

@@ -16,8 +16,8 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/format"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/format"
 	"golang.org/x/sync/errgroup"
 )