upload.go 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408
  1. package server
  2. import (
  3. "context"
  4. "crypto/md5"
  5. "errors"
  6. "fmt"
  7. "hash"
  8. "io"
  9. "log/slog"
  10. "math"
  11. "net/http"
  12. "net/url"
  13. "os"
  14. "strconv"
  15. "sync"
  16. "sync/atomic"
  17. "time"
  18. "golang.org/x/sync/errgroup"
  19. "github.com/ollama/ollama/api"
  20. "github.com/ollama/ollama/format"
  21. "github.com/ollama/ollama/types/model"
  22. )
// blobUploadManager tracks in-flight blob uploads keyed by digest so that
// concurrent pushes of the same blob share a single upload.
var blobUploadManager sync.Map
// blobUpload tracks the state of one multi-part blob upload to a registry.
type blobUpload struct {
	Layer

	// Total is the blob size in bytes; Completed counts bytes uploaded so far
	// (decremented on retries via progressWriter.Rollback).
	Total     int64
	Completed atomic.Int64

	// Parts are the byte ranges of the blob, built by Prepare.
	Parts []blobUploadPart

	// nextURL hands the upload session URL from one request to the next.
	nextURL chan *url.URL

	// CancelFunc aborts the upload; called by release when the last waiter leaves.
	context.CancelFunc

	file *os.File

	// done and err record the terminal state read by Wait.
	done bool
	err  error

	// references counts active waiters (see acquire/release).
	references atomic.Int32
}
const (
	// numUploadParts bounds how many parts may upload concurrently.
	numUploadParts = 16
	// Part sizes are clamped to [minUploadPartSize, maxUploadPartSize].
	minUploadPartSize int64 = 100 * format.MegaByte
	maxUploadPartSize int64 = 1000 * format.MegaByte
)
  41. func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
  42. p, err := GetBlobsPath(b.Digest)
  43. if err != nil {
  44. return err
  45. }
  46. if b.From != "" {
  47. values := requestURL.Query()
  48. values.Add("mount", b.Digest)
  49. values.Add("from", ParseModelPath(b.From).GetNamespaceRepository())
  50. requestURL.RawQuery = values.Encode()
  51. }
  52. resp, err := makeRequestWithRetry(ctx, http.MethodPost, requestURL, nil, nil, opts)
  53. if err != nil {
  54. return err
  55. }
  56. defer resp.Body.Close()
  57. location := resp.Header.Get("Docker-Upload-Location")
  58. if location == "" {
  59. location = resp.Header.Get("Location")
  60. }
  61. fi, err := os.Stat(p)
  62. if err != nil {
  63. return err
  64. }
  65. b.Total = fi.Size()
  66. // http.StatusCreated indicates a blob has been mounted
  67. // ref: https://distribution.github.io/distribution/spec/api/#cross-repository-blob-mount
  68. if resp.StatusCode == http.StatusCreated {
  69. b.Completed.Store(b.Total)
  70. b.done = true
  71. return nil
  72. }
  73. size := b.Total / numUploadParts
  74. switch {
  75. case size < minUploadPartSize:
  76. size = minUploadPartSize
  77. case size > maxUploadPartSize:
  78. size = maxUploadPartSize
  79. }
  80. var offset int64
  81. for offset < fi.Size() {
  82. if offset+size > fi.Size() {
  83. size = fi.Size() - offset
  84. }
  85. // set part.N to the current number of parts
  86. b.Parts = append(b.Parts, blobUploadPart{N: len(b.Parts), Offset: offset, Size: size})
  87. offset += size
  88. }
  89. slog.Info("uploading blob", "digest", b.Digest, "size", format.HumanBytes(b.Total), "parts", len(b.Parts), "size per part", format.HumanBytes(b.Parts[0].Size))
  90. requestURL, err = url.Parse(location)
  91. if err != nil {
  92. return err
  93. }
  94. b.nextURL = make(chan *url.URL, 1)
  95. b.nextURL <- requestURL
  96. return nil
  97. }
// Run uploads blob parts to the upstream. If the upstream supports redirection, parts will be uploaded
// in parallel as defined by Prepare. Otherwise, parts will be uploaded serially. Run sets b.err on error.
func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
	// Always remove this upload from the shared manager so a later push of
	// the same digest can start a fresh upload.
	defer blobUploadManager.Delete(b.Digest)
	// Store the cancel func so release() can abort the upload when the last
	// waiter goes away.
	ctx, b.CancelFunc = context.WithCancel(ctx)

	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		b.err = err
		return
	}

	b.file, err = os.Open(p)
	if err != nil {
		b.err = err
		return
	}
	defer b.file.Close()

	g, inner := errgroup.WithContext(ctx)
	g.SetLimit(numUploadParts)
	for i := range b.Parts {
		part := &b.Parts[i]
		select {
		// Group context canceled: skip scheduling the remaining parts.
		case <-inner.Done():
		// Each part waits for the session URL published by the previous
		// request; uploadPart re-publishes a URL immediately on redirect,
		// which is what allows parts to proceed in parallel.
		case requestURL := <-b.nextURL:
			g.Go(func() error {
				var err error
				for try := range maxRetries {
					err = b.uploadPart(inner, http.MethodPatch, requestURL, part, opts)
					switch {
					case errors.Is(err, context.Canceled):
						return err
					case errors.Is(err, errMaxRetriesExceeded):
						return err
					case err != nil:
						// Exponential backoff: 1s, 2s, 4s, ...
						sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
						slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
						time.Sleep(sleep)
						continue
					}

					return nil
				}

				return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
			})
		}
	}

	if err := g.Wait(); err != nil {
		b.err = err
		return
	}

	// Session URL left over from the last part; used for the final commit PUT.
	requestURL := <-b.nextURL

	// calculate md5 checksum and add it to the commit request
	// (md5-of-per-part-md5s with a part-count suffix, matching the S3-style
	// multipart ETag format)
	md5sum := md5.New()
	for _, part := range b.Parts {
		md5sum.Write(part.Sum(nil))
	}

	values := requestURL.Query()
	values.Add("digest", b.Digest)
	values.Add("etag", fmt.Sprintf("%x-%d", md5sum.Sum(nil), len(b.Parts)))
	requestURL.RawQuery = values.Encode()

	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", "0")

	for try := range maxRetries {
		var resp *http.Response
		resp, err = makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, nil, opts)
		if errors.Is(err, context.Canceled) {
			break
		} else if err != nil {
			sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
			slog.Info(fmt.Sprintf("%s complete upload attempt %d failed: %v, retrying in %s", b.Digest[7:19], try, err, sleep))
			time.Sleep(sleep)
			continue
		}
		// Only the success path reaches this defer, and it breaks
		// immediately, so at most one body is deferred despite the loop.
		defer resp.Body.Close()
		break
	}

	b.err = err
	b.done = true
}
// uploadPart uploads a single byte range of the blob. For PATCH requests it
// sets a Content-Range and opts in to redirects; when the registry responds
// with a 307 it retries the part as a PUT against the redirect target. On
// success the part's MD5 hash is recorded for the final commit ETag.
func (b *blobUpload) uploadPart(ctx context.Context, method string, requestURL *url.URL, part *blobUploadPart, opts *registryOptions) error {
	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", strconv.FormatInt(part.Size, 10))

	if method == http.MethodPatch {
		// NOTE(review): X-Redirect-Uploads appears to ask the upstream to
		// redirect part uploads to blob storage (enabling parallelism) —
		// registry-specific, confirm against the server implementation.
		headers.Set("X-Redirect-Uploads", "1")
		headers.Set("Content-Range", fmt.Sprintf("%d-%d", part.Offset, part.Offset+part.Size-1))
	}

	// Stream the part through both the progress counter and the MD5 hasher
	// while the request body is read.
	sr := io.NewSectionReader(b.file, part.Offset, part.Size)

	md5sum := md5.New()
	w := &progressWriter{blobUpload: b}

	resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sr, io.MultiWriter(w, md5sum)), opts)
	if err != nil {
		// Undo this attempt's progress so a retry doesn't double count.
		w.Rollback()
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	nextURL, err := url.Parse(location)
	if err != nil {
		w.Rollback()
		return err
	}

	switch {
	case resp.StatusCode == http.StatusTemporaryRedirect:
		w.Rollback()
		// Publish the session URL right away so other parts can start while
		// this one uploads to the redirect target.
		b.nextURL <- nextURL

		redirectURL, err := resp.Location()
		if err != nil {
			return err
		}

		// retry uploading to the redirect URL
		// (fresh empty registryOptions — presumably to avoid forwarding
		// registry credentials to the storage host; confirm)
		for try := range maxRetries {
			err = b.uploadPart(ctx, http.MethodPut, redirectURL, part, &registryOptions{})
			switch {
			case errors.Is(err, context.Canceled):
				return err
			case errors.Is(err, errMaxRetriesExceeded):
				return err
			case err != nil:
				sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
				slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
				time.Sleep(sleep)
				continue
			}

			return nil
		}

		return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
	case resp.StatusCode == http.StatusUnauthorized:
		w.Rollback()
		// Refresh the token for subsequent attempts, then fall through and
		// report this attempt as an error; the caller's retry loop will
		// re-attempt with the new token.
		challenge := parseRegistryChallenge(resp.Header.Get("www-authenticate"))
		token, err := getAuthorizationToken(ctx, challenge)
		if err != nil {
			return err
		}

		opts.Token = token
		fallthrough
	case resp.StatusCode >= http.StatusBadRequest:
		w.Rollback()
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return err
		}

		return fmt.Errorf("http status %s: %s", resp.Status, body)
	}

	if method == http.MethodPatch {
		// Hand the session URL to the next part (or the final commit).
		b.nextURL <- nextURL
	}

	// Record this part's MD5 for the commit request's ETag.
	part.Hash = md5sum
	return nil
}
// acquire registers a waiter on the upload; pair with release.
func (b *blobUpload) acquire() {
	b.references.Add(1)
}
  253. func (b *blobUpload) release() {
  254. if b.references.Add(-1) == 0 {
  255. b.CancelFunc()
  256. }
  257. }
  258. func (b *blobUpload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
  259. b.acquire()
  260. defer b.release()
  261. ticker := time.NewTicker(60 * time.Millisecond)
  262. for {
  263. select {
  264. case <-ticker.C:
  265. case <-ctx.Done():
  266. return ctx.Err()
  267. }
  268. fn(api.ProgressResponse{
  269. Status: fmt.Sprintf("pushing %s", b.Digest[7:19]),
  270. Digest: b.Digest,
  271. Total: b.Total,
  272. Completed: b.Completed.Load(),
  273. })
  274. if b.done || b.err != nil {
  275. return b.err
  276. }
  277. }
  278. }
// blobUploadPart describes one byte range of a blob upload.
type blobUploadPart struct {
	// N is the part number
	N      int
	Offset int64
	Size   int64

	// Hash holds the part's MD5, set by uploadPart on success and consumed
	// when computing the commit ETag.
	hash.Hash
}
// progressWriter counts bytes written for one upload attempt so the bytes can
// be added to (and, on retry, rolled back from) the upload's Completed total.
type progressWriter struct {
	written int64
	*blobUpload
}
  290. func (p *progressWriter) Write(b []byte) (n int, err error) {
  291. n = len(b)
  292. p.written += int64(n)
  293. p.Completed.Add(int64(n))
  294. return n, nil
  295. }
  296. func (p *progressWriter) Rollback() {
  297. p.Completed.Add(-p.written)
  298. p.written = 0
  299. }
// uploadOptions bundles the inputs to uploadBlob.
type uploadOptions struct {
	name    model.Name
	baseURL *url.URL
	layer   Layer
	regOpts *registryOptions
	// fn receives progress updates while the blob uploads.
	fn func(api.ProgressResponse)
}
  307. func uploadBlob(ctx context.Context, opts uploadOptions) error {
  308. requestURL := opts.baseURL.JoinPath("blobs", opts.layer.Digest)
  309. resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts.regOpts)
  310. switch {
  311. case errors.Is(err, os.ErrNotExist):
  312. case err != nil:
  313. return err
  314. default:
  315. defer resp.Body.Close()
  316. opts.fn(api.ProgressResponse{
  317. Status: fmt.Sprintf("pushing %s", opts.layer.Digest[7:19]),
  318. Digest: opts.layer.Digest,
  319. Total: opts.layer.Size,
  320. Completed: opts.layer.Size,
  321. })
  322. return nil
  323. }
  324. data, ok := blobUploadManager.LoadOrStore(opts.layer.Digest, &blobUpload{Layer: opts.layer})
  325. upload := data.(*blobUpload)
  326. if !ok {
  327. requestURL := opts.baseURL.JoinPath("blobs", "uploads")
  328. if err := upload.Prepare(ctx, requestURL, opts.regOpts); err != nil {
  329. blobUploadManager.Delete(opts.layer.Digest)
  330. return err
  331. }
  332. //nolint:contextcheck
  333. go upload.Run(context.Background(), opts.regOpts)
  334. }
  335. return upload.Wait(ctx, opts.fn)
  336. }