upload.go 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403
  1. package server
  2. import (
  3. "context"
  4. "crypto/md5"
  5. "errors"
  6. "fmt"
  7. "hash"
  8. "io"
  9. "log/slog"
  10. "math"
  11. "net/http"
  12. "net/url"
  13. "os"
  14. "strconv"
  15. "sync"
  16. "sync/atomic"
  17. "time"
  18. "golang.org/x/sync/errgroup"
  19. "github.com/ollama/ollama/api"
  20. "github.com/ollama/ollama/format"
  21. )
// blobUploadManager deduplicates concurrent uploads of the same blob,
// mapping layer digest -> *blobUpload so simultaneous pushes share one
// in-flight upload.
var blobUploadManager sync.Map
// blobUpload tracks the state of a single in-flight blob upload to a
// registry. It is shared between the goroutine running the upload (Run)
// and callers polling for progress (Wait).
type blobUpload struct {
	Layer

	// Total is the size of the local blob file in bytes.
	Total int64
	// Completed counts bytes uploaded so far; failed part attempts roll
	// their contribution back.
	Completed atomic.Int64

	// Parts is the list of byte ranges uploaded individually.
	Parts []blobUploadPart

	// nextURL hands the upload-session URL from one part upload to the
	// next; seeded by Prepare with the initial session location.
	nextURL chan *url.URL

	// CancelFunc cancels the upload's context (set in Run, invoked by
	// release when the last waiter leaves).
	context.CancelFunc

	// file is the opened local blob, read via section readers per part.
	file *os.File

	// done reports that the upload reached a terminal state; err holds
	// the terminal error, if any.
	done bool
	err  error
	// references counts active Wait callers.
	references atomic.Int32
}
const (
	// numUploadParts is the target number of parts per blob and the
	// concurrency limit used by Run.
	numUploadParts = 16
	// Part sizes are clamped to this range regardless of blob size.
	minUploadPartSize int64 = 100 * format.MegaByte
	maxUploadPartSize int64 = 1000 * format.MegaByte
)
// Prepare negotiates an upload session with the registry and splits the
// blob into parts for Run to upload. When b.From is set it first requests
// a cross-repository blob mount; a http.StatusCreated response means the
// blob was mounted and no upload is needed. Otherwise the session URL
// from the response is seeded into b.nextURL and b.Parts is populated
// with chunks of the local blob file.
func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		return err
	}

	// Ask the registry to mount the blob from its source repository
	// instead of uploading it again.
	if b.From != "" {
		values := requestURL.Query()
		values.Add("mount", b.Digest)
		values.Add("from", ParseModelPath(b.From).GetNamespaceRepository())
		requestURL.RawQuery = values.Encode()
	}

	resp, err := makeRequestWithRetry(ctx, http.MethodPost, requestURL, nil, nil, opts)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Prefer the registry-specific upload-location header; fall back to
	// the standard Location header.
	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	fi, err := os.Stat(p)
	if err != nil {
		return err
	}

	b.Total = fi.Size()

	// http.StatusCreated indicates a blob has been mounted
	// ref: https://distribution.github.io/distribution/spec/api/#cross-repository-blob-mount
	if resp.StatusCode == http.StatusCreated {
		b.Completed.Store(b.Total)
		b.done = true
		return nil
	}

	// Aim for numUploadParts parts, clamping the part size to
	// [minUploadPartSize, maxUploadPartSize].
	size := b.Total / numUploadParts
	switch {
	case size < minUploadPartSize:
		size = minUploadPartSize
	case size > maxUploadPartSize:
		size = maxUploadPartSize
	}

	var offset int64
	for offset < fi.Size() {
		// The final part may be smaller than size.
		if offset+size > fi.Size() {
			size = fi.Size() - offset
		}

		// set part.N to the current number of parts
		b.Parts = append(b.Parts, blobUploadPart{N: len(b.Parts), Offset: offset, Size: size})
		offset += size
	}

	if len(b.Parts) > 0 {
		slog.Info(fmt.Sprintf("uploading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size)))
	}

	requestURL, err = url.Parse(location)
	if err != nil {
		return err
	}

	// Buffered so the session URL can be queued here without blocking;
	// Run's first part receives it.
	b.nextURL = make(chan *url.URL, 1)
	b.nextURL <- requestURL
	return nil
}
// Run uploads blob parts to the upstream. If the upstream supports redirection, parts will be uploaded
// in parallel as defined by Prepare. Otherwise, parts will be uploaded serially. Run sets b.err on error.
// After all parts succeed, the session is committed with a PUT carrying the
// digest and a multipart-style etag derived from the per-part md5 sums.
func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
	defer blobUploadManager.Delete(b.Digest)
	ctx, b.CancelFunc = context.WithCancel(ctx)

	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		b.err = err
		return
	}

	b.file, err = os.Open(p)
	if err != nil {
		b.err = err
		return
	}
	defer b.file.Close()

	g, inner := errgroup.WithContext(ctx)
	g.SetLimit(numUploadParts)
	for i := range b.Parts {
		part := &b.Parts[i]
		select {
		case <-inner.Done():
			// A sibling part failed (or ctx was canceled); stop
			// scheduling further parts.
		case requestURL := <-b.nextURL:
			// Each part waits for a session URL handed back by the
			// previous part's uploadPart call.
			g.Go(func() error {
				var err error
				for try := range maxRetries {
					err = b.uploadPart(inner, http.MethodPatch, requestURL, part, opts)
					switch {
					case errors.Is(err, context.Canceled):
						return err
					case errors.Is(err, errMaxRetriesExceeded):
						return err
					case err != nil:
						// Exponential backoff before retrying this part.
						sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
						slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
						time.Sleep(sleep)
						continue
					}

					return nil
				}

				return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
			})
		}
	}

	if err := g.Wait(); err != nil {
		b.err = err
		return
	}

	requestURL := <-b.nextURL

	// calculate md5 checksum and add it to the commit request
	md5sum := md5.New()
	for _, part := range b.Parts {
		md5sum.Write(part.Sum(nil))
	}

	values := requestURL.Query()
	values.Add("digest", b.Digest)
	values.Add("etag", fmt.Sprintf("%x-%d", md5sum.Sum(nil), len(b.Parts)))
	requestURL.RawQuery = values.Encode()

	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", "0")

	// Commit the upload, retrying with exponential backoff on transient
	// errors; cancellation stops the retries immediately.
	for try := range maxRetries {
		var resp *http.Response
		resp, err = makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, nil, opts)
		if errors.Is(err, context.Canceled) {
			break
		} else if err != nil {
			sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
			slog.Info(fmt.Sprintf("%s complete upload attempt %d failed: %v, retrying in %s", b.Digest[7:19], try, err, sleep))
			time.Sleep(sleep)
			continue
		}
		defer resp.Body.Close()
		break
	}

	b.err = err
	b.done = true
}
// uploadPart uploads one byte range of the blob. A PATCH goes to the
// registry upload session; if the registry answers with a temporary
// redirect, the part is re-uploaded to the redirect target with PUT.
// On success the part's md5 hash is stored for the final commit request.
func (b *blobUpload) uploadPart(ctx context.Context, method string, requestURL *url.URL, part *blobUploadPart, opts *registryOptions) error {
	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", strconv.FormatInt(part.Size, 10))

	if method == http.MethodPatch {
		headers.Set("X-Redirect-Uploads", "1")
		headers.Set("Content-Range", fmt.Sprintf("%d-%d", part.Offset, part.Offset+part.Size-1))
	}

	sr := io.NewSectionReader(b.file, part.Offset, part.Size)

	md5sum := md5.New()
	w := &progressWriter{blobUpload: b}

	// Tee the body through the progress counter and md5 hash while it
	// streams to the server.
	resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sr, io.MultiWriter(w, md5sum)), opts)
	if err != nil {
		w.Rollback()
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	nextURL, err := url.Parse(location)
	if err != nil {
		w.Rollback()
		return err
	}

	switch {
	case resp.StatusCode == http.StatusTemporaryRedirect:
		w.Rollback()
		// Hand the session URL back immediately so other parts can
		// proceed while this part uploads to the redirect target.
		b.nextURL <- nextURL

		redirectURL, err := resp.Location()
		if err != nil {
			return err
		}

		// retry uploading to the redirect URL
		for try := range maxRetries {
			// The redirect target is called with empty registryOptions —
			// presumably a pre-authorized URL that must not receive
			// registry credentials (TODO confirm).
			err = b.uploadPart(ctx, http.MethodPut, redirectURL, part, &registryOptions{})
			switch {
			case errors.Is(err, context.Canceled):
				return err
			case errors.Is(err, errMaxRetriesExceeded):
				return err
			case err != nil:
				sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
				slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
				time.Sleep(sleep)
				continue
			}

			return nil
		}

		return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
	case resp.StatusCode == http.StatusUnauthorized:
		w.Rollback()
		// Refresh the token on opts, then fall through to return an
		// error so the caller's retry loop re-attempts this part with
		// the new credentials. (The second Rollback in the next case is
		// a no-op because Rollback zeroes w.written.)
		challenge := parseRegistryChallenge(resp.Header.Get("www-authenticate"))
		token, err := getAuthorizationToken(ctx, challenge)
		if err != nil {
			return err
		}

		opts.Token = token
		fallthrough
	case resp.StatusCode >= http.StatusBadRequest:
		w.Rollback()
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return err
		}

		return fmt.Errorf("http status %s: %s", resp.Status, body)
	}

	if method == http.MethodPatch {
		// Pass the session URL on so the next part can start.
		b.nextURL <- nextURL
	}

	part.Hash = md5sum
	return nil
}
  251. func (b *blobUpload) acquire() {
  252. b.references.Add(1)
  253. }
  254. func (b *blobUpload) release() {
  255. if b.references.Add(-1) == 0 {
  256. b.CancelFunc()
  257. }
  258. }
  259. func (b *blobUpload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
  260. b.acquire()
  261. defer b.release()
  262. ticker := time.NewTicker(60 * time.Millisecond)
  263. for {
  264. select {
  265. case <-ticker.C:
  266. case <-ctx.Done():
  267. return ctx.Err()
  268. }
  269. fn(api.ProgressResponse{
  270. Status: fmt.Sprintf("pushing %s", b.Digest[7:19]),
  271. Digest: b.Digest,
  272. Total: b.Total,
  273. Completed: b.Completed.Load(),
  274. })
  275. if b.done || b.err != nil {
  276. return b.err
  277. }
  278. }
  279. }
// blobUploadPart describes one byte range of a blob upload.
type blobUploadPart struct {
	// N is the part number
	N      int
	Offset int64
	Size   int64
	// Hash holds the md5 of the part body, set by uploadPart on success
	// and folded into the commit request's etag by Run.
	hash.Hash
}
// progressWriter counts bytes into the shared Completed counter of its
// blobUpload, remembering its own contribution so a failed upload attempt
// can be rolled back.
type progressWriter struct {
	// written is this writer's contribution to Completed.
	written int64
	*blobUpload
}
  291. func (p *progressWriter) Write(b []byte) (n int, err error) {
  292. n = len(b)
  293. p.written += int64(n)
  294. p.Completed.Add(int64(n))
  295. return n, nil
  296. }
  297. func (p *progressWriter) Rollback() {
  298. p.Completed.Add(-p.written)
  299. p.written = 0
  300. }
  301. func uploadBlob(ctx context.Context, mp ModelPath, layer Layer, opts *registryOptions, fn func(api.ProgressResponse)) error {
  302. requestURL := mp.BaseURL()
  303. requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs", layer.Digest)
  304. resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts)
  305. switch {
  306. case errors.Is(err, os.ErrNotExist):
  307. case err != nil:
  308. return err
  309. default:
  310. defer resp.Body.Close()
  311. fn(api.ProgressResponse{
  312. Status: fmt.Sprintf("pushing %s", layer.Digest[7:19]),
  313. Digest: layer.Digest,
  314. Total: layer.Size,
  315. Completed: layer.Size,
  316. })
  317. return nil
  318. }
  319. data, ok := blobUploadManager.LoadOrStore(layer.Digest, &blobUpload{Layer: layer})
  320. upload := data.(*blobUpload)
  321. if !ok {
  322. requestURL := mp.BaseURL()
  323. requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs/uploads/")
  324. if err := upload.Prepare(ctx, requestURL, opts); err != nil {
  325. blobUploadManager.Delete(layer.Digest)
  326. return err
  327. }
  328. //nolint:contextcheck
  329. go upload.Run(context.Background(), opts)
  330. }
  331. return upload.Wait(ctx, fn)
  332. }