download.go 9.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449
  1. package server
  2. import (
  3. "context"
  4. "encoding/json"
  5. "errors"
  6. "fmt"
  7. "io"
  8. "log/slog"
  9. "math"
  10. "net/http"
  11. "net/url"
  12. "os"
  13. "path/filepath"
  14. "strconv"
  15. "strings"
  16. "sync"
  17. "sync/atomic"
  18. "syscall"
  19. "time"
  20. "golang.org/x/sync/errgroup"
  21. "golang.org/x/sync/semaphore"
  22. "github.com/jmorganca/ollama/api"
  23. "github.com/jmorganca/ollama/format"
  24. )
  25. const maxRetries = 6
  26. var errMaxRetriesExceeded = errors.New("max retries exceeded")
  27. var errPartStalled = errors.New("part stalled")
  28. var blobDownloadManager sync.Map
// blobDownload holds the shared state of one in-flight blob download:
// the goroutine running the transfer (Run) writes progress and outcome,
// and callers observing it (Wait) read them.
type blobDownload struct {
	Name   string // destination path of the completed blob
	Digest string // blob digest, used for logging and as the manager key

	Total     int64        // total blob size in bytes
	Completed atomic.Int64 // bytes downloaded so far, summed across parts

	Parts []*blobDownloadPart

	// CancelFunc aborts the download context; invoked when the last
	// waiter releases its reference.
	context.CancelFunc

	// done reports successful completion; err holds the terminal error.
	// Both are set by Run and read by Wait.
	done bool
	err  error

	// references counts active waiters (see acquire/release).
	references atomic.Int32
}
// blobDownloadPart is one byte range of a blob download. Its exported
// fields are persisted to a JSON state file so an interrupted download
// can resume where it left off.
type blobDownloadPart struct {
	N         int   // part index within the download
	Offset    int64 // absolute start offset within the blob
	Size      int64 // size of this part in bytes
	Completed int64 // bytes of this part already written

	// lastUpdated is the last time data arrived for this part; the
	// zero value means no progress has been observed yet.
	lastUpdated time.Time

	// back-reference to the parent download; excluded from the JSON
	// state file.
	*blobDownload `json:"-"`
}
const (
	// numDownloadParts is the target number of parts to split a blob into.
	numDownloadParts = 64

	// minDownloadPartSize and maxDownloadPartSize bound the computed
	// per-part size regardless of blob size.
	minDownloadPartSize int64 = 100 * format.MegaByte
	maxDownloadPartSize int64 = 1000 * format.MegaByte
)
  53. func (p *blobDownloadPart) Name() string {
  54. return strings.Join([]string{
  55. p.blobDownload.Name, "partial", strconv.Itoa(p.N),
  56. }, "-")
  57. }
  58. func (p *blobDownloadPart) StartsAt() int64 {
  59. return p.Offset + p.Completed
  60. }
  61. func (p *blobDownloadPart) StopsAt() int64 {
  62. return p.Offset + p.Size
  63. }
  64. func (p *blobDownloadPart) Write(b []byte) (n int, err error) {
  65. n = len(b)
  66. p.blobDownload.Completed.Add(int64(n))
  67. p.lastUpdated = time.Now()
  68. return n, nil
  69. }
  70. func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
  71. partFilePaths, err := filepath.Glob(b.Name + "-partial-*")
  72. if err != nil {
  73. return err
  74. }
  75. for _, partFilePath := range partFilePaths {
  76. part, err := b.readPart(partFilePath)
  77. if err != nil {
  78. return err
  79. }
  80. b.Total += part.Size
  81. b.Completed.Add(part.Completed)
  82. b.Parts = append(b.Parts, part)
  83. }
  84. if len(b.Parts) == 0 {
  85. resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts)
  86. if err != nil {
  87. return err
  88. }
  89. defer resp.Body.Close()
  90. b.Total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
  91. size := b.Total / numDownloadParts
  92. switch {
  93. case size < minDownloadPartSize:
  94. size = minDownloadPartSize
  95. case size > maxDownloadPartSize:
  96. size = maxDownloadPartSize
  97. }
  98. var offset int64
  99. for offset < b.Total {
  100. if offset+size > b.Total {
  101. size = b.Total - offset
  102. }
  103. if err := b.newPart(offset, size); err != nil {
  104. return err
  105. }
  106. offset += size
  107. }
  108. }
  109. slog.Info(fmt.Sprintf("downloading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size)))
  110. return nil
  111. }
  112. func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *registryOptions) {
  113. defer blobDownloadManager.Delete(b.Digest)
  114. ctx, b.CancelFunc = context.WithCancel(ctx)
  115. file, err := os.OpenFile(b.Name+"-partial", os.O_CREATE|os.O_RDWR, 0o644)
  116. if err != nil {
  117. b.err = err
  118. return
  119. }
  120. defer file.Close()
  121. _ = file.Truncate(b.Total)
  122. g, inner := NewLimitGroup(ctx, numDownloadParts)
  123. go func() {
  124. ticker := time.NewTicker(time.Second)
  125. var n int64 = 1
  126. var maxDelta float64
  127. var buckets []int64
  128. for {
  129. select {
  130. case <-ticker.C:
  131. buckets = append(buckets, b.Completed.Load())
  132. if len(buckets) < 2 {
  133. continue
  134. } else if len(buckets) > 10 {
  135. buckets = buckets[1:]
  136. }
  137. delta := float64((buckets[len(buckets)-1] - buckets[0])) / float64(len(buckets))
  138. slog.Debug(fmt.Sprintf("delta: %s/s max_delta: %s/s", format.HumanBytes(int64(delta)), format.HumanBytes(int64(maxDelta))))
  139. if delta > maxDelta*1.5 {
  140. maxDelta = delta
  141. g.SetLimit(n)
  142. n++
  143. }
  144. case <-ctx.Done():
  145. return
  146. }
  147. }
  148. }()
  149. for i := range b.Parts {
  150. part := b.Parts[i]
  151. if part.Completed == part.Size {
  152. continue
  153. }
  154. g.Go(func() error {
  155. var err error
  156. for try := 0; try < maxRetries; try++ {
  157. w := io.NewOffsetWriter(file, part.StartsAt())
  158. err = b.downloadChunk(inner, requestURL, w, part, opts)
  159. switch {
  160. case errors.Is(err, context.Canceled), errors.Is(err, syscall.ENOSPC):
  161. // return immediately if the context is canceled or the device is out of space
  162. return err
  163. case errors.Is(err, errPartStalled):
  164. try--
  165. continue
  166. case err != nil:
  167. sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
  168. slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
  169. time.Sleep(sleep)
  170. continue
  171. default:
  172. return nil
  173. }
  174. }
  175. return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
  176. })
  177. }
  178. if err := g.Wait(); err != nil {
  179. b.err = err
  180. return
  181. }
  182. // explicitly close the file so we can rename it
  183. if err := file.Close(); err != nil {
  184. b.err = err
  185. return
  186. }
  187. for i := range b.Parts {
  188. if err := os.Remove(file.Name() + "-" + strconv.Itoa(i)); err != nil {
  189. b.err = err
  190. return
  191. }
  192. }
  193. if err := os.Rename(file.Name(), b.Name); err != nil {
  194. b.err = err
  195. return
  196. }
  197. b.done = true
  198. return
  199. }
  200. func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w io.Writer, part *blobDownloadPart, opts *registryOptions) error {
  201. g, ctx := errgroup.WithContext(ctx)
  202. g.Go(func() error {
  203. headers := make(http.Header)
  204. headers.Set("Range", fmt.Sprintf("bytes=%d-%d", part.StartsAt(), part.StopsAt()-1))
  205. resp, err := makeRequestWithRetry(ctx, http.MethodGet, requestURL, headers, nil, opts)
  206. if err != nil {
  207. return err
  208. }
  209. defer resp.Body.Close()
  210. n, err := io.Copy(w, io.TeeReader(resp.Body, part))
  211. if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.ErrUnexpectedEOF) {
  212. // rollback progress
  213. b.Completed.Add(-n)
  214. return err
  215. }
  216. part.Completed += n
  217. if err := b.writePart(part.Name(), part); err != nil {
  218. return err
  219. }
  220. // return nil or context.Canceled or UnexpectedEOF (resumable)
  221. return err
  222. })
  223. g.Go(func() error {
  224. ticker := time.NewTicker(time.Second)
  225. for {
  226. select {
  227. case <-ticker.C:
  228. if part.Completed >= part.Size {
  229. return nil
  230. }
  231. if !part.lastUpdated.IsZero() && time.Since(part.lastUpdated) > 5*time.Second {
  232. slog.Info(fmt.Sprintf("%s part %d stalled; retrying", b.Digest[7:19], part.N))
  233. // reset last updated
  234. part.lastUpdated = time.Time{}
  235. return errPartStalled
  236. }
  237. case <-ctx.Done():
  238. return ctx.Err()
  239. }
  240. }
  241. })
  242. return g.Wait()
  243. }
  244. func (b *blobDownload) newPart(offset, size int64) error {
  245. part := blobDownloadPart{blobDownload: b, Offset: offset, Size: size, N: len(b.Parts)}
  246. if err := b.writePart(part.Name(), &part); err != nil {
  247. return err
  248. }
  249. b.Parts = append(b.Parts, &part)
  250. return nil
  251. }
  252. func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) {
  253. var part blobDownloadPart
  254. partFile, err := os.Open(partName)
  255. if err != nil {
  256. return nil, err
  257. }
  258. defer partFile.Close()
  259. if err := json.NewDecoder(partFile).Decode(&part); err != nil {
  260. return nil, err
  261. }
  262. part.blobDownload = b
  263. return &part, nil
  264. }
  265. func (b *blobDownload) writePart(partName string, part *blobDownloadPart) error {
  266. partFile, err := os.OpenFile(partName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
  267. if err != nil {
  268. return err
  269. }
  270. defer partFile.Close()
  271. return json.NewEncoder(partFile).Encode(part)
  272. }
// acquire registers a waiter on this download; pair with release.
func (b *blobDownload) acquire() {
	b.references.Add(1)
}
  276. func (b *blobDownload) release() {
  277. if b.references.Add(-1) == 0 {
  278. b.CancelFunc()
  279. }
  280. }
  281. func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
  282. b.acquire()
  283. defer b.release()
  284. ticker := time.NewTicker(60 * time.Millisecond)
  285. for {
  286. select {
  287. case <-ticker.C:
  288. fn(api.ProgressResponse{
  289. Status: fmt.Sprintf("pulling %s", b.Digest[7:19]),
  290. Digest: b.Digest,
  291. Total: b.Total,
  292. Completed: b.Completed.Load(),
  293. })
  294. if b.done || b.err != nil {
  295. return b.err
  296. }
  297. case <-ctx.Done():
  298. return ctx.Err()
  299. }
  300. }
  301. }
// downloadOpts bundles the arguments for downloadBlob.
type downloadOpts struct {
	mp      ModelPath                  // model path identifying the registry and repository
	digest  string                     // blob digest, e.g. "sha256:..."
	regOpts *registryOptions           // registry request options
	fn      func(api.ProgressResponse) // progress callback
}
// downloadBlob downloads a blob from the registry and stores it in the blobs directory
func downloadBlob(ctx context.Context, opts downloadOpts) error {
	fp, err := GetBlobsPath(opts.digest)
	if err != nil {
		return err
	}

	fi, err := os.Stat(fp)
	switch {
	case errors.Is(err, os.ErrNotExist):
		// blob not on disk yet; fall through to start or join a download
	case err != nil:
		return err
	default:
		// blob already present: report it as fully completed and stop
		opts.fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pulling %s", opts.digest[7:19]),
			Digest:    opts.digest,
			Total:     fi.Size(),
			Completed: fi.Size(),
		})

		return nil
	}

	// LoadOrStore deduplicates concurrent pulls of the same digest:
	// only the first caller (ok == false) prepares and runs the
	// download; everyone else just waits on it
	data, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest})
	download := data.(*blobDownload)
	if !ok {
		requestURL := opts.mp.BaseURL()
		requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest)
		if err := download.Prepare(ctx, requestURL, opts.regOpts); err != nil {
			blobDownloadManager.Delete(opts.digest)
			return err
		}

		// nolint: contextcheck
		// Run uses a background context so the download can outlive this
		// caller; teardown is driven by reference counting in Wait
		go download.Run(context.Background(), requestURL, opts.regOpts)
	}

	return download.Wait(ctx, opts.fn)
}
// LimitGroup is an errgroup with a dynamically adjustable concurrency
// limit implemented via a weighted semaphore: each task acquires
// `weight` units out of `max_weight`, so shrinking weight admits more
// concurrent tasks.
// NOTE(review): embedding context.Context in a struct is discouraged by
// Go convention — confirm this is deliberate before changing.
type LimitGroup struct {
	*errgroup.Group
	context.Context
	Semaphore *semaphore.Weighted

	// weight is the per-task acquisition cost; max_weight is the
	// semaphore's total capacity (both set by NewLimitGroup).
	weight, max_weight int64
}
  348. func NewLimitGroup(ctx context.Context, n int64) (*LimitGroup, context.Context) {
  349. g, ctx := errgroup.WithContext(ctx)
  350. return &LimitGroup{
  351. Group: g,
  352. Context: ctx,
  353. Semaphore: semaphore.NewWeighted(n),
  354. weight: n,
  355. max_weight: n,
  356. }, ctx
  357. }
  358. func (g *LimitGroup) Go(fn func() error) {
  359. weight := g.weight
  360. g.Semaphore.Acquire(g.Context, weight)
  361. if g.Context.Err() != nil {
  362. return
  363. }
  364. g.Group.Go(func() error {
  365. defer g.Semaphore.Release(weight)
  366. return fn()
  367. })
  368. }
  369. func (g *LimitGroup) SetLimit(n int64) {
  370. if n > 0 {
  371. slog.Debug(fmt.Sprintf("setting limit to %d", n))
  372. g.weight = g.max_weight / n
  373. }
  374. }