download.go 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466
  1. package server
  2. import (
  3. "context"
  4. "encoding/json"
  5. "errors"
  6. "fmt"
  7. "io"
  8. "log/slog"
  9. "math"
  10. "net/http"
  11. "net/url"
  12. "os"
  13. "path/filepath"
  14. "strconv"
  15. "strings"
  16. "sync"
  17. "sync/atomic"
  18. "syscall"
  19. "time"
  20. "golang.org/x/sync/errgroup"
  21. "golang.org/x/sync/semaphore"
  22. "github.com/jmorganca/ollama/api"
  23. "github.com/jmorganca/ollama/format"
  24. )
// maxRetries is the number of download attempts per part before giving up.
const maxRetries = 6

// errMaxRetriesExceeded is returned when a part fails maxRetries times.
var errMaxRetriesExceeded = errors.New("max retries exceeded")

// errPartStalled signals that a part made no forward progress; the retry
// loop restarts it without consuming a retry attempt.
var errPartStalled = errors.New("part stalled")

// blobDownloadManager deduplicates concurrent downloads of the same digest,
// mapping digest -> *blobDownload.
var blobDownloadManager sync.Map
// blobDownload tracks the state of a single blob being downloaded in
// resumable parts. One instance is shared by all callers pulling the same
// digest (see blobDownloadManager).
type blobDownload struct {
	Name   string // final on-disk path of the blob
	Digest string // blob digest string, e.g. "sha256:<hex>"

	Total     int64        // total blob size in bytes
	Completed atomic.Int64 // bytes downloaded so far across all parts

	Parts []*blobDownloadPart

	// CancelFunc cancels the download context; called when the last
	// reference is released.
	context.CancelFunc

	done bool  // set by Run on success; observed by Wait
	err  error // set by Run on failure; observed by Wait

	references atomic.Int32 // number of active Wait callers
}
// blobDownloadPart is the persisted state of one contiguous byte range of a
// blob. It is serialized as JSON to a "<name>-partial-<N>" state file so an
// interrupted download can resume.
type blobDownloadPart struct {
	N         int   // part index within the download
	Offset    int64 // starting byte offset within the blob
	Size      int64 // length of this part in bytes
	Completed int64 // bytes of this part downloaded so far

	lastUpdated time.Time // time of the most recent write; used for stall detection

	*blobDownload `json:"-"` // parent download; excluded from the JSON state file
}
// Part sizing: the blob is split into up to numDownloadParts pieces, with
// each piece clamped to [minDownloadPartSize, maxDownloadPartSize].
const (
	numDownloadParts          = 64
	minDownloadPartSize int64 = 100 * format.MegaByte
	maxDownloadPartSize int64 = 1000 * format.MegaByte
)
  53. func (p *blobDownloadPart) Name() string {
  54. return strings.Join([]string{
  55. p.blobDownload.Name, "partial", strconv.Itoa(p.N),
  56. }, "-")
  57. }
  58. func (p *blobDownloadPart) StartsAt() int64 {
  59. return p.Offset + p.Completed
  60. }
  61. func (p *blobDownloadPart) StopsAt() int64 {
  62. return p.Offset + p.Size
  63. }
  64. func (p *blobDownloadPart) Write(b []byte) (n int, err error) {
  65. n = len(b)
  66. p.blobDownload.Completed.Add(int64(n))
  67. p.lastUpdated = time.Now()
  68. return n, nil
  69. }
  70. func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
  71. partFilePaths, err := filepath.Glob(b.Name + "-partial-*")
  72. if err != nil {
  73. return err
  74. }
  75. for _, partFilePath := range partFilePaths {
  76. part, err := b.readPart(partFilePath)
  77. if err != nil {
  78. return err
  79. }
  80. b.Total += part.Size
  81. b.Completed.Add(part.Completed)
  82. b.Parts = append(b.Parts, part)
  83. }
  84. if len(b.Parts) == 0 {
  85. resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts)
  86. if err != nil {
  87. return err
  88. }
  89. defer resp.Body.Close()
  90. b.Total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
  91. size := b.Total / numDownloadParts
  92. switch {
  93. case size < minDownloadPartSize:
  94. size = minDownloadPartSize
  95. case size > maxDownloadPartSize:
  96. size = maxDownloadPartSize
  97. }
  98. var offset int64
  99. for offset < b.Total {
  100. if offset+size > b.Total {
  101. size = b.Total - offset
  102. }
  103. if err := b.newPart(offset, size); err != nil {
  104. return err
  105. }
  106. offset += size
  107. }
  108. }
  109. slog.Info(fmt.Sprintf("downloading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size)))
  110. return nil
  111. }
// Run downloads all incomplete parts concurrently into a single "-partial"
// file, then removes the part state files and renames the result to the
// final blob path. Failures are recorded in b.err (and success in b.done)
// for Wait to observe; Run itself returns nothing.
func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *registryOptions) {
	defer blobDownloadManager.Delete(b.Digest)
	ctx, b.CancelFunc = context.WithCancel(ctx)

	file, err := os.OpenFile(b.Name+"-partial", os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		b.err = err
		return
	}
	defer file.Close()

	// Pre-size the file so each part can write at its own offset.
	// Best-effort: a failure here surfaces later as a write error (ENOSPC).
	_ = file.Truncate(b.Total)

	// Start with a conservative concurrency of 2; watchDelta raises the
	// limit when observed throughput is still climbing.
	var limit int64 = 2
	g, inner := NewLimitGroup(ctx, numDownloadParts, limit)
	go watchDelta(inner, g, &b.Completed, limit)

	for i := range b.Parts {
		part := b.Parts[i]
		if part.Completed == part.Size {
			// already finished in a previous run
			continue
		}

		g.Go(inner, func() error {
			var err error
			for try := 0; try < maxRetries; try++ {
				w := io.NewOffsetWriter(file, part.StartsAt())
				err = b.downloadChunk(inner, requestURL, w, part, opts)
				switch {
				case errors.Is(err, context.Canceled), errors.Is(err, syscall.ENOSPC):
					// return immediately if the context is canceled or the device is out of space
					return err
				case errors.Is(err, errPartStalled):
					// a stall does not consume a retry attempt
					try--
					continue
				case err != nil:
					// exponential backoff: 1s, 2s, 4s, ...
					sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
					slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
					time.Sleep(sleep)
					continue
				default:
					return nil
				}
			}

			return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
		})
	}

	if err := g.Wait(); err != nil {
		b.err = err
		return
	}

	// explicitly close the file so we can rename it
	if err := file.Close(); err != nil {
		b.err = err
		return
	}

	// Remove per-part state files now that the whole blob is complete.
	for i := range b.Parts {
		if err := os.Remove(file.Name() + "-" + strconv.Itoa(i)); err != nil {
			b.err = err
			return
		}
	}

	// Atomically publish the finished blob under its final name.
	if err := os.Rename(file.Name(), b.Name); err != nil {
		b.err = err
		return
	}

	b.done = true
}
  175. func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w io.Writer, part *blobDownloadPart, opts *registryOptions) error {
  176. g, ctx := errgroup.WithContext(ctx)
  177. g.Go(func() error {
  178. headers := make(http.Header)
  179. headers.Set("Range", fmt.Sprintf("bytes=%d-%d", part.StartsAt(), part.StopsAt()-1))
  180. resp, err := makeRequestWithRetry(ctx, http.MethodGet, requestURL, headers, nil, opts)
  181. if err != nil {
  182. return err
  183. }
  184. defer resp.Body.Close()
  185. n, err := io.Copy(w, io.TeeReader(resp.Body, part))
  186. if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.ErrUnexpectedEOF) {
  187. // rollback progress
  188. b.Completed.Add(-n)
  189. return err
  190. }
  191. part.Completed += n
  192. if err := b.writePart(part.Name(), part); err != nil {
  193. return err
  194. }
  195. // return nil or context.Canceled or UnexpectedEOF (resumable)
  196. return err
  197. })
  198. g.Go(func() error {
  199. ticker := time.NewTicker(time.Second)
  200. for {
  201. select {
  202. case <-ticker.C:
  203. if part.Completed >= part.Size {
  204. return nil
  205. }
  206. if !part.lastUpdated.IsZero() && time.Since(part.lastUpdated) > 5*time.Second {
  207. slog.Info(fmt.Sprintf("%s part %d stalled; retrying", b.Digest[7:19], part.N))
  208. // reset last updated
  209. part.lastUpdated = time.Time{}
  210. return errPartStalled
  211. }
  212. case <-ctx.Done():
  213. return ctx.Err()
  214. }
  215. }
  216. })
  217. return g.Wait()
  218. }
  219. func (b *blobDownload) newPart(offset, size int64) error {
  220. part := blobDownloadPart{blobDownload: b, Offset: offset, Size: size, N: len(b.Parts)}
  221. if err := b.writePart(part.Name(), &part); err != nil {
  222. return err
  223. }
  224. b.Parts = append(b.Parts, &part)
  225. return nil
  226. }
  227. func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) {
  228. var part blobDownloadPart
  229. partFile, err := os.Open(partName)
  230. if err != nil {
  231. return nil, err
  232. }
  233. defer partFile.Close()
  234. if err := json.NewDecoder(partFile).Decode(&part); err != nil {
  235. return nil, err
  236. }
  237. part.blobDownload = b
  238. return &part, nil
  239. }
  240. func (b *blobDownload) writePart(partName string, part *blobDownloadPart) error {
  241. partFile, err := os.OpenFile(partName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
  242. if err != nil {
  243. return err
  244. }
  245. defer partFile.Close()
  246. return json.NewEncoder(partFile).Encode(part)
  247. }
// acquire records one additional waiter on this download.
func (b *blobDownload) acquire() {
	b.references.Add(1)
}
// release drops one waiter; when the count reaches zero the download
// context is canceled, stopping Run.
func (b *blobDownload) release() {
	if b.references.Add(-1) == 0 {
		b.CancelFunc()
	}
}
  256. func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
  257. b.acquire()
  258. defer b.release()
  259. ticker := time.NewTicker(60 * time.Millisecond)
  260. for {
  261. select {
  262. case <-ticker.C:
  263. fn(api.ProgressResponse{
  264. Status: fmt.Sprintf("pulling %s", b.Digest[7:19]),
  265. Digest: b.Digest,
  266. Total: b.Total,
  267. Completed: b.Completed.Load(),
  268. })
  269. if b.done || b.err != nil {
  270. return b.err
  271. }
  272. case <-ctx.Done():
  273. return ctx.Err()
  274. }
  275. }
  276. }
// downloadOpts bundles the arguments to downloadBlob.
type downloadOpts struct {
	mp      ModelPath                  // model path; used to build the registry blob URL
	digest  string                     // digest of the blob to fetch
	regOpts *registryOptions           // registry auth/transport options
	fn      func(api.ProgressResponse) // progress callback
}
// downloadBlob downloads a blob from the registry and stores it in the blobs directory
func downloadBlob(ctx context.Context, opts downloadOpts) error {
	fp, err := GetBlobsPath(opts.digest)
	if err != nil {
		return err
	}

	fi, err := os.Stat(fp)
	switch {
	case errors.Is(err, os.ErrNotExist):
		// blob not on disk yet; fall through to download
	case err != nil:
		return err
	default:
		// Blob already present: report it as fully complete and return.
		opts.fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pulling %s", opts.digest[7:19]),
			Digest:    opts.digest,
			Total:     fi.Size(),
			Completed: fi.Size(),
		})

		return nil
	}

	// LoadOrStore deduplicates concurrent pulls of the same digest: only the
	// first caller (ok == false) prepares and launches the download; every
	// caller then waits on the shared blobDownload.
	data, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest})
	download := data.(*blobDownload)
	if !ok {
		requestURL := opts.mp.BaseURL()
		requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest)
		if err := download.Prepare(ctx, requestURL, opts.regOpts); err != nil {
			// Preparation failed: unregister so a later pull can retry.
			blobDownloadManager.Delete(opts.digest)
			return err
		}

		// Run with a background context so the download outlives this
		// caller; cancellation is handled by reference counting in
		// Wait/release instead.
		// nolint: contextcheck
		go download.Run(context.Background(), requestURL, opts.regOpts)
	}

	return download.Wait(ctx, opts.fn)
}
// LimitGroup is an errgroup with a semaphore-based concurrency throttle.
// Each task acquires a weight of size/limit, so raising limit shrinks the
// per-task weight and lets more tasks run within the same total capacity.
type LimitGroup struct {
	*errgroup.Group
	*semaphore.Weighted

	size, limit int64
}
  322. func NewLimitGroup(ctx context.Context, size, limit int64) (*LimitGroup, context.Context) {
  323. g, ctx := errgroup.WithContext(ctx)
  324. return &LimitGroup{
  325. Group: g,
  326. Weighted: semaphore.NewWeighted(size),
  327. size: size,
  328. limit: limit,
  329. }, ctx
  330. }
  331. func (g *LimitGroup) Go(ctx context.Context, fn func() error) {
  332. var weight int64 = 1
  333. if g.limit > 0 {
  334. weight = g.size / g.limit
  335. }
  336. _ = g.Acquire(ctx, weight)
  337. if ctx.Err() != nil {
  338. return
  339. }
  340. g.Group.Go(func() error {
  341. defer g.Release(weight)
  342. return fn()
  343. })
  344. }
  345. func (g *LimitGroup) SetLimit(limit int64) {
  346. if limit > g.limit {
  347. g.limit = limit
  348. }
  349. }
  350. func watchDelta(ctx context.Context, g *LimitGroup, c *atomic.Int64, limit int64) {
  351. var maxDelta float64
  352. var buckets []int64
  353. // 5s ramp up period
  354. nextUpdate := time.Now().Add(5 * time.Second)
  355. ticker := time.NewTicker(time.Second)
  356. for {
  357. select {
  358. case <-ticker.C:
  359. buckets = append(buckets, c.Load())
  360. if len(buckets) < 2 {
  361. continue
  362. } else if len(buckets) > 10 {
  363. buckets = buckets[1:]
  364. }
  365. delta := float64((buckets[len(buckets)-1] - buckets[0])) / float64(len(buckets))
  366. slog.Debug("", "limit", limit, "delta", format.HumanBytes(int64(delta)), "max_delta", format.HumanBytes(int64(maxDelta)))
  367. if time.Now().Before(nextUpdate) {
  368. // quiet period; do not update ccy if recently updated
  369. continue
  370. } else if maxDelta > 0 {
  371. x := delta / maxDelta
  372. if x < 1.2 {
  373. continue
  374. }
  375. limit += int64(x)
  376. slog.Debug("setting", "limit", limit)
  377. g.SetLimit(limit)
  378. }
  379. // 3s cooldown period
  380. nextUpdate = time.Now().Add(3 * time.Second)
  381. maxDelta = delta
  382. case <-ctx.Done():
  383. return
  384. }
  385. }
  386. }