
Merge pull request #626 from jmorganca/mxyng/concurrent-downloads

parallel chunked downloads
Michael Yang 1 year ago
parent
commit
38dc2f79bc
5 changed files with 258 additions and 175 deletions
  1. go.mod (+1, -0)
  2. go.sum (+2, -0)
  3. server/download.go (+247, -160)
  4. server/images.go (+2, -14)
  5. server/upload.go (+6, -1)

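The change replaces the single sequential, append-only blob download with parallel chunked downloads: the blob is split into 64 MiB parts, each part is fetched with an HTTP Range request by its own goroutine under an errgroup, and written at its offset into a pre-sized partial file. Below is a minimal, self-contained sketch of that pattern, not the PR's code: fetchChunked, the example URL, and the concurrency limit of 8 are illustrative (download.go in this diff uses a limit of 64 and adds resume, retry, and progress reporting on top).

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"os"

	"golang.org/x/sync/errgroup"
)

// chunkSize mirrors the 64 MiB part size used in download.go.
const chunkSize = 64 * 1024 * 1024

func fetchChunked(ctx context.Context, url, dest string) error {
	// Discover the total size with a HEAD request.
	head, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(head)
	if err != nil {
		return err
	}
	resp.Body.Close()
	total := resp.ContentLength
	if total <= 0 {
		return fmt.Errorf("unknown content length")
	}

	// Pre-size the destination so every chunk can be written at its offset.
	file, err := os.OpenFile(dest, os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		return err
	}
	defer file.Close()
	if err := file.Truncate(total); err != nil {
		return err
	}

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(8) // bound concurrency; the PR uses g.SetLimit(64)
	for offset := int64(0); offset < total; offset += chunkSize {
		offset := offset
		end := offset + chunkSize
		if end > total {
			end = total
		}
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
			if err != nil {
				return err
			}
			// Range is inclusive on both ends.
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, end-1))
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
				return fmt.Errorf("unexpected status %d", resp.StatusCode)
			}
			// Each chunk lands at its own offset, so parts can finish in any order.
			_, err = io.Copy(io.NewOffsetWriter(file, offset), resp.Body)
			return err
		})
	}

	// Wait returns the first error; the derived ctx cancels the remaining chunks.
	return g.Wait()
}

func main() {
	// Illustrative usage; the URL and file name are placeholders.
	if err := fetchChunked(context.Background(), "https://example.com/blob", "blob.bin"); err != nil {
		fmt.Println("download failed:", err)
	}
}

Writing every chunk through io.NewOffsetWriter at its own offset is what lets parts complete in any order without coordinating on a shared file position; download.go layers per-part metadata and retries on top of this skeleton.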
+ 1 - 0
go.mod

@@ -10,6 +10,7 @@ require (
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/pdevine/readline v1.5.2
 	github.com/spf13/cobra v1.7.0
+	golang.org/x/sync v0.3.0
 )
 
 require github.com/rivo/uniseg v0.2.0 // indirect

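golang.org/x/sync is pulled in for errgroup, which download.go uses both to cap how many chunk downloads run at once (g.SetLimit(64)) and to cancel the remaining parts when any one fails. A minimal illustrative usage of that API, not taken from the PR:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // at most 4 goroutines at a time; download.go uses 64
	for i := 0; i < 10; i++ {
		i := i
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err() // another task already failed; stop early
			default:
				fmt.Println("task", i, "done")
				return nil
			}
		})
	}
	// Wait blocks until every goroutine returns and yields the first error.
	if err := g.Wait(); err != nil {
		fmt.Println("group failed:", err)
	}
}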
+ 2 - 0
go.sum

@@ -125,6 +125,8 @@ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMe
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

+ 247 - 160
server/download.go

@@ -2,236 +2,323 @@ package server
 
 import (
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"log"
 	"net/http"
+	"net/url"
 	"os"
 	"path/filepath"
 	"strconv"
+	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
+	"golang.org/x/sync/errgroup"
+
 	"github.com/jmorganca/ollama/api"
 )
 
-type FileDownload struct {
-	Digest    string
-	FilePath  string
+var blobDownloadManager sync.Map
+
+type blobDownload struct {
+	Name   string
+	Digest string
+
 	Total     int64
+	Completed atomic.Int64
+
+	Parts []*blobDownloadPart
+
+	context.CancelFunc
+	references atomic.Int32
+}
+
+type blobDownloadPart struct {
+	N         int
+	Offset    int64
+	Size      int64
 	Completed int64
+
+	*blobDownload `json:"-"`
 }
 
-var inProgress sync.Map // map of digests currently being downloaded to their current download progress
+func (p *blobDownloadPart) Name() string {
+	return strings.Join([]string{
+		p.blobDownload.Name, "partial", strconv.Itoa(p.N),
+	}, "-")
+}
 
-type downloadOpts struct {
-	mp      ModelPath
-	digest  string
-	regOpts *RegistryOptions
-	fn      func(api.ProgressResponse)
-	retry   int // track the number of retries on this download
+func (p *blobDownloadPart) StartsAt() int64 {
+	return p.Offset + p.Completed
 }
 
-const maxRetry = 3
+func (p *blobDownloadPart) StopsAt() int64 {
+	return p.Offset + p.Size
+}
 
-// downloadBlob downloads a blob from the registry and stores it in the blobs directory
-func downloadBlob(ctx context.Context, opts downloadOpts) error {
-	fp, err := GetBlobsPath(opts.digest)
+func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) error {
+	partFilePaths, err := filepath.Glob(b.Name + "-partial-*")
 	if err != nil {
 		return err
 	}
 
-	if fi, _ := os.Stat(fp); fi != nil {
-		// we already have the file, so return
-		opts.fn(api.ProgressResponse{
-			Digest:    opts.digest,
-			Total:     fi.Size(),
-			Completed: fi.Size(),
-		})
+	for _, partFilePath := range partFilePaths {
+		part, err := b.readPart(partFilePath)
+		if err != nil {
+			return err
+		}
 
-		return nil
+		b.Total += part.Size
+		b.Completed.Add(part.Completed)
+		b.Parts = append(b.Parts, part)
 	}
 
-	fileDownload := &FileDownload{
-		Digest:    opts.digest,
-		FilePath:  fp,
-		Total:     1, // dummy value to indicate that we don't know the total size yet
-		Completed: 0,
-	}
+	if len(b.Parts) == 0 {
+		resp, err := makeRequest(ctx, "HEAD", requestURL, nil, nil, opts)
+		if err != nil {
+			return err
+		}
+		defer resp.Body.Close()
 
-	_, downloading := inProgress.LoadOrStore(opts.digest, fileDownload)
-	if downloading {
-		// this is another client requesting the server to download the same blob concurrently
-		return monitorDownload(ctx, opts, fileDownload)
-	}
-	if err := doDownload(ctx, opts, fileDownload); err != nil {
-		if errors.Is(err, errDownload) && opts.retry < maxRetry {
-			opts.retry++
-			log.Print(err)
-			log.Printf("retrying download of %s", opts.digest)
-			return downloadBlob(ctx, opts)
+		if resp.StatusCode >= http.StatusBadRequest {
+			body, _ := io.ReadAll(resp.Body)
+			return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
 		}
-		return err
-	}
-	return nil
-}
 
-var downloadMu sync.Mutex // mutex to check to resume a download while monitoring
-
-// monitorDownload monitors the download progress of a blob and resumes it if it is interrupted
-func monitorDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error {
-	tick := time.NewTicker(time.Second)
-	for range tick.C {
-		done, resume, err := func() (bool, bool, error) {
-			downloadMu.Lock()
-			defer downloadMu.Unlock()
-			val, downloading := inProgress.Load(f.Digest)
-			if !downloading {
-				// check once again if the download is complete
-				if fi, _ := os.Stat(f.FilePath); fi != nil {
-					// successful download while monitoring
-					opts.fn(api.ProgressResponse{
-						Digest:    f.Digest,
-						Total:     fi.Size(),
-						Completed: fi.Size(),
-					})
-					return true, false, nil
-				}
-				// resume the download
-				inProgress.Store(f.Digest, f) // store the file download again to claim the resume
-				return false, true, nil
+		b.Total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+
+		var offset int64
+		var size int64 = 64 * 1024 * 1024
+
+		for offset < b.Total {
+			if offset+size > b.Total {
+				size = b.Total - offset
 			}
-			f, ok := val.(*FileDownload)
-			if !ok {
-				return false, false, fmt.Errorf("invalid type for in progress download: %T", val)
+
+			if err := b.newPart(offset, size); err != nil {
+				return err
 			}
-			opts.fn(api.ProgressResponse{
-				Status:    fmt.Sprintf("downloading %s", f.Digest),
-				Digest:    f.Digest,
-				Total:     f.Total,
-				Completed: f.Completed,
-			})
-			return false, false, nil
-		}()
-		if err != nil {
-			return err
-		}
-		if done {
-			// done downloading
-			return nil
-		}
-		if resume {
-			return doDownload(ctx, opts, f)
+
+			offset += size
 		}
 	}
+
+	log.Printf("downloading %s in %d part(s)", b.Digest[7:19], len(b.Parts))
 	return nil
 }
 
-var (
-	chunkSize   int64 = 1024 * 1024 // 1 MiB in bytes
-	errDownload       = fmt.Errorf("download failed")
-)
+func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) (err error) {
+	defer blobDownloadManager.Delete(b.Digest)
 
-// doDownload downloads a blob from the registry and stores it in the blobs directory
-func doDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error {
-	defer inProgress.Delete(f.Digest)
-	var size int64
+	ctx, b.CancelFunc = context.WithCancel(ctx)
 
-	fi, err := os.Stat(f.FilePath + "-partial")
-	switch {
-	case errors.Is(err, os.ErrNotExist):
-		// noop, file doesn't exist so create it
-	case err != nil:
-		return fmt.Errorf("stat: %w", err)
-	default:
-		size = fi.Size()
-		// Ensure the size is divisible by the chunk size by removing excess bytes
-		size -= size % chunkSize
+	file, err := os.OpenFile(b.Name+"-partial", os.O_CREATE|os.O_RDWR, 0644)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
 
-		err := os.Truncate(f.FilePath+"-partial", size)
-		if err != nil {
-			return fmt.Errorf("truncate: %w", err)
+	file.Truncate(b.Total)
+
+	g, ctx := errgroup.WithContext(ctx)
+	// TODO(mxyng): download concurrency should be configurable
+	g.SetLimit(64)
+	for i := range b.Parts {
+		part := b.Parts[i]
+		if part.Completed == part.Size {
+			continue
 		}
+
+		i := i
+		g.Go(func() error {
+			for try := 0; try < maxRetries; try++ {
+				w := io.NewOffsetWriter(file, part.StartsAt())
+				err := b.downloadChunk(ctx, requestURL, w, part, opts)
+				switch {
+				case errors.Is(err, context.Canceled):
+					return err
+				case err != nil:
+					log.Printf("%s part %d attempt %d failed: %v, retrying", b.Digest[7:19], i, try, err)
+					continue
+				default:
+					return nil
+				}
+			}
+
+			return errors.New("max retries exceeded")
+		})
 	}
 
-	requestURL := opts.mp.BaseURL()
-	requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", f.Digest)
+	if err := g.Wait(); err != nil {
+		return err
+	}
 
-	headers := make(http.Header)
-	headers.Set("Range", fmt.Sprintf("bytes=%d-", size))
+	// explicitly close the file so we can rename it
+	if err := file.Close(); err != nil {
+		return err
+	}
+
+	for i := range b.Parts {
+		if err := os.Remove(file.Name() + "-" + strconv.Itoa(i)); err != nil {
+			return err
+		}
+	}
+
+	return os.Rename(file.Name(), b.Name)
+}
 
-	resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts.regOpts)
+func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w io.Writer, part *blobDownloadPart, opts *RegistryOptions) error {
+	headers := make(http.Header)
+	headers.Set("Range", fmt.Sprintf("bytes=%d-%d", part.StartsAt(), part.StopsAt()-1))
+	resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts)
 	if err != nil {
-		log.Printf("couldn't download blob: %v", err)
-		return fmt.Errorf("%w: %w", errDownload, err)
+		return err
 	}
 	defer resp.Body.Close()
 
-	if resp.StatusCode >= http.StatusBadRequest {
-		body, _ := io.ReadAll(resp.Body)
-		return fmt.Errorf("%w: on download registry responded with code %d: %v", errDownload, resp.StatusCode, string(body))
+	n, err := io.Copy(w, io.TeeReader(resp.Body, b))
+	if err != nil && !errors.Is(err, context.Canceled) {
+		// rollback progress
+		b.Completed.Add(-n)
+		return err
+	}
+
+	part.Completed += n
+	if err := b.writePart(part.Name(), part); err != nil {
+		return err
+	}
+
+	// return nil or context.Canceled
+	return err
+}
+
+func (b *blobDownload) newPart(offset, size int64) error {
+	part := blobDownloadPart{blobDownload: b, Offset: offset, Size: size, N: len(b.Parts)}
+	if err := b.writePart(part.Name(), &part); err != nil {
+		return err
 	}
 
-	err = os.MkdirAll(filepath.Dir(f.FilePath), 0o700)
+	b.Parts = append(b.Parts, &part)
+	return nil
+}
+
+func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) {
+	var part blobDownloadPart
+	partFile, err := os.Open(partName)
 	if err != nil {
-		return fmt.Errorf("make blobs directory: %w", err)
+		return nil, err
 	}
+	defer partFile.Close()
 
-	remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
-	f.Completed = size
-	f.Total = remaining + f.Completed
+	if err := json.NewDecoder(partFile).Decode(&part); err != nil {
+		return nil, err
+	}
 
-	inProgress.Store(f.Digest, f)
+	part.blobDownload = b
+	return &part, nil
+}
 
-	out, err := os.OpenFile(f.FilePath+"-partial", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
+func (b *blobDownload) writePart(partName string, part *blobDownloadPart) error {
+	partFile, err := os.OpenFile(partName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
 	if err != nil {
-		return fmt.Errorf("open file: %w", err)
+		return err
 	}
-	defer out.Close()
-outerLoop:
+	defer partFile.Close()
+
+	return json.NewEncoder(partFile).Encode(part)
+}
+
+func (b *blobDownload) Write(p []byte) (n int, err error) {
+	n = len(p)
+	b.Completed.Add(int64(n))
+	return n, nil
+}
+
+func (b *blobDownload) acquire() {
+	b.references.Add(1)
+}
+
+func (b *blobDownload) release() {
+	if b.references.Add(-1) == 0 {
+		b.CancelFunc()
+	}
+}
+
+func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
+	b.acquire()
+	defer b.release()
+
+	ticker := time.NewTicker(60 * time.Millisecond)
 	for {
 		select {
+		case <-ticker.C:
 		case <-ctx.Done():
-			// handle client request cancellation
-			inProgress.Delete(f.Digest)
-			return nil
-		default:
-			opts.fn(api.ProgressResponse{
-				Status:    fmt.Sprintf("downloading %s", f.Digest),
-				Digest:    f.Digest,
-				Total:     f.Total,
-				Completed: f.Completed,
-			})
-
-			if f.Completed >= f.Total {
-				if err := out.Close(); err != nil {
-					return err
-				}
+			return ctx.Err()
+		}
 
-				if err := os.Rename(f.FilePath+"-partial", f.FilePath); err != nil {
-					opts.fn(api.ProgressResponse{
-						Status:    fmt.Sprintf("error renaming file: %v", err),
-						Digest:    f.Digest,
-						Total:     f.Total,
-						Completed: f.Completed,
-					})
-					return err
-				}
+		fn(api.ProgressResponse{
+			Status:    fmt.Sprintf("downloading %s", b.Digest),
+			Digest:    b.Digest,
+			Total:     b.Total,
+			Completed: b.Completed.Load(),
+		})
 
-				break outerLoop
+		if b.Completed.Load() >= b.Total {
+			// wait for the file to get renamed
+			if _, err := os.Stat(b.Name); err == nil {
+				return nil
 			}
 		}
+	}
+}
+
+type downloadOpts struct {
+	mp      ModelPath
+	digest  string
+	regOpts *RegistryOptions
+	fn      func(api.ProgressResponse)
+}
+
+const maxRetries = 3
 
-		n, err := io.CopyN(out, resp.Body, chunkSize)
-		if err != nil && !errors.Is(err, io.EOF) {
-			return fmt.Errorf("%w: %w", errDownload, err)
+// downloadBlob downloads a blob from the registry and stores it in the blobs directory
+func downloadBlob(ctx context.Context, opts downloadOpts) error {
+	fp, err := GetBlobsPath(opts.digest)
+	if err != nil {
+		return err
+	}
+
+	fi, err := os.Stat(fp)
+	switch {
+	case errors.Is(err, os.ErrNotExist):
+	case err != nil:
+		return err
+	default:
+		opts.fn(api.ProgressResponse{
+			Status:    fmt.Sprintf("downloading %s", opts.digest),
+			Digest:    opts.digest,
+			Total:     fi.Size(),
+			Completed: fi.Size(),
+		})
+
+		return nil
+	}
+
+	data, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest})
+	download := data.(*blobDownload)
+	if !ok {
+		requestURL := opts.mp.BaseURL()
+		requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest)
+		if err := download.Prepare(ctx, requestURL, opts.regOpts); err != nil {
+			return err
 		}
-		f.Completed += n
 
-		inProgress.Store(f.Digest, f)
+		go download.Run(context.Background(), requestURL, opts.regOpts)
 	}
 
-	log.Printf("success getting %s\n", f.Digest)
-	return nil
+	return download.Wait(ctx, opts.fn)
 }

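The Prepare/Run split above is what makes downloads resumable: each 64 MiB part gets a small JSON sidecar file named "<blob>-partial-<n>" recording its offset, size, and completed byte count, Prepare globs those files on restart, and each part's next Range request starts at Offset+Completed. A standalone sketch of just that bookkeeping follows; the field names mirror blobDownloadPart, while loadPart and savePart are illustrative stand-ins for readPart and writePart.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type part struct {
	N         int   // part index
	Offset    int64 // start of this part within the blob
	Size      int64 // length of this part
	Completed int64 // bytes already downloaded
}

// startsAt is where a resumed Range request for this part should begin.
func (p part) startsAt() int64 { return p.Offset + p.Completed }

// savePart persists progress next to the partial blob, e.g. "blob-partial-3".
func savePart(blob string, p part) error {
	f, err := os.OpenFile(fmt.Sprintf("%s-partial-%d", blob, p.N), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(p)
}

// loadPart restores a part's progress after a restart.
func loadPart(blob string, n int) (part, error) {
	var p part
	f, err := os.Open(fmt.Sprintf("%s-partial-%d", blob, n))
	if err != nil {
		return p, err
	}
	defer f.Close()
	return p, json.NewDecoder(f).Decode(&p)
}

func main() {
	if err := savePart("blob", part{N: 0, Size: 64 << 20, Completed: 1 << 20}); err != nil {
		panic(err)
	}
	p, err := loadPart("blob", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("resume this part at byte", p.startsAt()) // 1048576
}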
+ 2 - 14
server/images.go

@@ -30,8 +30,6 @@ import (
 	"github.com/jmorganca/ollama/version"
 )
 
-const MaxRetries = 3
-
 type RegistryOptions struct {
 	Insecure bool
 	Username string
@@ -1417,7 +1415,7 @@ func checkBlobExistence(ctx context.Context, mp ModelPath, digest string, regOpt
 
 func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.ReadSeeker, regOpts *RegistryOptions) (*http.Response, error) {
 	var status string
-	for try := 0; try < MaxRetries; try++ {
+	for try := 0; try < maxRetries; try++ {
 		resp, err := makeRequest(ctx, method, requestURL, headers, body, regOpts)
 		if err != nil {
 			log.Printf("couldn't start upload: %v", err)
@@ -1487,17 +1485,7 @@ func makeRequest(ctx context.Context, method string, requestURL *url.URL, header
 		req.ContentLength = contentLength
 	}
 
-	client := &http.Client{
-		CheckRedirect: func(req *http.Request, via []*http.Request) error {
-			if len(via) >= 10 {
-				return fmt.Errorf("too many redirects")
-			}
-			log.Printf("redirected to: %s\n", req.URL)
-			return nil
-		},
-	}
-
-	resp, err := client.Do(req)
+	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		return nil, err
 	}

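Dropping the hand-rolled redirect-checking client in favor of http.DefaultClient keeps the same behavior: when CheckRedirect is nil, Go's http.Client already follows up to 10 consecutive redirects and then fails with an error, so the custom policy only added a log line.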
+ 6 - 1
server/upload.go

@@ -10,6 +10,7 @@ import (
 	"net/url"
 	"os"
 	"strconv"
+	"sync"
 
 	"github.com/jmorganca/ollama/api"
 )
@@ -138,7 +139,7 @@ func uploadBlobChunk(ctx context.Context, method string, requestURL *url.URL, r
 		headers.Set("Content-Range", fmt.Sprintf("%d-%d", offset, offset+sectionReader.Size()-1))
 	}
 
-	for try := 0; try < MaxRetries; try++ {
+	for try := 0; try < maxRetries; try++ {
 		resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sectionReader, pw), opts)
 		if err != nil && !errors.Is(err, io.EOF) {
 			return nil, err
@@ -191,9 +192,13 @@ type ProgressWriter struct {
 	completed int64
 	total     int64
 	fn        func(api.ProgressResponse)
+	mu        sync.Mutex
 }
 
 func (pw *ProgressWriter) Write(b []byte) (int, error) {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+
 	n := len(b)
 	pw.bucket += int64(n)
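With chunked transfers now running concurrently, ProgressWriter guards its counters with a sync.Mutex so concurrent Write calls don't race on the running totals. A simplified, illustrative version of that guarded counter (not the PR's type):

package main

import (
	"fmt"
	"sync"
)

type progressWriter struct {
	mu        sync.Mutex
	completed int64
	report    func(completed int64)
}

// Write satisfies io.Writer; the mutex serializes concurrent callers so the
// running total and the progress callback stay consistent.
func (pw *progressWriter) Write(b []byte) (int, error) {
	pw.mu.Lock()
	defer pw.mu.Unlock()

	pw.completed += int64(len(b))
	pw.report(pw.completed)
	return len(b), nil
}

func main() {
	pw := &progressWriter{report: func(n int64) { fmt.Println("completed:", n) }}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			pw.Write(make([]byte, 1024)) // safe to call from multiple goroutines
		}()
	}
	wg.Wait()
}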