ggml-backend: Don't recreate the scheduler for each context

We don't need to create and destroy the GGML scheduler for every
context. Doing so adds CPU overhead to every forward pass and
allocates extra memory for contexts that never actually get
scheduled (for example, KV caches). Instead, we can keep a single
scheduler on the backend and reset it each time we call Compute.

This improves token generation performance by 1-2% and removes
scheduler create/destroy from profile traces.
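
In sketch form the pattern looks like the plain Go below, with
hypothetical scheduler/graph stand-ins rather than the real cgo calls
into ggml_backend_sched: create the scheduler once with the backend,
let every context borrow it through a back-pointer, and reset it after
each dispatch.

package main

import "fmt"

// scheduler is an illustrative stand-in for ggml's C-side
// ggml_backend_sched; the method names here are not ggml's API.
type scheduler struct{ resets int }

func newScheduler(graphSize int) *scheduler { return &scheduler{} }

func (s *scheduler) computeAsync(g *graph) { fmt.Println("compute", g.name) }
func (s *scheduler) reset()                { s.resets++ }
func (s *scheduler) synchronize()          {}

type graph struct{ name string }

// Backend owns the single scheduler for its whole lifetime.
type Backend struct{ sched *scheduler }

func NewBackend() *Backend {
	// Created once, sized up front for the largest expected graph,
	// instead of once per context.
	return &Backend{sched: newScheduler(8192)}
}

// Context no longer owns a scheduler; it points back at the backend.
type Context struct {
	b     *Backend
	graph *graph
}

func (b *Backend) NewContext() *Context { return &Context{b: b} }

func (c *Context) Compute() {
	c.b.sched.computeAsync(c.graph)
	// Reset right after dispatch so the shared scheduler is clean
	// for the next graph; waiting for results is a separate step.
	c.b.sched.reset()
	c.b.sched.synchronize()
}

func main() {
	b := NewBackend()
	// Many short-lived contexts (e.g. one per forward pass) share
	// one scheduler; none of them pays create/destroy costs.
	for i := 0; i < 3; i++ {
		ctx := b.NewContext()
		ctx.graph = &graph{name: fmt.Sprintf("graph-%d", i)}
		ctx.Compute()
	}
	fmt.Println("resets:", b.sched.resets)
}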
Jesse Gross authored 2 months ago
commit e5bcc51ae1
1 changed file with 21 additions and 13 deletions

ml/backend/ggml/ggml.go (+21 -13)

@@ -82,6 +82,8 @@ type Backend struct {
 	meta       *fs.GGML
 	cpus, gpus []Context
 	tensors    map[string]*Context
+
+	sched *C.struct_ggml_backend_sched
 }
 
 func New(r *os.File, params ml.BackendParams) (ml.Backend, error) {
@@ -182,10 +184,24 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) {
 		return nil, err
 	}
 
+	backends := make([]*C.struct_ggml_backend, len(gpus)+len(cpus))
+	bufts := make([]*C.struct_ggml_backend_buffer_type, len(gpus)+len(cpus))
+	for i, c := range append(gpus, cpus...) {
+		backends[i] = c.backend
+		bufts[i] = C.ggml_backend_get_default_buffer_type(c.backend)
+	}
+
 	return &Backend{
 		meta: meta,
 		cpus: cpus,
 		gpus: gpus,
+		sched: C.ggml_backend_sched_new(
+			(*C.ggml_backend_t)(unsafe.Pointer(&backends[0])),
+			(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&bufts[0])),
+			C.int(len(backends)),
+			C.size_t(max(8192, len(meta.Tensors().Items())*5)),
+			true,
+		),
 	}, nil
 }
 
@@ -219,31 +235,23 @@ func (b *Backend) NewContext() ml.Context {
 	})
 
 	backends := make([]*C.struct_ggml_backend, len(b.gpus)+len(b.cpus))
-	bufts := make([]*C.struct_ggml_backend_buffer_type, len(b.gpus)+len(b.cpus))
 	for i, c := range append(b.gpus, b.cpus...) {
 		backends[i] = c.backend
-		bufts[i] = C.ggml_backend_get_default_buffer_type(c.backend)
 	}
 
 	return &Context{
+		b:       b,
 		ctx:     c,
 		backend: backends[0],
 		nodes:   nodes,
-		sched: C.ggml_backend_sched_new(
-			(*C.ggml_backend_t)(unsafe.Pointer(&backends[0])),
-			(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&bufts[0])),
-			C.int(len(backends)),
-			C.size_t(nodes),
-			true,
-		),
 	}
 }
 
 type Context struct {
+	b       *Backend
 	ctx     *C.struct_ggml_context
 	backend *C.struct_ggml_backend
 
-	sched *C.struct_ggml_backend_sched
 	graph *C.struct_ggml_cgraph
 	nodes int
 }
@@ -257,12 +265,13 @@ func (c *Context) Forward(t ml.Tensor) {
 }
 
 func (c *Context) Compute(tensors ...ml.Tensor) {
-	C.ggml_backend_sched_graph_compute_async(c.sched, c.graph)
+	C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph)
+	C.ggml_backend_sched_reset(c.b.sched)
 
 	needSync := true
 	sync := func() {
 		if needSync {
-			C.ggml_backend_sched_synchronize(c.sched)
+			C.ggml_backend_sched_synchronize(c.b.sched)
 			needSync = false
 		}
 	}
@@ -350,7 +359,6 @@ func (c Context) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) {
 
 func (c *Context) Close() {
 	if c != nil {
-		C.ggml_backend_sched_free(c.sched)
 		C.ggml_free(c.ctx)
 	}
 }
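
Two details worth noting in the diff. First, the shared scheduler can
no longer be sized from a single context's node count, so it is sized
once from the model instead, as max(8192, len(meta.Tensors().Items())*5),
presumably an upper bound on the largest graph the model will build.
Second, Compute resets the scheduler immediately after the async
dispatch: the reset only clears the scheduler's per-graph node
assignments for the next caller, not work already submitted to the
backends, so the deferred synchronize still waits for the results.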