Browse Source

grammar: introduce new grammar package

This package provides a way to convert JSON schemas to equivalent EBNF.
It is intended to be a replacement for llama.cpp's schema_to_grammar.

This is still an early version and does not yet support all JSON schema
features. The to-do list includes:

- minimum/maximum constraints on integer types
- minLength/maxLength constraints on string types
- defs and refs
Blake Mizerany 4 months ago
parent
commit
f91565dfb5

+ 7 - 6
go.mod

@@ -12,7 +12,7 @@ require (
 	github.com/spf13/cobra v1.7.0
 	github.com/stretchr/testify v1.9.0
 	github.com/x448/float16 v0.8.4
-	golang.org/x/sync v0.9.0
+	golang.org/x/sync v0.10.0
 )
 
 require (
@@ -23,6 +23,7 @@ require (
 	github.com/nlpodyssey/gopickle v0.3.0
 	github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
 	golang.org/x/image v0.22.0
+	golang.org/x/tools v0.28.0
 )
 
 require (
@@ -68,12 +69,12 @@ require (
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
 	github.com/ugorji/go/codec v1.2.12 // indirect
 	golang.org/x/arch v0.8.0 // indirect
-	golang.org/x/crypto v0.23.0
+	golang.org/x/crypto v0.30.0
 	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
-	golang.org/x/net v0.25.0 // indirect
-	golang.org/x/sys v0.20.0
-	golang.org/x/term v0.20.0
-	golang.org/x/text v0.20.0
+	golang.org/x/net v0.32.0 // indirect
+	golang.org/x/sys v0.28.0
+	golang.org/x/term v0.27.0
+	golang.org/x/text v0.21.0
 	google.golang.org/protobuf v1.34.1
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

+ 14 - 12
go.sum

@@ -212,8 +212,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
+golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -255,8 +255,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
-golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
+golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -266,8 +266,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
-golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -283,17 +283,17 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
-golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -307,6 +307,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
+golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

+ 22 - 0
grammar/bench_test.go

@@ -0,0 +1,22 @@
+//go:build go1.24
+
+package grammar
+
+import "testing"
+
+func BenchmarkFromSchema(b *testing.B) {
+	for tt := range testCases(b) {
+		b.Run("", func(b *testing.B) {
+			s := []byte(tt.schema)
+
+			b.ReportAllocs()
+			for b.Loop() {
+				_, err := FromSchema(nil, s)
+				if err != nil {
+					b.Fatalf("GrammarFromSchema: %v", err)
+				}
+			}
+		})
+		return
+	}
+}

+ 227 - 0
grammar/grammar.go

@@ -0,0 +1,227 @@
+package grammar
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"iter"
+	"strconv"
+
+	"github.com/ollama/ollama/grammar/jsonschema"
+)
+
+const jsonTerms = `
+# Unicode
+#
+# Unicode characters can be specified directly in the grammar, for example
+# hiragana ::= [ぁ-ゟ], or with escapes: 8-bit (\xXX), 16-bit (\uXXXX) or 32-bit
+# (\UXXXXXXXX).
+unicode ::= \x{hex}{2} | \u{hex}{4} | \U{hex}{8}
+
+# JSON grammar from RFC 7159
+null    ::= "null"
+object  ::= "{" (kv ("," kv)*)? "}"
+array   ::= "[" (value ("," value)*)? "]"
+kv      ::= string ":" value
+integer ::= "0" | [1-9] [0-9]*
+number  ::= "-"? integer frac? exp?
+frac    ::= "." [0-9]+
+exp     ::= ("e" | "E") ("+" | "-")? [0-9]+
+string  ::= "\"" char* "\""
+escape  ::= ["/" | "b" | "f" | "n" | "r" | "t" | unicode]
+char    ::= [^"\\] | escape
+space   ::= (" " | "\t" | "\n" | "\r")*
+hex     ::= [0-9] | [a-f] | [A-F]
+boolean ::= "true" | "false"
+value   ::= object | array | string | number | boolean | "null"
+
+# User-defined
+`
+
+// FromSchema generates a grammar from a JSON schema.
+func FromSchema(buf []byte, jsonSchema []byte) ([]byte, error) {
+	var s *jsonschema.Schema
+	if err := json.Unmarshal(jsonSchema, &s); err != nil {
+		return nil, err
+	}
+
+	var g builder
+
+	// "root" is the only rule that is guaranteed to exist, so we start
+	// with its length for padding, and then adjust it as we go.
+	g.pad = len("root")
+	for id := range dependencies("root", s) {
+		g.pad = max(g.pad, len(id))
+	}
+
+	g.b.WriteString(jsonTerms)
+
+	ids := make(map[*jsonschema.Schema]string)
+	for id, s := range dependencies("root", s) {
+		ids[s] = id
+		g.define(id)
+		if err := fromSchema(&g, ids, s); err != nil {
+			return nil, err
+		}
+	}
+	g.define("root")
+	if err := fromSchema(&g, ids, s); err != nil {
+		return nil, err
+	}
+	g.define("") // finalize the last rule
+	return g.b.Bytes(), nil
+}
+
+func fromSchema(g *builder, ids map[*jsonschema.Schema]string, s *jsonschema.Schema) error {
+	switch typ := s.EffectiveType(); typ {
+	case "array":
+		if len(s.PrefixItems) == 0 && s.Items == nil {
+			g.u("array")
+		} else {
+			g.q("[")
+			for i, s := range s.PrefixItems {
+				if i > 0 {
+					g.q(",")
+				}
+				g.u(ids[s])
+			}
+			if s.Items != nil {
+				g.u("(")
+				if len(s.PrefixItems) > 0 {
+					g.q(",")
+				}
+				g.u(ids[s.Items])
+				g.u(")*")
+			}
+			g.q("]")
+		}
+	case "object":
+		if len(s.Properties) == 0 {
+			g.u("object")
+		} else {
+			g.q("{")
+			for i, p := range s.Properties {
+				name := ids[p]
+				if i > 0 {
+					g.q(",")
+				}
+				g.q(p.Name)
+				g.q(":")
+				g.u(name)
+			}
+			g.q("}")
+		}
+	case "number":
+		buildConstrainedNumber(g, s)
+	case "string":
+		if len(s.Enum) == 0 {
+			g.u("string")
+		} else {
+			g.u("(")
+			for i, e := range s.Enum {
+				if i > 0 {
+					g.q("|")
+				}
+				g.q(string(e))
+			}
+			g.u(")")
+		}
+	case "boolean", "value", "null", "integer":
+		g.u(typ)
+	default:
+		return fmt.Errorf("%s: unsupported type %q", s.Name, typ)
+	}
+	return nil
+}
+
+// dependencies returns a sequence of all child dependencies of the schema in
+// post-order.
+//
+// The first value is the id/pointer to the dependency, and the second value
+// is the schema.
+func dependencies(id string, s *jsonschema.Schema) iter.Seq2[string, *jsonschema.Schema] {
+	return func(yield func(string, *jsonschema.Schema) bool) {
+		for i, p := range s.Properties {
+			id := fmt.Sprintf("%s_%d", id, i)
+			for did, d := range dependencies(id, p) {
+				if !yield(did, d) {
+					return
+				}
+			}
+			if !yield(id, p) {
+				return
+			}
+		}
+		for i, p := range s.PrefixItems {
+			id := fmt.Sprintf("tuple_%d", i)
+			for did, d := range dependencies(id, p) {
+				id := fmt.Sprintf("%s_%s", id, did)
+				if !yield(id, d) {
+					return
+				}
+			}
+			if !yield(id, p) {
+				return
+			}
+		}
+		if s.Items != nil {
+			id := fmt.Sprintf("%s_tuple_%d", id, len(s.PrefixItems))
+			for did, d := range dependencies(id, s.Items) {
+				if !yield(did, d) {
+					return
+				}
+			}
+			if !yield(id, s.Items) {
+				return
+			}
+		}
+	}
+}
+
+type builder struct {
+	b     bytes.Buffer
+	pad   int
+	rules int
+	items int
+}
+
+// define terminates the current rule, if any, and then either starts a new
+// rule or does nothing else if the name is empty.
+func (b *builder) define(name string) {
+	if b.rules > 0 {
+		b.b.WriteString(";\n")
+	}
+	if name == "" {
+		return
+	}
+	fmt.Fprintf(&b.b, "% -*s", b.pad, name)
+	b.b.WriteString(" ::=")
+	b.rules++
+	b.items = 0
+}
+
+// quote appends a terminal to the current rule.
+func (b *builder) q(s string) {
+	if b.items > 0 {
+		b.b.WriteString(" ")
+	}
+	b.b.WriteString(" ")
+	b.b.WriteString(strconv.Quote(s))
+}
+
+// u appends a non-terminal to the current rule.
+func (b *builder) u(s string) {
+	if b.items > 0 {
+		b.b.WriteString(" ")
+	}
+	b.b.WriteString(" ")
+	b.b.WriteString(s)
+}
+
+func buildConstrainedNumber(b *builder, s *jsonschema.Schema) {
+	if s.Minimum == 0 && s.Maximum == 0 {
+		b.u("TODO")
+	} else {
+		b.u("number")
+	}
+}

+ 75 - 0
grammar/grammar_test.go

@@ -0,0 +1,75 @@
+package grammar
+
+import (
+	"bufio"
+	"cmp"
+	"iter"
+	"strings"
+	"testing"
+
+	_ "embed"
+
+	"github.com/ollama/ollama/grammar/internal/diff"
+)
+
+func TestFromSchema(t *testing.T) {
+	for tt := range testCases(t) {
+		t.Run(tt.name, func(t *testing.T) {
+			g, err := FromSchema(nil, []byte(tt.schema))
+			if err != nil {
+				t.Fatalf("FromSchema: %v", err)
+			}
+			got := string(g)
+			got = strings.TrimPrefix(got, jsonTerms)
+			if got != tt.want {
+				t.Logf("schema:\n%s", tt.schema)
+				t.Fatal(string(diff.Diff("got", []byte(got), "want", []byte(tt.want))))
+			}
+		})
+	}
+}
+
+type testCase struct {
+	name   string
+	schema string
+	want   string
+}
+
+//go:embed testdata/schemas.txt
+var tests string
+
+func testCases(t testing.TB) iter.Seq[testCase] {
+	t.Helper()
+	return func(yield func(testCase) bool) {
+		t.Helper()
+		sc := bufio.NewScanner(strings.NewReader(tests))
+		name := ""
+		for sc.Scan() {
+			line := strings.TrimSpace(sc.Text())
+			if line == "" {
+				name = ""
+				continue
+			}
+			if line[0] == '#' {
+				name = cmp.Or(name, strings.TrimSpace(line[1:]))
+				continue
+			}
+			s := sc.Text()
+			g := ""
+			for sc.Scan() {
+				line = strings.TrimSpace(sc.Text())
+				if line == "" || line[0] == '#' {
+					break
+				}
+				g += sc.Text() + "\n"
+			}
+			if !yield(testCase{name, s, g}) {
+				return
+			}
+			name = strings.TrimSpace(strings.TrimPrefix(line, "#"))
+		}
+		if err := sc.Err(); err != nil {
+			t.Fatalf("error reading tests: %v", err)
+		}
+	}
+}

+ 261 - 0
grammar/internal/diff/diff.go

@@ -0,0 +1,261 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// A pair is a pair of values tracked for both the x and y side of a diff.
+// It is typically a pair of line indexes.
+type pair struct{ x, y int }
+
+// Diff returns an anchored diff of the two texts old and new
+// in the “unified diff” format. If old and new are identical,
+// Diff returns a nil slice (no output).
+//
+// Unix diff implementations typically look for a diff with
+// the smallest number of lines inserted and removed,
+// which can in the worst case take time quadratic in the
+// number of lines in the texts. As a result, many implementations
+// either can be made to run for a long time or cut off the search
+// after a predetermined amount of work.
+//
+// In contrast, this implementation looks for a diff with the
+// smallest number of “unique” lines inserted and removed,
+// where unique means a line that appears just once in both old and new.
+// We call this an “anchored diff” because the unique lines anchor
+// the chosen matching regions. An anchored diff is usually clearer
+// than a standard diff, because the algorithm does not try to
+// reuse unrelated blank lines or closing braces.
+// The algorithm also guarantees to run in O(n log n) time
+// instead of the standard O(n²) time.
+//
+// Some systems call this approach a “patience diff,” named for
+// the “patience sorting” algorithm, itself named for a solitaire card game.
+// We avoid that name for two reasons. First, the name has been used
+// for a few different variants of the algorithm, so it is imprecise.
+// Second, the name is frequently interpreted as meaning that you have
+// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm,
+// when in fact the algorithm is faster than the standard one.
+func Diff(oldName string, old []byte, newName string, new []byte) []byte {
+	if bytes.Equal(old, new) {
+		return nil
+	}
+	x := lines(old)
+	y := lines(new)
+
+	// Print diff header.
+	var out bytes.Buffer
+	fmt.Fprintf(&out, "diff %s %s\n", oldName, newName)
+	fmt.Fprintf(&out, "--- %s\n", oldName)
+	fmt.Fprintf(&out, "+++ %s\n", newName)
+
+	// Loop over matches to consider,
+	// expanding each match to include surrounding lines,
+	// and then printing diff chunks.
+	// To avoid setup/teardown cases outside the loop,
+	// tgs returns a leading {0,0} and trailing {len(x), len(y)} pair
+	// in the sequence of matches.
+	var (
+		done  pair     // printed up to x[:done.x] and y[:done.y]
+		chunk pair     // start lines of current chunk
+		count pair     // number of lines from each side in current chunk
+		ctext []string // lines for current chunk
+	)
+	for _, m := range tgs(x, y) {
+		if m.x < done.x {
+			// Already handled scanning forward from earlier match.
+			continue
+		}
+
+		// Expand matching lines as far as possible,
+		// establishing that x[start.x:end.x] == y[start.y:end.y].
+		// Note that on the first (or last) iteration we may (or definitely do)
+		// have an empty match: start.x==end.x and start.y==end.y.
+		start := m
+		for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] {
+			start.x--
+			start.y--
+		}
+		end := m
+		for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] {
+			end.x++
+			end.y++
+		}
+
+		// Emit the mismatched lines before start into this chunk.
+		// (No effect on first sentinel iteration, when start = {0,0}.)
+		for _, s := range x[done.x:start.x] {
+			ctext = append(ctext, "-"+s)
+			count.x++
+		}
+		for _, s := range y[done.y:start.y] {
+			ctext = append(ctext, "+"+s)
+			count.y++
+		}
+
+		// If we're not at EOF and have too few common lines,
+		// the chunk includes all the common lines and continues.
+		const C = 3 // number of context lines
+		if (end.x < len(x) || end.y < len(y)) &&
+			(end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) {
+			for _, s := range x[start.x:end.x] {
+				ctext = append(ctext, " "+s)
+				count.x++
+				count.y++
+			}
+			done = end
+			continue
+		}
+
+		// End chunk with common lines for context.
+		if len(ctext) > 0 {
+			n := end.x - start.x
+			if n > C {
+				n = C
+			}
+			for _, s := range x[start.x : start.x+n] {
+				ctext = append(ctext, " "+s)
+				count.x++
+				count.y++
+			}
+			done = pair{start.x + n, start.y + n}
+
+			// Format and emit chunk.
+			// Convert line numbers to 1-indexed.
+			// Special case: empty file shows up as 0,0 not 1,0.
+			if count.x > 0 {
+				chunk.x++
+			}
+			if count.y > 0 {
+				chunk.y++
+			}
+			fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y)
+			for _, s := range ctext {
+				out.WriteString(s)
+			}
+			count.x = 0
+			count.y = 0
+			ctext = ctext[:0]
+		}
+
+		// If we reached EOF, we're done.
+		if end.x >= len(x) && end.y >= len(y) {
+			break
+		}
+
+		// Otherwise start a new chunk.
+		chunk = pair{end.x - C, end.y - C}
+		for _, s := range x[chunk.x:end.x] {
+			ctext = append(ctext, " "+s)
+			count.x++
+			count.y++
+		}
+		done = end
+	}
+
+	return out.Bytes()
+}
+
+// lines returns the lines in the file x, including newlines.
+// If the file does not end in a newline, one is supplied
+// along with a warning about the missing newline.
+func lines(x []byte) []string {
+	l := strings.SplitAfter(string(x), "\n")
+	if l[len(l)-1] == "" {
+		l = l[:len(l)-1]
+	} else {
+		// Treat last line as having a message about the missing newline attached,
+		// using the same text as BSD/GNU diff (including the leading backslash).
+		l[len(l)-1] += "\n\\ No newline at end of file\n"
+	}
+	return l
+}
+
+// tgs returns the pairs of indexes of the longest common subsequence
+// of unique lines in x and y, where a unique line is one that appears
+// once in x and once in y.
+//
+// The longest common subsequence algorithm is as described in
+// Thomas G. Szymanski, “A Special Case of the Maximal Common
+// Subsequence Problem,” Princeton TR #170 (January 1975),
+// available at https://research.swtch.com/tgs170.pdf.
+func tgs(x, y []string) []pair {
+	// Count the number of times each string appears in a and b.
+	// We only care about 0, 1, many, counted as 0, -1, -2
+	// for the x side and 0, -4, -8 for the y side.
+	// Using negative numbers now lets us distinguish positive line numbers later.
+	m := make(map[string]int)
+	for _, s := range x {
+		if c := m[s]; c > -2 {
+			m[s] = c - 1
+		}
+	}
+	for _, s := range y {
+		if c := m[s]; c > -8 {
+			m[s] = c - 4
+		}
+	}
+
+	// Now unique strings can be identified by m[s] = -1+-4.
+	//
+	// Gather the indexes of those strings in x and y, building:
+	//	xi[i] = increasing indexes of unique strings in x.
+	//	yi[i] = increasing indexes of unique strings in y.
+	//	inv[i] = index j such that x[xi[i]] = y[yi[j]].
+	var xi, yi, inv []int
+	for i, s := range y {
+		if m[s] == -1+-4 {
+			m[s] = len(yi)
+			yi = append(yi, i)
+		}
+	}
+	for i, s := range x {
+		if j, ok := m[s]; ok && j >= 0 {
+			xi = append(xi, i)
+			inv = append(inv, j)
+		}
+	}
+
+	// Apply Algorithm A from Szymanski's paper.
+	// In those terms, A = J = inv and B = [0, n).
+	// We add sentinel pairs {0,0}, and {len(x),len(y)}
+	// to the returned sequence, to help the processing loop.
+	J := inv
+	n := len(xi)
+	T := make([]int, n)
+	L := make([]int, n)
+	for i := range T {
+		T[i] = n + 1
+	}
+	for i := range n {
+		k := sort.Search(n, func(k int) bool {
+			return T[k] >= J[i]
+		})
+		T[k] = J[i]
+		L[i] = k + 1
+	}
+	k := 0
+	for _, v := range L {
+		if k < v {
+			k = v
+		}
+	}
+	seq := make([]pair, 2+k)
+	seq[1+k] = pair{len(x), len(y)} // sentinel at end
+	lastj := n
+	for i := n - 1; i >= 0; i-- {
+		if L[i] == k && J[i] < lastj {
+			seq[k] = pair{xi[i], yi[J[i]]}
+			k--
+		}
+	}
+	seq[0] = pair{0, 0} // sentinel at start
+	return seq
+}

+ 44 - 0
grammar/internal/diff/diff_test.go

@@ -0,0 +1,44 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+	"bytes"
+	"path/filepath"
+	"testing"
+
+	"golang.org/x/tools/txtar"
+)
+
+func clean(text []byte) []byte {
+	text = bytes.ReplaceAll(text, []byte("$\n"), []byte("\n"))
+	text = bytes.TrimSuffix(text, []byte("^D\n"))
+	return text
+}
+
+func Test(t *testing.T) {
+	files, _ := filepath.Glob("testdata/*.txt")
+	if len(files) == 0 {
+		t.Fatalf("no testdata")
+	}
+
+	for _, file := range files {
+		t.Run(filepath.Base(file), func(t *testing.T) {
+			a, err := txtar.ParseFile(file)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if len(a.Files) != 3 || a.Files[2].Name != "diff" {
+				t.Fatalf("%s: want three files, third named \"diff\"", file)
+			}
+			diffs := Diff(a.Files[0].Name, clean(a.Files[0].Data), a.Files[1].Name, clean(a.Files[1].Data))
+			want := clean(a.Files[2].Data)
+			if !bytes.Equal(diffs, want) {
+				t.Fatalf("%s: have:\n%s\nwant:\n%s\n%s", file,
+					diffs, want, Diff("have", diffs, "want", want))
+			}
+		})
+	}
+}

+ 13 - 0
grammar/internal/diff/testdata/allnew.txt

@@ -0,0 +1,13 @@
+-- old --
+-- new --
+a
+b
+c
+-- diff --
+diff old new
+--- old
++++ new
+@@ -0,0 +1,3 @@
++a
++b
++c

+ 13 - 0
grammar/internal/diff/testdata/allold.txt

@@ -0,0 +1,13 @@
+-- old --
+a
+b
+c
+-- new --
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,3 +0,0 @@
+-a
+-b
+-c

+ 35 - 0
grammar/internal/diff/testdata/basic.txt

@@ -0,0 +1,35 @@
+Example from Hunt and McIlroy, “An Algorithm for Differential File Comparison.”
+https://www.cs.dartmouth.edu/~doug/diff.pdf
+
+-- old --
+a
+b
+c
+d
+e
+f
+g
+-- new --
+w
+a
+b
+x
+y
+z
+e
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,7 +1,7 @@
++w
+ a
+ b
+-c
+-d
++x
++y
++z
+ e
+-f
+-g

+ 40 - 0
grammar/internal/diff/testdata/dups.txt

@@ -0,0 +1,40 @@
+-- old --
+a
+
+b
+
+c
+
+d
+
+e
+
+f
+-- new --
+a
+
+B
+
+C
+
+d
+
+e
+
+f
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,8 +1,8 @@
+ a
+ $
+-b
+-
+-c
++B
++
++C
+ $
+ d
+ $

+ 38 - 0
grammar/internal/diff/testdata/end.txt

@@ -0,0 +1,38 @@
+-- old --
+1
+2
+3
+4
+5
+6
+7
+eight
+nine
+ten
+eleven
+-- new --
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+-- diff --
+diff old new
+--- old
++++ new
+@@ -5,7 +5,6 @@
+ 5
+ 6
+ 7
+-eight
+-nine
+-ten
+-eleven
++8
++9
++10

+ 9 - 0
grammar/internal/diff/testdata/eof.txt

@@ -0,0 +1,9 @@
+-- old --
+a
+b
+c^D
+-- new --
+a
+b
+c^D
+-- diff --

+ 18 - 0
grammar/internal/diff/testdata/eof1.txt

@@ -0,0 +1,18 @@
+-- old --
+a
+b
+c
+-- new --
+a
+b
+c^D
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,3 +1,3 @@
+ a
+ b
+-c
++c
+\ No newline at end of file

+ 18 - 0
grammar/internal/diff/testdata/eof2.txt

@@ -0,0 +1,18 @@
+-- old --
+a
+b
+c^D
+-- new --
+a
+b
+c
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,3 +1,3 @@
+ a
+ b
+-c
+\ No newline at end of file
++c

+ 62 - 0
grammar/internal/diff/testdata/long.txt

@@ -0,0 +1,62 @@
+-- old --
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+14½
+15
+16
+17
+18
+19
+20
+-- new --
+1
+2
+3
+4
+5
+6
+8
+9
+10
+11
+12
+13
+14
+17
+18
+19
+20
+-- diff --
+diff old new
+--- old
++++ new
+@@ -4,7 +4,6 @@
+ 4
+ 5
+ 6
+-7
+ 8
+ 9
+ 10
+@@ -12,9 +11,6 @@
+ 12
+ 13
+ 14
+-14½
+-15
+-16
+ 17
+ 18
+ 19

+ 5 - 0
grammar/internal/diff/testdata/same.txt

@@ -0,0 +1,5 @@
+-- old --
+hello world
+-- new --
+hello world
+-- diff --

+ 34 - 0
grammar/internal/diff/testdata/start.txt

@@ -0,0 +1,34 @@
+-- old --
+e
+pi
+4
+5
+6
+7
+8
+9
+10
+-- new --
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,5 +1,6 @@
+-e
+-pi
++1
++2
++3
+ 4
+ 5
+ 6

+ 40 - 0
grammar/internal/diff/testdata/triv.txt

@@ -0,0 +1,40 @@
+Another example from Hunt and McIlroy,
+“An Algorithm for Differential File Comparison.”
+https://www.cs.dartmouth.edu/~doug/diff.pdf
+
+Anchored diff gives up on finding anything,
+since there are no unique lines.
+
+-- old --
+a
+b
+c
+a
+b
+b
+a
+-- new --
+c
+a
+b
+a
+b
+c
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,7 +1,6 @@
+-a
+-b
+-c
+-a
+-b
+-b
+-a
++c
++a
++b
++a
++b
++c

+ 171 - 0
grammar/jsonschema/decode.go

@@ -0,0 +1,171 @@
+package jsonschema
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+)
+
+// Schema holds a JSON schema.
+type Schema struct {
+	// Name is the name of the property. For the parent/root property, this
+	// is "root". For child properties, this is the name of the property.
+	Name string `json:"-"`
+
+	// Type is the type of the property.
+	//
+	// TODO: Union types (e.g. make this a []string).
+	Type string
+
+	// PrefixItems is a list of schemas for each item in a tuple. By
+	// default, the tuple is "closed." unless Items is set to true or a
+	// valid Schema.
+	PrefixItems []*Schema
+
+	// Items is the schema for each item in a list.
+	//
+	// If it is missing, or its JSON value is "null" or "false", it is nil.
+	// If the JSON value is "true", it is set to the empty Schema. If the
+	// JSON value is an object, it will be decoded as a Schema.
+	Items *Schema
+
+	// MinItems specifies the minimum number of items allowed in a list.
+	MinItems int
+
+	// MaxItems specifies the maximum number of items allowed in a list.
+	MaxItems int
+
+	// Properties is the schema for each property of an object.
+	Properties []*Schema
+
+	// Format is the format of the property. This is used to validate the
+	// property against a specific format.
+	//
+	// It is the caller's responsibility to validate the property against
+	// the format.
+	Format string
+
+	// Minimum specifies the minimum value for numeric properties.
+	Minimum float64
+
+	// Maximum specifies the maximum value for numeric properties.
+	Maximum float64
+
+	// Enum is a list of valid values for the property.
+	Enum []json.RawMessage
+}
+
+func (s *Schema) UnmarshalJSON(data []byte) error {
+	type S Schema
+	w := struct {
+		Properties props
+		Items      items
+		*S
+	}{
+		S: (*S)(s),
+	}
+	if err := json.Unmarshal(data, &w); err != nil {
+		return err
+	}
+	if w.Items.set {
+		s.Items = &w.Items.Schema
+	}
+	s.Properties = w.Properties
+	return nil
+}
+
+type items struct {
+	Schema
+	set bool
+}
+
+func (s *items) UnmarshalJSON(data []byte) error {
+	switch b := data[0]; b {
+	case 't':
+		*s = items{set: true}
+	case '{':
+		type I items
+		if err := json.Unmarshal(data, (*I)(s)); err != nil {
+			return err
+		}
+		s.set = true
+	case 'n', 'f':
+	default:
+		return errors.New("invalid Items")
+	}
+	return nil
+}
+
+// EffectiveType returns the effective type of the schema. If the Type field is
+// not empty, it is returned; otherwise:
+//
+//   - If the schema has both Properties and Items, it returns an empty string.
+//   - If the schema has Properties, it returns "object".
+//   - If the schema has Items, it returns "array".
+//   - If the schema has neither Properties nor Items, it returns "value".
+//
+// The returned string is never empty.
+func (d *Schema) EffectiveType() string {
+	if d.Type == "" {
+		if len(d.Properties) > 0 {
+			return "object"
+		}
+		if len(d.PrefixItems) > 0 || d.Items != nil {
+			return "array"
+		}
+		return "value"
+	}
+	return d.Type
+}
+
+// props is an ordered list of properties. The order of the properties
+// is the order in which they were defined in the schema.
+type props []*Schema
+
+var _ json.Unmarshaler = (*props)(nil)
+
+func (v *props) UnmarshalJSON(data []byte) error {
+	if len(data) == 0 {
+		return nil
+	}
+	if data[0] != '{' {
+		return errors.New("expected object")
+	}
+
+	d := json.NewDecoder(bytes.NewReader(data))
+
+	// TODO(bmizerany): Consider DisallowUnknownFields. Currently, we, like
+	// llama.cpp, ignore unknown fields, which could lead to unexpected
+	// behavior for clients of this package, since they may not be aware
+	// that "additionalFields", "itemsPrefix", etc, are being ignored.
+	//
+	// For now, just do what llama.cpp does.
+
+	t, err := d.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("expected object")
+	}
+	for d.More() {
+		// Use the first token (map key) as the property name, then
+		// decode the rest of the object fields into a Schema and
+		// append.
+		t, err := d.Token()
+		if err != nil {
+			return err
+		}
+		if t == json.Delim('}') {
+			return nil
+		}
+		s := &Schema{
+			Name: t.(string),
+		}
+		if err := d.Decode(s); err != nil {
+			return err
+		}
+		*v = append(*v, s)
+	}
+	return nil
+}

+ 104 - 0
grammar/jsonschema/decode_test.go

@@ -0,0 +1,104 @@
+package jsonschema
+
+import (
+	"encoding/json"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+const testSchemaBasic = `
+{
+  "properties": {
+    "tupleClosedEmpty":   { "prefixItems": [] },
+    "tupleClosedMissing": { "prefixItems": [{}] },
+    "tupleClosedNull":    { "prefixItems": [{}], "items": null },
+    "tupleClosedFalse":   { "prefixItems": [{}], "items": false },
+    "tupleOpenTrue":      { "prefixItems": [{}], "items": true },
+    "tupleOpenEmpty":     { "prefixItems": [{}], "items": {} },
+    "tupleOpenTyped":     { "prefixItems": [{}], "items": {"type": "boolean"} },
+    "tupleOpenMax":       { "prefixItems": [{}], "items": true, "maxItems": 3},
+
+    "array": { "items": {"type": "number"} },
+
+    "null": { "type": "null" },
+    "string": { "type": "string" },
+    "boolean": { "type": "boolean" }
+  }
+}
+`
+
+func TestSchemaUnmarshal(t *testing.T) {
+	var got *Schema
+	if err := json.Unmarshal([]byte(testSchemaBasic), &got); err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	want := &Schema{
+		Properties: []*Schema{
+			{Name: "tupleClosedEmpty", PrefixItems: []*Schema{}, Items: nil},
+			{Name: "tupleClosedMissing", PrefixItems: []*Schema{{}}, Items: nil},
+			{Name: "tupleClosedNull", PrefixItems: []*Schema{{}}, Items: nil},
+			{Name: "tupleClosedFalse", PrefixItems: []*Schema{{}}, Items: nil},
+
+			{Name: "tupleOpenTrue", PrefixItems: []*Schema{{}}, Items: &Schema{}},
+			{Name: "tupleOpenEmpty", PrefixItems: []*Schema{{}}, Items: &Schema{}},
+			{Name: "tupleOpenTyped", PrefixItems: []*Schema{{}}, Items: &Schema{Type: "boolean"}},
+			{Name: "tupleOpenMax", PrefixItems: []*Schema{{}}, Items: &Schema{}, MaxItems: 3},
+
+			{Name: "array", Items: &Schema{Type: "number"}},
+
+			{Name: "null", Type: "null"},
+			{Name: "string", Type: "string"},
+			{Name: "boolean", Type: "boolean"},
+		},
+	}
+
+	if diff := cmp.Diff(want, got); diff != "" {
+		t.Errorf("(-want, +got)\n%s", diff)
+	}
+}
+
+func TestEffectiveType(t *testing.T) {
+	const schema = `
+		{"properties": {
+			"o": {"type": "object"},
+			"a": {"type": "array"},
+			"n": {"type": "number"},
+			"s": {"type": "string"},
+			"z": {"type": "null"},
+			"b": {"type": "boolean"},
+
+			"t0": {"prefixItems": [{}], "items": {"type": "number"}},
+			"t1": {"items": {"type": "number"}, "maxItems": 3},
+
+			"v": {"maxItems": 3}
+		}}
+	`
+
+	var s *Schema
+	if err := json.Unmarshal([]byte(schema), &s); err != nil {
+		t.Fatalf("json.Unmarshal: %v", err)
+	}
+
+	var got []string
+	for _, p := range s.Properties {
+		got = append(got, p.EffectiveType())
+	}
+
+	want := strings.Fields(`
+		object
+		array
+		number
+		string
+		null
+		boolean
+		array
+		array
+		value
+	`)
+	if !reflect.DeepEqual(want, got) {
+		t.Errorf("\ngot:\n\t%v\nwant:\n\t%v", got, want)
+	}
+}

+ 76 - 0
grammar/testdata/schemas.txt

@@ -0,0 +1,76 @@
+# This file holds tests for JSON schema to EBNF grammar conversions.
+#
+# The format is a JSON schema, followed by the expected EBNF grammar. Each test
+# MAY be preceded by a comment that describes the test (e.g. the test name), followed by
+# the JSON schema and the expected EBNF grammar. If no comment is present, the test
+# name is the test's number in the file (e.g. "#0", "#1", etc.)
+#
+# Blank lines signify the end or start of a new test. Comments can be added
+# anywhere in the file, but they must be preceded by a '#' character and start at
+# the beginning of the line.
+
+# default
+{}
+root ::= value;
+
+{"properties": {}}
+root ::= value;
+
+# array
+{"properties": {"a": {"type": "array", "items": {"type": "string"}}}}
+root_0_tuple_0 ::= string;
+root_0         ::= "[" ( root_0_tuple_0 )* "]";
+root           ::= "{" "a" ":" root_0 "}";
+
+# array with nested array
+{"type": "array", "items": {"type": "array", "items": {"type": "string"}}}
+root_tuple_0_tuple_0 ::= string;
+root_tuple_0         ::= "[" ( root_tuple_0_tuple_0 )* "]";
+root                 ::= "[" ( root_tuple_0 )* "]";
+
+# object
+{"properties": {"e": {}}}
+root_0 ::= value;
+root   ::= "{" "e" ":" root_0 "}";
+
+# object with nested object
+{"properties": {"o": {"type": "object", "properties": {"e": {}}}}}
+root_0_0 ::= value;
+root_0   ::= "{" "e" ":" root_0_0 "}";
+root     ::= "{" "o" ":" root_0 "}";
+
+# boolean
+{"type": "boolean"}
+root ::= boolean;
+
+# number
+{"properties": {"n": {"type": "number", "minimum": 123, "maximum": 4567}}}
+root_0 ::= number;
+root   ::= "{" "n" ":" root_0 "}";
+
+# string
+{"type": "string"}
+root ::= string;
+
+# string with enum
+{"type": "string", "enum": ["a", "b", "c"]}
+root ::= ( "\"a\"" "|" "\"b\"" "|" "\"c\"" );
+
+# spaces in key
+{"properties": {"a b": {}}}
+root_0 ::= value;
+root   ::= "{" "a b" ":" root_0 "}";
+
+# issue7978
+{ "type": "object", "properties": { "steps": { "type": "array", "items": { "type": "object", "properties": { "explanation": { "type": "string" }, "output": { "type": "string" } }, "required": [ "explanation", "output" ], "additionalProperties": false } }, "final_answer": { "type": "string" } }, "required": [ "steps", "final_answer" ], "additionalProperties": false }
+root_0_tuple_0_0 ::= string;
+root_0_tuple_0_1 ::= string;
+root_0_tuple_0   ::= "{" "explanation" ":" root_0_tuple_0_0 "," "output" ":" root_0_tuple_0_1 "}";
+root_0           ::= "[" ( root_0_tuple_0 )* "]";
+root_1           ::= string;
+root             ::= "{" "steps" ":" root_0 "," "final_answer" ":" root_1 "}";
+
+# !! # special characters in key
+# !! {"properties": {"a!b": {}}}
+# !! !invalid character '!' in key
+# !! 

+ 4 - 4
llm/server.go

@@ -28,6 +28,7 @@ import (
 	"github.com/ollama/ollama/discover"
 	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
+	"github.com/ollama/ollama/grammar"
 	"github.com/ollama/ollama/llama"
 	"github.com/ollama/ollama/runners"
 )
@@ -713,9 +714,9 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 			}
 
 			// User provided a JSON schema
-			g := llama.SchemaToGrammar(req.Format)
-			if g == nil {
-				return fmt.Errorf("invalid JSON schema in format")
+			g, err := grammar.FromSchema(nil, req.Format)
+			if err != nil {
+				return fmt.Errorf("invalid JSON schema in format: %w", err)
 			}
 			request["grammar"] = string(g)
 		}
@@ -735,7 +736,6 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
 		req.Options.NumPredict = 10 * s.options.NumCtx
 	}
-
 	// Make sure the server is ready
 	status, err := s.getServerStatusRetry(ctx)
 	if err != nil {