Fix issues with `/set template` and `/set system` (#1486)

Jeffrey Morgan 1 year ago
commit 0a9d348023
5 changed files with 38 additions and 37 deletions
  1. README.md (+2 −2)
  2. cmd/cmd.go (+14 −12)
  3. docs/api.md (+2 −2)
  4. docs/modelfile.md (+19 −20)
  5. server/images.go (+1 −1)

+ 2 - 2
README.md

@@ -104,7 +104,7 @@ FROM llama2
 # set the temperature to 1 [higher is more creative, lower is more coherent]
 PARAMETER temperature 1

-# set the system prompt
+# set the system message
 SYSTEM """
 You are Mario from Super Mario Bros. Answer as Mario, the assistant, only.
 """
@@ -257,7 +257,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 ### Database

 - [MindsDB](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/handlers/ollama_handler/README.md)
-  
+
 ### Package managers

 - [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)

+ 14 - 12
cmd/cmd.go

@@ -654,7 +654,7 @@ func generateInteractive(cmd *cobra.Command, opts generateOptions) error {
 	usageSet := func() {
 		fmt.Fprintln(os.Stderr, "Available Commands:")
 		fmt.Fprintln(os.Stderr, "  /set parameter ...     Set a parameter")
-		fmt.Fprintln(os.Stderr, "  /set system <string>   Set system prompt")
+		fmt.Fprintln(os.Stderr, "  /set system <string>   Set system message")
 		fmt.Fprintln(os.Stderr, "  /set template <string> Set prompt template")
 		fmt.Fprintln(os.Stderr, "  /set history           Enable history")
 		fmt.Fprintln(os.Stderr, "  /set nohistory         Disable history")
@@ -672,7 +672,7 @@ func generateInteractive(cmd *cobra.Command, opts generateOptions) error {
 		fmt.Fprintln(os.Stderr, "  /show license      Show model license")
 		fmt.Fprintln(os.Stderr, "  /show license      Show model license")
 		fmt.Fprintln(os.Stderr, "  /show modelfile    Show Modelfile for this model")
 		fmt.Fprintln(os.Stderr, "  /show modelfile    Show Modelfile for this model")
 		fmt.Fprintln(os.Stderr, "  /show parameters   Show parameters for this model")
 		fmt.Fprintln(os.Stderr, "  /show parameters   Show parameters for this model")
-		fmt.Fprintln(os.Stderr, "  /show system       Show system prompt")
+		fmt.Fprintln(os.Stderr, "  /show system       Show system message")
 		fmt.Fprintln(os.Stderr, "  /show template     Show prompt template")
 		fmt.Fprintln(os.Stderr, "  /show template     Show prompt template")
 		fmt.Fprintln(os.Stderr, "")
 		fmt.Fprintln(os.Stderr, "")
 	}
 	}
@@ -733,9 +733,10 @@ func generateInteractive(cmd *cobra.Command, opts generateOptions) error {
 			// if the prompt so far starts with """ then we're in multiline mode
 			// and we need to keep reading until we find a line that ends with """
 			cut, found := strings.CutSuffix(line, `"""`)
-			prompt += cut + "\n"
+			prompt += cut

 			if !found {
+				prompt += "\n"
 				continue
 			}

@@ -746,11 +747,11 @@ func generateInteractive(cmd *cobra.Command, opts generateOptions) error {
 			case MultilineSystem:
 				opts.System = prompt
 				prompt = ""
-				fmt.Println("Set system template.")
+				fmt.Println("Set system message.")
 			case MultilineTemplate:
 				opts.Template = prompt
 				prompt = ""
-				fmt.Println("Set model template.")
+				fmt.Println("Set prompt template.")
 			}
 			multiline = MultilineNone
 		case strings.HasPrefix(line, `"""`) && len(prompt) == 0:
@@ -821,17 +822,18 @@ func generateInteractive(cmd *cobra.Command, opts generateOptions) error {
 					line = strings.TrimPrefix(line, `"""`)
 					if strings.HasPrefix(args[2], `"""`) {
 						cut, found := strings.CutSuffix(line, `"""`)
-						prompt += cut + "\n"
+						prompt += cut
 						if found {
-							opts.System = prompt
 							if args[1] == "system" {
-								fmt.Println("Set system template.")
+								opts.System = prompt
+								fmt.Println("Set system message.")
 							} else {
+								opts.Template = prompt
 								fmt.Println("Set prompt template.")
 							}
 							prompt = ""
 						} else {
-							prompt = `"""` + prompt
+							prompt = `"""` + prompt + "\n"
 							if args[1] == "system" {
 								multiline = MultilineSystem
 							} else {
@@ -841,7 +843,7 @@ func generateInteractive(cmd *cobra.Command, opts generateOptions) error {
 						}
 					} else {
 						opts.System = line
-						fmt.Println("Set system template.")
+						fmt.Println("Set system message.")
 					}
 				default:
 					fmt.Printf("Unknown command '/set %s'. Type /? for help\n", args[1])
@@ -893,7 +895,7 @@ func generateInteractive(cmd *cobra.Command, opts generateOptions) error {
 					case resp.System != "":
 						fmt.Println(resp.System + "\n")
 					default:
-						fmt.Print("No system prompt was specified for this model.\n\n")
+						fmt.Print("No system message was specified for this model.\n\n")
 					}
 				case "template":
 					switch {
@@ -1250,7 +1252,7 @@ func NewCLI() *cobra.Command {
 	showCmd.Flags().Bool("modelfile", false, "Show Modelfile of a model")
 	showCmd.Flags().Bool("modelfile", false, "Show Modelfile of a model")
 	showCmd.Flags().Bool("parameters", false, "Show parameters of a model")
 	showCmd.Flags().Bool("parameters", false, "Show parameters of a model")
 	showCmd.Flags().Bool("template", false, "Show template of a model")
 	showCmd.Flags().Bool("template", false, "Show template of a model")
-	showCmd.Flags().Bool("system", false, "Show system prompt of a model")
+	showCmd.Flags().Bool("system", false, "Show system message of a model")
 
 
 	runCmd := &cobra.Command{
 	runCmd := &cobra.Command{
 		Use:     "run MODEL [PROMPT]",
 		Use:     "run MODEL [PROMPT]",
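
Taken together, the `cmd/cmd.go` hunks fix two behaviors: the trailing `"\n"` is now appended only when a multiline block continues (so the captured text no longer ends with a stray newline once the closing `"""` arrives), and `/set template` now assigns `opts.Template` instead of unconditionally writing to `opts.System`. A minimal standalone sketch of the corrected newline handling (the `capture` helper is hypothetical, simplified from the interactive loop above):

```go
package main

import (
	"fmt"
	"strings"
)

// capture accumulates lines of a `"""`-delimited block, joining continued
// lines with "\n" and stopping, without a trailing newline, at the line
// that ends with the closing `"""`.
func capture(lines []string) string {
	var prompt string
	for _, line := range lines {
		cut, found := strings.CutSuffix(line, `"""`)
		prompt += cut
		if !found {
			prompt += "\n" // only lines that continue the block get a newline
			continue
		}
		break // closing `"""` found; do not append "\n"
	}
	return prompt
}

func main() {
	got := capture([]string{"You are Mario.", `Answer as Mario."""`})
	fmt.Printf("%q\n", got) // "You are Mario.\nAnswer as Mario."
}
```

With the pre-fix order (`prompt += cut + "\n"` before checking `found`), the same input would yield `"You are Mario.\nAnswer as Mario.\n"`, and that trailing newline ended up in the saved system message or template.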

+ 2 - 2
docs/api.md

@@ -44,7 +44,7 @@ Advanced parameters (optional):

 - `format`: the format to return a response in. Currently the only accepted value is `json`
 - `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
-- `system`: system prompt to (overrides what is defined in the `Modelfile`)
+- `system`: system message (overrides what is defined in the `Modelfile`)
 - `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
 - `context`: the context parameter returned from a previous request to `/generate`, this can be used to keep a short conversational memory
 - `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects
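
As a usage sketch, the `system` field can be set per request to override the `SYSTEM` message baked into the Modelfile. A minimal example (assumes Ollama's default local endpoint on port 11434 and a pulled `llama2` model; adjust to your setup):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]any{
		"model":  "llama2",
		"prompt": "Who are you?",
		// overrides the SYSTEM message defined in the Modelfile
		"system": "You are Mario from Super Mario Bros.",
		"stream": false, // return a single JSON object instead of a stream
	})
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Response string `json:"response"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Response)
}
```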
@@ -548,7 +548,7 @@ A single JSON object will be returned.
 POST /api/show
 ```

-Show details about a model including modelfile, template, parameters, license, and system prompt.
+Show details about a model including modelfile, template, parameters, license, and system message.

 ### Parameters


+ 19 - 20
docs/modelfile.md

@@ -30,14 +30,14 @@ The format of the `Modelfile`:
 INSTRUCTION arguments
 ```

-| Instruction                         | Description                                                   |
-| ----------------------------------- | ------------------------------------------------------------- |
-| [`FROM`](#from-required) (required) | Defines the base model to use.                                |
-| [`PARAMETER`](#parameter)           | Sets the parameters for how Ollama will run the model.        |
-| [`TEMPLATE`](#template)             | The full prompt template to be sent to the model.             |
-| [`SYSTEM`](#system)                 | Specifies the system prompt that will be set in the template. |
-| [`ADAPTER`](#adapter)               | Defines the (Q)LoRA adapters to apply to the model.           |
-| [`LICENSE`](#license)               | Specifies the legal license.                                  |
+| Instruction                         | Description                                                    |
+| ----------------------------------- | -------------------------------------------------------------- |
+| [`FROM`](#from-required) (required) | Defines the base model to use.                                 |
+| [`PARAMETER`](#parameter)           | Sets the parameters for how Ollama will run the model.         |
+| [`TEMPLATE`](#template)             | The full prompt template to be sent to the model.              |
+| [`SYSTEM`](#system)                 | Specifies the system message that will be set in the template. |
+| [`ADAPTER`](#adapter)               | Defines the (Q)LoRA adapters to apply to the model.            |
+| [`LICENSE`](#license)               | Specifies the legal license.                                   |

 ## Examples

@@ -52,7 +52,7 @@ PARAMETER temperature 1
 # sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
 PARAMETER num_ctx 4096

-# sets a custom system prompt to specify the behavior of the chat assistant
+# sets a custom system message to specify the behavior of the chat assistant
 SYSTEM You are Mario from super mario bros, acting as an assistant.
 ```

@@ -70,9 +70,9 @@ More examples are available in the [examples directory](../examples).
 There are two ways to view `Modelfile`s underlying the models in [ollama.ai/library][1]:

 - Option 1: view a details page from a model's tags page:
-   1. Go to a particular model's tags (e.g. https://ollama.ai/library/llama2/tags)
-   2. Click on a tag (e.g. https://ollama.ai/library/llama2:13b)
-   3. Scroll down to "Layers"
+  1.  Go to a particular model's tags (e.g. https://ollama.ai/library/llama2/tags)
+  2.  Click on a tag (e.g. https://ollama.ai/library/llama2:13b)
+  3.  Scroll down to "Layers"
      - Note: if the [`FROM` instruction](#from-required) is not present,
        it means the model was created from a local file
 - Option 2: use `ollama show` to print the `Modelfile` like so:
@@ -150,18 +150,17 @@ PARAMETER <parameter> <parametervalue>
 | top_k          | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)                                                                        | int        | top_k 40             |
 | top_p          | Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)                                                                 | float      | top_p 0.9            |

-
 ### TEMPLATE

-`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system prompt and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific. You can usually find the template for a given model in the readme for that model.
+`TEMPLATE` defines the full prompt template to be passed into the model. It may optionally include a system message and a user's prompt. This is used to create a full custom prompt, and syntax may be model-specific. You can usually find the template for a given model in the readme for that model.

 #### Template Variables

-| Variable        | Description                                                                                                  |
-| --------------- | ------------------------------------------------------------------------------------------------------------ |
-| `{{ .System }}` | The system prompt used to specify custom behavior, this must also be set in the Modelfile as an instruction. |
-| `{{ .Prompt }}` | The incoming prompt, this is not specified in the model file and will be set based on input.                 |
-| `{{ .First }}`  | A boolean value used to render specific template information for the first generation of a session.          |
+| Variable        | Description                                                                                                   |
+| --------------- | ------------------------------------------------------------------------------------------------------------- |
+| `{{ .System }}` | The system message used to specify custom behavior; this must also be set in the Modelfile as an instruction. |
+| `{{ .Prompt }}` | The incoming prompt; this is not specified in the model file and will be set based on input.                  |
+| `{{ .First }}`  | A boolean value used to render specific template information for the first generation of a session.           |

 ```modelfile
 TEMPLATE """
@@ -181,7 +180,7 @@ SYSTEM """<system message>"""

 ### SYSTEM

-The `SYSTEM` instruction specifies the system prompt to be used in the template, if applicable.
+The `SYSTEM` instruction specifies the system message to be used in the template, if applicable.

 ```modelfile
 SYSTEM """<system message>"""

+ 1 - 1
server/images.go

@@ -66,7 +66,7 @@ func (m *Model) Prompt(p PromptVars) (string, error) {
 	}

 	if p.System == "" {
-		// use the default system prompt for this model if one is not specified
+		// use the default system message for this model if one is not specified
 		p.System = m.System
 	}
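
For context, `Model.Prompt` renders the model's template with these values, so the fallback above is what makes `SYSTEM` in the Modelfile act as a default. A minimal sketch of that flow (the `promptVars` struct and template string here are illustrative assumptions, not the actual server types):

```go
package main

import (
	"os"
	"text/template"
)

type promptVars struct {
	System string
	Prompt string
}

func main() {
	modelDefault := "You are Mario from Super Mario Bros." // from the Modelfile's SYSTEM
	vars := promptVars{Prompt: "Who are you?"}             // no per-request system message

	if vars.System == "" {
		// use the default system message for this model if one is not specified
		vars.System = modelDefault
	}

	// render {{ .System }} and {{ .Prompt }} the way a prompt TEMPLATE would
	tmpl := template.Must(template.New("prompt").Parse("{{ .System }}\n\nUser: {{ .Prompt }}\n"))
	if err := tmpl.Execute(os.Stdout, vars); err != nil {
		panic(err)
	}
}
```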