@@ -7,7 +7,7 @@ BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
 # Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses.
 # The final response object will include statistics and additional data from the request. Use the callback function to override
 # the default handler.
-def generate(model_name, prompt, system=None, template=None, context=None, options=None, callback=None):
+def generate(model_name, prompt, system=None, template=None, format="", context=None, options=None, callback=None):
     try:
         url = f"{BASE_URL}/api/generate"
         payload = {
@@ -16,7 +16,8 @@ def generate(model_name, prompt, system=None, template=None, context=None, optio
             "system": system,
             "template": template,
             "context": context,
-            "options": options
+            "options": options,
+            "format": format,
         }
 
         # Remove keys with None values
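For context, a minimal usage sketch of the new parameter follows. It is not part of the patch; it assumes the file above is saved as client.py, an Ollama server is reachable at OLLAMA_HOST, and a model such as llama2 (a hypothetical choice) has already been pulled.

# Hypothetical usage sketch (not part of this diff): call generate() with the
# new format parameter to request JSON output, streaming chunks to a callback.
from client import generate

def on_chunk(chunk):
    # Each streamed response object carries a partial "response" string until "done" is true.
    print(chunk.get("response", ""), end="", flush=True)

result = generate(
    "llama2",                                # assumed model name
    "List three colors as a JSON object.",
    format="json",                           # new parameter added by this change; "" (default) or "json"
    callback=on_chunk,                       # overrides the default streaming handler
)
# The exact return value depends on the rest of the function, which lies outside this hunk.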