Ian e3f7e5706b Add config option for controlling Ollama think parameter (#146000)
* Add config option for controlling Ollama think parameter

Allows enabling or disabling thinking for supported models. Neither option
will display thinking content in the chat. Future support for displaying
thinking content will require frontend changes for formatting.

* Add thinking strings
2025-06-03 20:42:16 -07:00
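
For context, a minimal sketch of how such an option might be threaded into the request, assuming the integration forwards it to the `ollama` Python client (whose chat() call accepts a think keyword in recent releases). CONF_THINK and the surrounding plumbing here are illustrative assumptions, not the integration's actual code:

import ollama

CONF_THINK = "think"  # assumed option key; mirrors the "think" key in strings.json below


async def generate(
    client: ollama.AsyncClient,
    model: str,
    messages: list[dict],
    options: dict,
) -> ollama.ChatResponse:
    # Leave `think` unset (None) when the user has not chosen either way,
    # so the model's own default behavior applies.
    return await client.chat(
        model=model,
        messages=messages,
        think=options.get(CONF_THINK),
    )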


{
  "config": {
    "step": {
      "user": {
        "data": {
          "url": "[%key:common::config_flow::data::url%]",
          "model": "Model"
        }
      },
      "download": {
        "title": "Downloading model"
      }
    },
    "abort": {
      "download_failed": "Model downloading failed"
    },
    "error": {
      "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
      "unknown": "[%key:common::config_flow::error::unknown%]"
    },
    "progress": {
      "download": "Please wait while the model is downloaded, which may take a very long time. Check your Ollama server logs for more details."
    }
  },
  "options": {
    "step": {
      "init": {
        "data": {
          "prompt": "Instructions",
          "llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]",
          "max_history": "Max history messages",
          "num_ctx": "Context window size",
          "keep_alive": "Keep alive",
          "think": "Think before responding"
        },
        "data_description": {
          "prompt": "Instruct how the LLM should respond. This can be a template.",
          "keep_alive": "Duration in seconds for Ollama to keep the model in memory. -1 = indefinite, 0 = never.",
          "num_ctx": "Maximum number of text tokens the model can process. Lower this to reduce Ollama's RAM usage, or increase it for a large number of exposed entities.",
          "think": "If enabled, the LLM will think before responding. This can improve response quality but may increase latency."
        }
      }
    }
  }
}
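
The [%key:...] values above are references to Home Assistant's common translation strings and are resolved at build time. The labels under options.step.init.data are matched to schema keys in the options flow; below is a hedged sketch of the field that would pick up the new "think" label. The schema fragment is an assumption for illustration, not the integration's actual code:

import voluptuous as vol

from homeassistant.helpers.selector import BooleanSelector

CONF_THINK = "think"

# Hypothetical options-flow schema fragment; Home Assistant renders
# "Think before responding" as this field's label and the matching
# data_description entry as its helper text.
OPTIONS_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_THINK): BooleanSelector(),
    }
)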