Skip to content

Commit b910a54

Browse files
authored
feat: add native Ollama API support with "think" control (#277)
* add support for ollama's direct /api/chat processing
* add support for think and options parameters
* add 'ChatQwen3-8B' as default agent (thinking-disabled qwen model)
* remove comment
* clarify comment
1 parent 184977c commit b910a54

File tree

2 files changed

+60
-1
lines changed

2 files changed

+60
-1
lines changed

lua/gp/config.lua

Lines changed: 14 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -49,7 +49,7 @@ local config = {
4949
},
5050
ollama = {
5151
disable = true,
52-
endpoint = "http://localhost:11434/v1/chat/completions",
52+
endpoint = "http://localhost:11434/api/chat",
5353
secret = "dummy_secret",
5454
},
5555
lmstudio = {
@@ -206,6 +206,19 @@ local config = {
206206
-- system prompt (use this to specify the persona/role of the AI)
207207
system_prompt = "You are a general AI assistant.",
208208
},
209+
{
210+
provider = "ollama",
211+
name = "ChatQwen3-8B",
212+
chat = true,
213+
command = false,
214+
-- string with model name or table with model name and parameters
215+
model = {
216+
model = "qwen3:8b",
217+
think = false, -- toggle thinking mode for Ollama's thinking models
218+
},
219+
-- system prompt (use this to specify the persona/role of the AI)
220+
system_prompt = "You are a general AI assistant.",
221+
},
209222
{
210223
provider = "lmstudio",
211224
name = "ChatLMStudio",

lua/gp/dispatcher.lua

Lines changed: 46 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -169,6 +169,41 @@ D.prepare_payload = function(messages, model, provider)
169169
return payload
170170
end
171171

172+
if provider == "ollama" then
173+
local payload = {
174+
model = model.model,
175+
stream = true,
176+
messages = messages,
177+
}
178+
179+
if model.think ~= nil then
180+
payload.think = model.think
181+
end
182+
183+
local options = {}
184+
if model.temperature then
185+
options.temperature = math.max(0, math.min(2, model.temperature))
186+
end
187+
if model.top_p then
188+
options.top_p = math.max(0, math.min(1, model.top_p))
189+
end
190+
if model.min_p then
191+
options.min_p = math.max(0, math.min(1, model.min_p))
192+
end
193+
if model.num_ctx then
194+
options.num_ctx = model.num_ctx
195+
end
196+
if model.top_k then
197+
options.top_k = model.top_k
198+
end
199+
200+
if next(options) then
201+
payload.options = options
202+
end
203+
204+
return payload
205+
end
206+
172207
local output = {
173208
model = model.model,
174209
stream = true,
@@ -289,6 +324,15 @@ local query = function(buf, provider, payload, handler, on_exit, callback)
289324
end
290325
end
291326

327+
if qt.provider == "ollama" then
328+
if line:match('"message":') and line:match('"content":') then
329+
local success, decoded = pcall(vim.json.decode, line)
330+
if success and decoded.message and decoded.message.content then
331+
content = decoded.message.content
332+
end
333+
end
334+
end
335+
292336

293337
if content and type(content) == "string" then
294338
qt.response = qt.response .. content
@@ -408,6 +452,8 @@ local query = function(buf, provider, payload, handler, on_exit, callback)
408452
"api-key: " .. bearer,
409453
}
410454
endpoint = render.template_replace(endpoint, "{{model}}", payload.model)
455+
elseif provider == "ollama" then
456+
headers = {}
411457
else -- default to openai compatible headers
412458
headers = {
413459
"-H",

0 commit comments

Comments (0)