diff --git a/.config/ghostty/config b/.config/ghostty/config
index a5f8720..a87f56d 100644
--- a/.config/ghostty/config
+++ b/.config/ghostty/config
@@ -35,7 +35,6 @@
 keybind = alt+shift+j=resize_split:down,10
 keybind = alt+shift+k=resize_split:up,10
 keybind = alt+shift+l=resize_split:right,10
-keybind = alt+d=close_window
 keybind = alt+x=close_surface
 keybind = alt+c=close_surface
 keybind = alt+s=new_split:down
diff --git a/.config/nvim/lua/plugins/avante.lua b/.config/nvim/lua/plugins/avante.lua
index 8e80c70..50e53d5 100644
--- a/.config/nvim/lua/plugins/avante.lua
+++ b/.config/nvim/lua/plugins/avante.lua
@@ -1,3 +1,38 @@
+local function get_ollama_setup()
+  local ollama_setup = {
+    -- add any opts here
+    ---@type Provider
+    provider = 'gemini',
+    vendors = {
+      ---@type AvanteProvider
+      ollama = {
+        ['local'] = true,
+        endpoint = '127.0.0.1:11434/v1',
+        model = 'llama3.2',
+        parse_curl_args = function(opts, code_opts)
+          return {
+            url = opts.endpoint .. '/chat/completions',
+            headers = {
+              ['Accept'] = 'application/json',
+              ['Content-Type'] = 'application/json',
+            },
+            body = {
+              model = opts.model,
+              messages = require('avante.providers').copilot.parse_message(code_opts), -- you can make your own message, but this is very advanced
+              max_tokens = 2048,
+              stream = true,
+            },
+          }
+        end,
+        parse_response_data = function(data_stream, event_state, opts)
+          require('avante.providers').openai.parse_response(data_stream, event_state, opts)
+        end,
+      },
+    },
+  }
+  return ollama_setup
+end
+
 ---@type LazySpec
 return {
   {
@@ -7,39 +42,8 @@ return {
     build = 'make',
     opts = function()
       vim.env.GEMINI_API_KEY = require('plugins.config.util').get_age_credentials 'gemini_api.age'
-      local ollama_setup = {
-        -- add any opts here
-        ---@type Provider
-        provider = 'gemini',
-        vendors = {
-          ---@type AvanteProvider
-          ollama = {
-            ['local'] = true,
-            endpoint = '127.0.0.1:11434/v1',
-            model = 'llama3.2',
-            parse_curl_args = function(opts, code_opts)
-              return {
-                url = opts.endpoint .. '/chat/completions',
-                headers = {
-                  ['Accept'] = 'application/json',
-                  ['Content-Type'] = 'application/json',
-                },
-                body = {
-                  model = opts.model,
-                  messages = require('avante.providers').copilot.parse_message(code_opts), -- you can make your own message, but this is very advanced
-                  max_tokens = 2048,
-                  stream = true,
-                },
-              }
-            end,
-            parse_response_data = function(data_stream, event_state, opts)
-              require('avante.providers').openai.parse_response(data_stream, event_state, opts)
-            end,
-          },
-        },
-      }
       if not vim.env.GEMINI_API_KEY then
-        return ollama_setup
+        return get_ollama_setup()
       end
       return {
         ---@type Provider
@@ -131,8 +135,8 @@ return {
         display = { chat = { render_headers = false } },
         strategies = {
           --NOTE: Change the adapter as required
-          chat = { adapter = 'openai_compatible' },
-          inline = { adapter = 'openai_compatible' },
+          chat = { adapter = 'gemini' },
+          inline = { adapter = 'gemini' },
         },
         adapters = {
           openai_compatible = function()
@@ -150,6 +154,15 @@
             },
           })
         end,
+          gemini = function()
+            return require('codecompanion.adapters').extend('gemini', {
+              schema = {
+                model = {
+                  default = 'gemini-2.0-flash-exp',
+                },
+              },
+            })
+          end,
       },
     })
   end,
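
The avante.lua change is a refactor: the inline `ollama_setup` table is hoisted out of `opts` into a top-level `get_ollama_setup()` helper, and the fallback behavior stays the same, Gemini when the age-encrypted key decrypts, local Ollama otherwise. A minimal sketch of that pattern, where `build_opts` and the simplified return tables are illustrative stand-ins, not the real code from the diff:

    -- Sketch of the key-presence fallback used in avante.lua.
    -- The returned tables are stand-ins, not the full provider configs.
    local function get_ollama_setup()
      return { provider = 'ollama' } -- local vendor, needs no API key
    end

    local function build_opts()
      vim.env.GEMINI_API_KEY = require('plugins.config.util').get_age_credentials 'gemini_api.age'
      if not vim.env.GEMINI_API_KEY then
        return get_ollama_setup() -- key missing or undecryptable: go local
      end
      return { provider = 'gemini' } -- key present: use Gemini
    end

Worth noting in review: the hoisted table still sets `provider = 'gemini'` while only defining an `ollama` vendor; that is carried over verbatim from the removed inline table, so the refactor does not change it.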
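
On the codecompanion side, the default `chat` and `inline` strategies switch from `openai_compatible` to a new `gemini` adapter, built by extending the stock gemini adapter with `gemini-2.0-flash-exp` as the default model. The `openai_compatible` adapter stays defined, so flipping back per the `--NOTE` comment is a two-line change; a sketch, assuming the surrounding `require('codecompanion').setup({ ... })` call visible at the hunk edges:

    -- Illustrative revert: point both strategies back at the
    -- openai_compatible adapter that remains defined in this file.
    require('codecompanion').setup({
      strategies = {
        chat = { adapter = 'openai_compatible' },
        inline = { adapter = 'openai_compatible' },
      },
    })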