This commit is contained in:
Christian Nieves
2025-06-17 21:38:27 +00:00
parent 8345f91cc8
commit 036a55effc

View File

@@ -18,16 +18,7 @@ return {
     url = "sso://user/vintharas/avante-goose.nvim",
     cond = use_google(),
     opts = {
-      -- Add your options here
-      -- These are the defaults
-      auto_start_backend = true, -- Whether to automatically start go/devai-api-http-proxy. If false you can use :AvanteGooseServerStart to start the server
-      auto_start_silent = true, -- Whether to have a silent auto start (don't log status messages)
-      model = "goose-v3.5-s", -- Select model from go/goose-models.
-      temperature = 0.1, -- Model temperature
-      max_decoder_steps = 8192, -- Maximum decoder steps (This affects the token limit of the output. More decoder steps -> higher limit in output tokens)
-      endpoint = "http://localhost:8080/predict", -- Endpoint to start/listen to go/devai-api-http-proxy
-      debug = false, -- Enables debug mode (outputs lots of logs for troubleshooting issues)
-      debug_backend = false, -- Whether to start the backend in debug mode. This logs backend output information under stdpath('cache')/devai-http-wrapper.log
+      model = "gemini-for-google-2.5-pro", -- Select model from go/goose-models.
     },
   },
 },