From bc46bb7869b3bf9cdeb48f70cb0695da4ad5d1be Mon Sep 17 00:00:00 2001
From: Beibei Zhao <39853223+curie71@users.noreply.github.com>
Date: Fri, 11 Jul 2025 22:46:27 +0800
Subject: [PATCH] add LOCAL_API_KEY environment variable to support APIs
 like ChatAnywhere

Reuse the existing local-model code to support OpenAI-compatible APIs
such as ChatAnywhere; an environment variable is all you need.
---
 README.md                    | 10 ++++++----
 internal/llm/models/local.go | 26 ++++++++++++++++++++++----
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index eee06acd..54e386da 100644
--- a/README.md
+++ b/README.md
@@ -112,6 +112,7 @@ You can configure OpenCode using environment variables:
 | `AZURE_OPENAI_API_KEY` | For Azure OpenAI models (optional when using Entra ID) |
 | `AZURE_OPENAI_API_VERSION` | For Azure OpenAI models |
 | `LOCAL_ENDPOINT` | For self-hosted models |
+| `LOCAL_API_KEY` | API key for the self-hosted model endpoint (optional) |
 | `SHELL` | Default shell to use (if not specified in config) |
 
 ### Shell Configuration
@@ -628,9 +629,11 @@ This is useful for developers who want to experiment with custom models.
 
 You can use a self-hosted model by setting the `LOCAL_ENDPOINT` environment variable.
 This will cause OpenCode to load and use the models from the specified endpoint.
+`LOCAL_API_KEY` is optional; set it to your API key if the endpoint requires one.
 
 ```bash
 LOCAL_ENDPOINT=http://localhost:1235/v1
+LOCAL_API_KEY=YOUR_API_KEY
 ```
 
 ### Configuring a self-hosted model
@@ -639,12 +642,11 @@ You can also configure a self-hosted model in the configuration file under the `
 
 ```json
 {
-  "agents": {
-    "coder": {
-      "model": "local.granite-3.3-2b-instruct@q8_0",
+    "task": {
+      "model": "local.o3",
+      "maxTokens": 100000,
       "reasoningEffort": "high"
     }
-  }
 }
 ```
 
diff --git a/internal/llm/models/local.go b/internal/llm/models/local.go
index db0ea11c..56d6db9f 100644
--- a/internal/llm/models/local.go
+++ b/internal/llm/models/local.go
@@ -22,7 +22,10 @@ const (
 )
 
 func init() {
-	if endpoint := os.Getenv("LOCAL_ENDPOINT"); endpoint != "" {
+	endpoint := os.Getenv("LOCAL_ENDPOINT")
+	apiKey := os.Getenv("LOCAL_API_KEY")
+
+	if endpoint != "" {
 		localEndpoint, err := url.Parse(endpoint)
 		if err != nil {
 			logging.Debug("Failed to parse local endpoint",
@@ -34,7 +37,7 @@ func init() {
 
 		load := func(url *url.URL, path string) []localModel {
 			url.Path = path
-			return listLocalModels(url.String())
+			return listLocalModels(url.String(), apiKey)
 		}
 
 		models := load(localEndpoint, lmStudioBetaModelsPath)
@@ -74,8 +77,23 @@ type localModel struct {
 	LoadedContextLength int64  `json:"loaded_context_length"`
 }
 
-func listLocalModels(modelsEndpoint string) []localModel {
-	res, err := http.Get(modelsEndpoint)
+func listLocalModels(modelsEndpoint string, apiKey string) []localModel {
+	client := &http.Client{}
+	req, err := http.NewRequest("GET", modelsEndpoint, nil)
+	if err != nil {
+		logging.Debug("Failed to create request for local models",
+			"error", err,
+			"endpoint", modelsEndpoint,
+		)
+		return []localModel{}
+	}
+
+	// Send the configured API key as an OpenAI-style bearer token.
+	if apiKey != "" {
+		req.Header.Add("Authorization", "Bearer "+apiKey)
+	}
+
+	res, err := client.Do(req)
 	if err != nil {
 		logging.Debug("Failed to list local models",
 			"error", err,
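
For reviewers who want to poke at the new header logic without a real endpoint, here is a minimal sketch (not part of the patch) that fakes a models endpoint with net/http/httptest and prints the Authorization header each request carries; the key value is a placeholder:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Fake OpenAI-compatible models endpoint that echoes whether an
	// Authorization header arrived.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Printf("Authorization: %q\n", r.Header.Get("Authorization"))
		w.Write([]byte(`{"data":[]}`))
	}))
	defer srv.Close()

	// Mirror listLocalModels: add the bearer token only when a key is set.
	for _, apiKey := range []string{"", "sk-placeholder"} {
		req, err := http.NewRequest("GET", srv.URL+"/v1/models", nil)
		if err != nil {
			panic(err)
		}
		if apiKey != "" {
			req.Header.Add("Authorization", "Bearer "+apiKey)
		}
		res, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		res.Body.Close()
	}
}
```

With LOCAL_API_KEY unset, the request goes out with no Authorization header at all, matching the old http.Get behavior, so purely local servers such as LM Studio are unaffected.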