From d7db20ff0a807ac9bb01a818f4b26b009903de61 Mon Sep 17 00:00:00 2001 From: "Artem Obukhov (aider)" Date: Mon, 16 Jun 2025 15:41:02 +0400 Subject: [PATCH 1/8] feat(session): add session deletion flow with Ctrl+P and overlay dialog --- .gitignore | 1 + README.md | 7 ++- internal/llm/models/bootstrap.go | 16 +++++++ internal/llm/models/local.go | 81 +++++++++++++++++++++++++------- internal/llm/models/models.go | 13 ----- internal/tui/tui.go | 71 +++++++++++++++++++++++++++- 6 files changed, 156 insertions(+), 33 deletions(-) create mode 100644 internal/llm/models/bootstrap.go diff --git a/.gitignore b/.gitignore index 36ff9c73..703998b0 100644 --- a/.gitignore +++ b/.gitignore @@ -44,3 +44,4 @@ Thumbs.db .opencode/ opencode +.aider* diff --git a/README.md b/README.md index b98c1830..273e6f8e 100644 --- a/README.md +++ b/README.md @@ -309,6 +309,7 @@ The output format is implemented as a strongly-typed `OutputFormat` in the codeb | `?` | Toggle help dialog (when not in editing mode) | | `Ctrl+L` | View logs | | `Ctrl+A` | Switch session | +| `Ctrl+P` | Prune session | | `Ctrl+K` | Command dialog | | `Ctrl+O` | Toggle model selection dialog | | `Esc` | Close current overlay/dialog or return to previous mode | @@ -581,11 +582,13 @@ This is useful for developers who want to experiment with custom models. ### Configuring a self-hosted provider -You can use a self-hosted model by setting the `LOCAL_ENDPOINT` environment variable. -This will cause OpenCode to load and use the models from the specified endpoint. +You can use a self-hosted model by setting the `LOCAL_ENDPOINT` environment variable and, if the endpoint requires authentication, `LOCAL_ENDPOINT_API_KEY`. +This will cause OpenCode to load and use the models from the specified endpoint. When a model is loaded, OpenCode tries to inherit settings +such as the context window and capability flags from a predefined model whose API name appears in the local model ID. 
```bash LOCAL_ENDPOINT=http://localhost:1235/v1 +LOCAL_ENDPOINT_API_KEY=secret ``` ### Configuring a self-hosted model diff --git a/internal/llm/models/bootstrap.go b/internal/llm/models/bootstrap.go new file mode 100644 index 00000000..82166b39 --- /dev/null +++ b/internal/llm/models/bootstrap.go @@ -0,0 +1,16 @@ +package models + +import "maps" + +func init() { + maps.Copy(SupportedModels, AnthropicModels) + maps.Copy(SupportedModels, OpenAIModels) + maps.Copy(SupportedModels, GeminiModels) + maps.Copy(SupportedModels, GroqModels) + maps.Copy(SupportedModels, AzureModels) + maps.Copy(SupportedModels, OpenRouterModels) + maps.Copy(SupportedModels, XAIModels) + maps.Copy(SupportedModels, VertexAIGeminiModels) + + initLocalModels() +} diff --git a/internal/llm/models/local.go b/internal/llm/models/local.go index 5d8412c8..a2b4145f 100644 --- a/internal/llm/models/local.go +++ b/internal/llm/models/local.go @@ -3,6 +3,7 @@ package models import ( "cmp" "encoding/json" + "fmt" "net/http" "net/url" "os" @@ -21,7 +22,7 @@ const ( lmStudioBetaModelsPath = "api/v0/models" ) -func init() { +func initLocalModels() { if endpoint := os.Getenv("LOCAL_ENDPOINT"); endpoint != "" { localEndpoint, err := url.Parse(endpoint) if err != nil { @@ -33,7 +34,8 @@ func init() { } load := func(url *url.URL, path string) []localModel { - url.Path = path + url = url.JoinPath(path) + logging.Debug(fmt.Sprintf("Trying to load models from %s", url)) return listLocalModels(url.String()) } @@ -43,16 +45,22 @@ func init() { models = load(localEndpoint, localModelsPath) } - if len(models) == 0 { + if c := len(models); c == 0 { logging.Debug("No local models found", "endpoint", endpoint, ) return + } else { + logging.Debug(fmt.Sprintf("%d local models found", c)) } loadLocalModels(models) - viper.SetDefault("providers.local.apiKey", "dummy") + if token, ok := os.LookupEnv("LOCAL_ENDPOINT_API_KEY"); ok { + viper.SetDefault("providers.local.apiKey", token) + } else { + viper.SetDefault("providers.local.apiKey", "dummy") + } ProviderPopularity[ProviderLocal] = 0 } } @@ -75,7 +83,25 @@ type localModel struct { } func listLocalModels(modelsEndpoint string) []localModel { - res, err := http.Get(modelsEndpoint) + token := os.Getenv("LOCAL_ENDPOINT_API_KEY") + var ( + res *http.Response + err error + ) + if token != "" { + req, reqErr := http.NewRequest("GET", modelsEndpoint, nil) + if reqErr != nil { + logging.Debug("Failed to create local models request", + "error", reqErr, + "endpoint", modelsEndpoint, + ) + return nil + } + req.Header.Set("Authorization", "Bearer "+token) + res, err = http.DefaultClient.Do(req) + } else { + res, err = http.Get(modelsEndpoint) + } if err != nil { logging.Debug("Failed to list local models", "error", err, @@ -122,7 +148,8 @@ func listLocalModels(modelsEndpoint string) []localModel { func loadLocalModels(models []localModel) { for i, m := range models { - model := convertLocalModel(m) + source := tryResolveSource(m.ID) + model := convertLocalModel(m, source) SupportedModels[model.ID] = model if i == 0 || m.State == "loaded" { @@ -134,16 +161,38 @@ func loadLocalModels(models []localModel) { } } -func convertLocalModel(model localModel) Model { - return Model{ - ID: ModelID("local." 
+ model.ID), - Name: friendlyModelName(model.ID), - Provider: ProviderLocal, - APIModel: model.ID, - ContextWindow: cmp.Or(model.LoadedContextLength, 4096), - DefaultMaxTokens: cmp.Or(model.LoadedContextLength, 4096), - CanReason: true, - SupportsAttachments: true, +func tryResolveSource(localID string) *Model { + for _, model := range SupportedModels { + if strings.Contains(localID, model.APIModel) { + return &model + } + } + return nil +} + +func convertLocalModel(model localModel, source *Model) Model { + if source != nil { + return Model{ + ID: ModelID("local." + model.ID), + Name: source.Name, + Provider: ProviderLocal, + APIModel: model.ID, + ContextWindow: cmp.Or(source.ContextWindow, 4096), + DefaultMaxTokens: cmp.Or(source.DefaultMaxTokens, 4096), + CanReason: source.CanReason, + SupportsAttachments: source.SupportsAttachments, + } + } else { + return Model{ + ID: ModelID("local." + model.ID), + Name: friendlyModelName(model.ID), + Provider: ProviderLocal, + APIModel: model.ID, + ContextWindow: cmp.Or(model.LoadedContextLength, 4096), + DefaultMaxTokens: cmp.Or(model.LoadedContextLength, 4096), + CanReason: true, + SupportsAttachments: true, + } } } diff --git a/internal/llm/models/models.go b/internal/llm/models/models.go index 47d21718..73e40de8 100644 --- a/internal/llm/models/models.go +++ b/internal/llm/models/models.go @@ -1,7 +1,5 @@ package models -import "maps" - type ( ModelID string ModelProvider string @@ -83,14 +81,3 @@ var SupportedModels = map[ModelID]Model{ CostPer1MOut: 15.0, }, } - -func init() { - maps.Copy(SupportedModels, AnthropicModels) - maps.Copy(SupportedModels, OpenAIModels) - maps.Copy(SupportedModels, GeminiModels) - maps.Copy(SupportedModels, GroqModels) - maps.Copy(SupportedModels, AzureModels) - maps.Copy(SupportedModels, OpenRouterModels) - maps.Copy(SupportedModels, XAIModels) - maps.Copy(SupportedModels, VertexAIGeminiModels) -} diff --git a/internal/tui/tui.go b/internal/tui/tui.go index 1c9c2f03..4380af8e 100644 --- a/internal/tui/tui.go +++ b/internal/tui/tui.go @@ -33,6 +33,7 @@ type keyMap struct { Filepicker key.Binding Models key.Binding SwitchTheme key.Binding + PruneSession key.Binding } type startCompactSessionMsg struct{} @@ -78,6 +79,11 @@ var keys = keyMap{ key.WithKeys("ctrl+t"), key.WithHelp("ctrl+t", "switch theme"), ), + + PruneSession: key.NewBinding( + key.WithKeys("ctrl+p"), + key.WithHelp("ctrl+p", "delete session"), + ), } var helpEsc = key.NewBinding( @@ -117,6 +123,9 @@ type appModel struct { showSessionDialog bool sessionDialog dialog.SessionDialog + showDeleteSessionDialog bool + deleteSessionDialog dialog.SessionDialog + showCommandDialog bool commandDialog dialog.CommandDialog commands []dialog.Command @@ -163,6 +172,8 @@ func (a appModel) Init() tea.Cmd { cmds = append(cmds, cmd) cmd = a.themeDialog.Init() cmds = append(cmds, cmd) + cmd = a.deleteSessionDialog.Init() + cmds = append(cmds, cmd) // Check if we should show the init dialog cmds = append(cmds, func() tea.Msg { @@ -296,6 +307,10 @@ func (a appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { return a, nil case dialog.CloseSessionDialogMsg: + if a.showDeleteSessionDialog { + a.showDeleteSessionDialog = false + return a, nil + } a.showSessionDialog = false return a, nil @@ -400,6 +415,18 @@ func (a appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { a.selectedSession = msg.Payload } case dialog.SessionSelectedMsg: + // if we're in "delete" mode, delete instead of switch + if a.showDeleteSessionDialog { + a.showDeleteSessionDialog = false + return a, func() 
tea.Msg { + ctx := context.Background() + if err := a.app.Sessions.Delete(ctx, msg.Session.ID); err != nil { + return util.InfoMsg{Type: util.InfoTypeError, Msg: "Delete failed: " + err.Error()} + } + return util.InfoMsg{Type: util.InfoTypeInfo, Msg: "Session deleted"} + } + } + // otherwise fall through to normal "switch session" a.showSessionDialog = false if a.currentPage == page.ChatPage { return a, util.CmdHandler(chat.SessionSelectedMsg(msg.Session)) @@ -518,6 +545,20 @@ func (a appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { return a, a.themeDialog.Init() } return a, nil + case key.Matches(msg, keys.PruneSession): + if a.currentPage == page.ChatPage && !a.showQuit && !a.showPermissions && + !a.showSessionDialog && !a.showCommandDialog { + sessions, err := a.app.Sessions.List(context.Background()) + if err != nil { + return a, util.ReportError(err) + } + if len(sessions) == 0 { + return a, util.ReportWarn("No sessions available") + } + a.deleteSessionDialog.SetSessions(sessions) + a.showDeleteSessionDialog = true + } + return a, nil case key.Matches(msg, returnKey) || key.Matches(msg): if msg.String() == quitKey { if a.currentPage == page.LogsPage { @@ -616,6 +657,16 @@ func (a appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } } + if a.showDeleteSessionDialog { + d, cmd := a.deleteSessionDialog.Update(msg) + a.deleteSessionDialog = d.(dialog.SessionDialog) + cmds = append(cmds, cmd) + // block other tea.KeyMsgs + if _, ok := msg.(tea.KeyMsg); ok { + return a, tea.Batch(cmds...) + } + } + if a.showCommandDialog { d, commandCmd := a.commandDialog.Update(msg) a.commandDialog = d.(dialog.CommandDialog) @@ -824,6 +875,21 @@ func (a appModel) View() string { ) } + if a.showDeleteSessionDialog { + overlay := a.deleteSessionDialog.View() + row := lipgloss.Height(appView) / 2 + row -= lipgloss.Height(overlay) / 2 + col := lipgloss.Width(appView) / 2 + col -= lipgloss.Width(overlay) / 2 + appView = layout.PlaceOverlay( + col, + row, + overlay, + appView, + true, + ) + } + if a.showModelDialog { overlay := a.modelDialog.View() row := lipgloss.Height(appView) / 2 @@ -906,8 +972,9 @@ func New(app *app.App) tea.Model { status: core.NewStatusCmp(app.LSPClients), help: dialog.NewHelpCmp(), quit: dialog.NewQuitCmp(), - sessionDialog: dialog.NewSessionDialogCmp(), - commandDialog: dialog.NewCommandDialogCmp(), + sessionDialog: dialog.NewSessionDialogCmp(), + deleteSessionDialog: dialog.NewSessionDialogCmp(), + commandDialog: dialog.NewCommandDialogCmp(), modelDialog: dialog.NewModelDialogCmp(), permissions: dialog.NewPermissionDialogCmp(), initDialog: dialog.NewInitDialogCmp(), From 76b61414b43e7a74cbdfb99b3f60ef33062f16f8 Mon Sep 17 00:00:00 2001 From: Artem Obukhov Date: Wed, 18 Jun 2025 16:24:32 +0400 Subject: [PATCH 2/8] feat(litellm): added proxy setup, customise baseURL and header, add antropic to vertexai --- OpenCode.md | 23 +++++++ README.md | 19 ++++++ go.mod | 10 ++- go.sum | 10 +++ internal/config/config.go | 6 +- internal/llm/agent/agent.go | 8 +++ internal/llm/models/anthropic.go | 4 +- internal/llm/models/bootstrap.go | 1 + internal/llm/models/local.go | 33 +++++---- internal/llm/models/vertexai.go | 33 +++++++++ internal/llm/provider/anthropic.go | 59 +++++++++++++--- internal/llm/provider/gemini.go | 8 +++ internal/llm/provider/openai.go | 23 ++----- internal/llm/provider/provider.go | 56 ++++++++++----- internal/llm/provider/vertexai.go | 105 ++++++++++++++++++++++++++++- 15 files changed, 330 insertions(+), 68 deletions(-) create mode 100644 OpenCode.md diff --git 
a/OpenCode.md b/OpenCode.md new file mode 100644 index 00000000..10dbea3a --- /dev/null +++ b/OpenCode.md @@ -0,0 +1,23 @@ +# OpenCode Development Guide + +## Build/Test Commands +- `go build -o opencode` - Build the project +- `go test ./...` - Run all tests +- `go test ./internal/llm/prompt` - Run specific package tests +- `go test -v ./internal/llm/prompt` - Run single test with verbose output +- `go test -run TestFunctionName` - Run specific test function +- `go vet ./...` - Static analysis +- `go mod tidy` - Clean up dependencies +- `./opencode` - Run the built binary + +## Code Style Guidelines +- **Imports**: Group stdlib, 3rd party, then internal packages with blank lines between groups +- **Naming**: Use camelCase for private, PascalCase for public; descriptive names preferred +- **Types**: Define custom types for clarity (e.g., `type AgentName string`, `type MCPType string`) +- **Constants**: Group related constants in const blocks with descriptive comments +- **Error Handling**: Always handle errors explicitly; use `require.NoError(t, err)` in tests +- **Testing**: Use testify/assert and testify/require; include `t.Parallel()` for parallel tests +- **Comments**: Package comments start with "Package name"; use descriptive function comments +- **Structure**: Follow standard Go project layout with `internal/` for private packages +- **JSON Tags**: Always include json tags for structs that marshal/unmarshal +- **Context**: Pass context.Context as first parameter for functions that need it diff --git a/README.md b/README.md index 273e6f8e..c4704414 100644 --- a/README.md +++ b/README.md @@ -243,6 +243,8 @@ OpenCode supports a variety of AI models from different providers: - Gemini 2.5 - Gemini 2.5 Flash +- Anthropic Sonnet 4 +- Anthropic Opus 4 ## Usage @@ -591,6 +593,23 @@ LOCAL_ENDPOINT=http://localhost:1235/v1 LOCAL_ENDPOINT_API_KEY=secret ``` +### Using LiteLLM Proxy +It is possible to use LiteLLM as a passthrough proxy by providing `baseURL` and auth header to provider configuration: +```json +{ + "providers": { + "vertexai": { + "apiKey": "", + "disabled": false, + "baseURL": "https://localhost/vertex_ai" + "headers": { + "x-litellm-api-key": "litellm-api-key" + } + } + } +} +``` + ### Configuring a self-hosted model You can also configure a self-hosted model in the configuration file under the `agents` section: diff --git a/go.mod b/go.mod index 82994450..f63b8a03 100644 --- a/go.mod +++ b/go.mod @@ -33,9 +33,17 @@ require ( github.com/stretchr/testify v1.10.0 ) +require ( + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/time v0.8.0 // indirect + google.golang.org/api v0.215.0 // indirect +) + require ( cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth v0.13.0 cloud.google.com/go/compute/metadata v0.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect diff --git a/go.sum b/go.sum index 8b7e3074..cf58a701 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= cloud.google.com/go/auth v0.13.0/go.mod 
h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= @@ -250,6 +252,8 @@ github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= @@ -289,6 +293,8 @@ golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -329,11 +335,15 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.215.0 h1:jdYF4qnyczlEz2ReWIsosNLDuzXyvFHJtI5gcr0J7t0= +google.golang.org/api v0.215.0/go.mod h1:fta3CVtuJYOEdugLNWm6WodzOS8KdFckABwN4I40hzY= google.golang.org/genai v1.3.0 h1:tXhPJF30skOjnnDY7ZnjK3q7IKy4PuAlEA0fk7uEaEI= google.golang.org/genai 
v1.3.0/go.mod h1:TyfOKRz/QyCaj6f/ZDt505x+YreXnY40l2I6k8TvgqY= google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= diff --git a/internal/config/config.go b/internal/config/config.go index 5a0905bb..b02278ca 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -51,8 +51,10 @@ type Agent struct { // Provider defines configuration for an LLM provider. type Provider struct { - APIKey string `json:"apiKey"` - Disabled bool `json:"disabled"` + APIKey string `json:"apiKey"` + Disabled bool `json:"disabled"` + BaseURL string `json:"baseURL"` + Headers map[string]string `json:"headers,omitempty"` } // Data defines storage configuration. diff --git a/internal/llm/agent/agent.go b/internal/llm/agent/agent.go index 4f31fe75..c899de8e 100644 --- a/internal/llm/agent/agent.go +++ b/internal/llm/agent/agent.go @@ -709,12 +709,20 @@ func createAgentProvider(agentName config.AgentName) (provider.Provider, error) if agentConfig.MaxTokens > 0 { maxTokens = agentConfig.MaxTokens } + opts := []provider.ProviderClientOption{ provider.WithAPIKey(providerCfg.APIKey), provider.WithModel(model), provider.WithSystemMessage(prompt.GetAgentPrompt(agentName, model.Provider)), provider.WithMaxTokens(maxTokens), } + if providerCfg.BaseURL != "" { + opts = append(opts, provider.WithBaseURL(providerCfg.BaseURL)) + } + if len(providerCfg.Headers) != 0 { + opts = append(opts, provider.WithHeaders(providerCfg.Headers)) + } + if model.Provider == models.ProviderOpenAI || model.Provider == models.ProviderLocal && model.CanReason { opts = append( opts, diff --git a/internal/llm/models/anthropic.go b/internal/llm/models/anthropic.go index 9da03a83..3bb31ffe 100644 --- a/internal/llm/models/anthropic.go +++ b/internal/llm/models/anthropic.go @@ -91,7 +91,7 @@ var AnthropicModels = map[ModelID]Model{ CostPer1MOutCached: 0.30, CostPer1MOut: 15.0, ContextWindow: 200000, - DefaultMaxTokens: 50000, + DefaultMaxTokens: 64000, CanReason: true, SupportsAttachments: true, }, @@ -105,7 +105,7 @@ var AnthropicModels = map[ModelID]Model{ CostPer1MOutCached: 1.50, CostPer1MOut: 75.0, ContextWindow: 200000, - DefaultMaxTokens: 4096, + DefaultMaxTokens: 32000, SupportsAttachments: true, }, } diff --git a/internal/llm/models/bootstrap.go b/internal/llm/models/bootstrap.go index 82166b39..5f535f3b 100644 --- a/internal/llm/models/bootstrap.go +++ b/internal/llm/models/bootstrap.go @@ -11,6 +11,7 @@ func init() { maps.Copy(SupportedModels, OpenRouterModels) maps.Copy(SupportedModels, XAIModels) maps.Copy(SupportedModels, VertexAIGeminiModels) + maps.Copy(SupportedModels, VertexAIAnthropicModels) initLocalModels() } diff --git a/internal/llm/models/local.go b/internal/llm/models/local.go index a2b4145f..6520c01e 100644 --- a/internal/llm/models/local.go +++ b/internal/llm/models/local.go @@ -91,7 +91,7 @@ func listLocalModels(modelsEndpoint string) []localModel { if token != "" { req, reqErr := http.NewRequest("GET", modelsEndpoint, nil) if reqErr != nil { - logging.Debug("Failed to create local models request", + logging.Warn("Failed to create local models request", "error", reqErr, "endpoint", modelsEndpoint, ) @@ -102,27 +102,30 @@ func listLocalModels(modelsEndpoint string) []localModel { } else { res, err = http.Get(modelsEndpoint) } - if err != nil { - logging.Debug("Failed to list local models", + if err != nil || res == nil { + logging.Warn("Failed to list local models", "error", err, "endpoint", modelsEndpoint, ) + return nil } defer 
res.Body.Close() if res.StatusCode != http.StatusOK { - logging.Debug("Failed to list local models", + logging.Warn("Failed to list local models", "status", res.StatusCode, "endpoint", modelsEndpoint, ) + return nil } var modelList localModelList if err = json.NewDecoder(res.Body).Decode(&modelList); err != nil { - logging.Debug("Failed to list local models", + logging.Warn("Failed to list local models", "error", err, "endpoint", modelsEndpoint, ) + return nil } var supportedModels []localModel @@ -172,16 +175,12 @@ func tryResolveSource(localID string) *Model { func convertLocalModel(model localModel, source *Model) Model { if source != nil { - return Model{ - ID: ModelID("local." + model.ID), - Name: source.Name, - Provider: ProviderLocal, - APIModel: model.ID, - ContextWindow: cmp.Or(source.ContextWindow, 4096), - DefaultMaxTokens: cmp.Or(source.DefaultMaxTokens, 4096), - CanReason: source.CanReason, - SupportsAttachments: source.SupportsAttachments, - } + m := *source + m.ID = ModelID("local." + model.ID) + m.Name = source.Name + m.APIModel = model.ID + m.Provider = ProviderLocal + return m } else { return Model{ ID: ModelID("local." + model.ID), @@ -190,8 +189,8 @@ func convertLocalModel(model localModel, source *Model) Model { APIModel: model.ID, ContextWindow: cmp.Or(model.LoadedContextLength, 4096), DefaultMaxTokens: cmp.Or(model.LoadedContextLength, 4096), - CanReason: true, - SupportsAttachments: true, + CanReason: false, + SupportsAttachments: false, } } } diff --git a/internal/llm/models/vertexai.go b/internal/llm/models/vertexai.go index d71dfc0b..0b9ee953 100644 --- a/internal/llm/models/vertexai.go +++ b/internal/llm/models/vertexai.go @@ -6,6 +6,8 @@ const ( // Models VertexAIGemini25Flash ModelID = "vertexai.gemini-2.5-flash" VertexAIGemini25 ModelID = "vertexai.gemini-2.5" + VertexAISonnet4 ModelID = "vertexai.claude-sonnet-4" + VertexAIOpus4 ModelID = "vertexai.claude-opus-4" ) var VertexAIGeminiModels = map[ModelID]Model{ @@ -36,3 +38,34 @@ var VertexAIGeminiModels = map[ModelID]Model{ SupportsAttachments: true, }, } + +var VertexAIAnthropicModels = map[ModelID]Model{ + VertexAISonnet4: { + ID: VertexAISonnet4, + Name: "VertexAI: Claude Sonnet 4", + Provider: ProviderVertexAI, + APIModel: "claude-sonnet-4", + CostPer1MIn: AnthropicModels[Claude4Sonnet].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude4Sonnet].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude4Sonnet].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude4Sonnet].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude4Sonnet].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude4Sonnet].DefaultMaxTokens, + SupportsAttachments: AnthropicModels[Claude4Sonnet].SupportsAttachments, + CanReason: AnthropicModels[Claude4Sonnet].CanReason, + }, + VertexAIOpus4: { + ID: VertexAIOpus4, + Name: "VertexAI: Claude Opus 4", + Provider: ProviderVertexAI, + APIModel: "claude-opus-4@20250514", + CostPer1MIn: AnthropicModels[Claude4Opus].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude4Opus].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude4Opus].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude4Opus].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude4Opus].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude4Opus].DefaultMaxTokens, + SupportsAttachments: AnthropicModels[Claude4Opus].SupportsAttachments, + CanReason: AnthropicModels[Claude4Opus].CanReason, + }, +} diff --git a/internal/llm/provider/anthropic.go b/internal/llm/provider/anthropic.go index 
badf6a3a..08836f23 100644 --- a/internal/llm/provider/anthropic.go +++ b/internal/llm/provider/anthropic.go @@ -12,6 +12,7 @@ import ( "github.com/anthropics/anthropic-sdk-go" "github.com/anthropics/anthropic-sdk-go/bedrock" "github.com/anthropics/anthropic-sdk-go/option" + sdkoption "github.com/anthropics/anthropic-sdk-go/option" "github.com/opencode-ai/opencode/internal/config" "github.com/opencode-ai/opencode/internal/llm/models" "github.com/opencode-ai/opencode/internal/llm/tools" @@ -20,9 +21,11 @@ import ( ) type anthropicOptions struct { - useBedrock bool - disableCache bool - shouldThink func(userMessage string) bool + useBedrock bool + useVertex bool + vertexOptions vertexOptions + disableCache bool + shouldThink func(userMessage string) bool } type AnthropicOption func(*anthropicOptions) @@ -40,14 +43,43 @@ func newAnthropicClient(opts providerClientOptions) AnthropicClient { for _, o := range opts.anthropicOptions { o(&anthropicOpts) } + resolvedBaseURL := "" anthropicClientOptions := []option.RequestOption{} - if opts.apiKey != "" { - anthropicClientOptions = append(anthropicClientOptions, option.WithAPIKey(opts.apiKey)) - } if anthropicOpts.useBedrock { anthropicClientOptions = append(anthropicClientOptions, bedrock.WithLoadDefaultConfig(context.Background())) } + if anthropicOpts.useVertex { + middleware := vertexMiddleware( + anthropicOpts.vertexOptions.location, + anthropicOpts.vertexOptions.projectID, + ) + anthropicClientOptions = append( + anthropicClientOptions, + sdkoption.WithMiddleware(middleware), + ) + if opts.baseURL == "" { + resolvedBaseURL = fmt.Sprintf("https://%s-aiplatform.googleapis.com/", anthropicOpts.vertexOptions.location) + } else { + resolvedBaseURL = opts.baseURL + } + } + + if opts.headers != nil { + for k, v := range opts.headers { + anthropicClientOptions = append(anthropicClientOptions, option.WithHeader(k, v)) + } + } + if resolvedBaseURL != "" { + anthropicClientOptions = append(anthropicClientOptions, option.WithBaseURL(resolvedBaseURL)) + } else if opts.baseURL != "" { + anthropicClientOptions = append(anthropicClientOptions, option.WithBaseURL(opts.baseURL)) + if opts.apiKey != "" { + anthropicClientOptions = append(anthropicClientOptions, option.WithAuthToken(opts.apiKey)) + } + } else if opts.apiKey != "" { + anthropicClientOptions = append(anthropicClientOptions, option.WithAPIKey(opts.apiKey)) + } client := anthropic.NewClient(anthropicClientOptions...) 
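+	// NOTE: client option precedence above: Vertex AI requests go through vertexMiddleware and use the configured baseURL or a location-derived default; otherwise an explicit baseURL sends the API key as a bearer token (e.g. for a LiteLLM-style passthrough proxy), and a bare API key falls back to the standard Anthropic API key option.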
return &anthropicClient{ @@ -248,8 +280,8 @@ func (a *anthropicClient) stream(ctx context.Context, messages []message.Message preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools)) cfg := config.Get() if cfg.Debug { - // jsonData, _ := json.Marshal(preparedMessages) - // logging.Debug("Prepared messages", "messages", string(jsonData)) + jsonData, _ := json.Marshal(preparedMessages) + logging.Debug("Prepared messages", "messages", string(jsonData)) } attempts := 0 eventChan := make(chan ProviderEvent) @@ -439,6 +471,9 @@ func (a *anthropicClient) usage(msg anthropic.Message) TokenUsage { func WithAnthropicBedrock(useBedrock bool) AnthropicOption { return func(options *anthropicOptions) { + if useBedrock { + options.useVertex = false + } options.useBedrock = useBedrock } } @@ -458,3 +493,11 @@ func WithAnthropicShouldThinkFn(fn func(string) bool) AnthropicOption { options.shouldThink = fn } } + +func WithVertexAI(projectID, localtion string) AnthropicOption { + return func(options *anthropicOptions) { + options.useVertex = true + options.useBedrock = false + options.vertexOptions = vertexOptions{projectID: projectID, location: localtion} + } +} diff --git a/internal/llm/provider/gemini.go b/internal/llm/provider/gemini.go index ebc36119..c7835c77 100644 --- a/internal/llm/provider/gemini.go +++ b/internal/llm/provider/gemini.go @@ -183,6 +183,11 @@ func (g *geminiClient) send(ctx context.Context, messages []message.Message, too Parts: []*genai.Part{{Text: g.providerOptions.systemMessage}}, }, } + if len(g.providerOptions.headers) != 0 { + config.HTTPOptions = &genai.HTTPOptions{ + Headers: *g.providerOptions.asHeader(), + } + } if len(tools) > 0 { config.Tools = g.convertTools(tools) } @@ -281,6 +286,9 @@ func (g *geminiClient) stream(ctx context.Context, messages []message.Message, t go func() { defer close(eventChan) + defer logging.RecoverPanic("gemini-client", func() { + logging.ErrorPersist("gemini client has failed") + }) for { attempts++ diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go index 8a561c77..6310ac7d 100644 --- a/internal/llm/provider/openai.go +++ b/internal/llm/provider/openai.go @@ -19,10 +19,8 @@ import ( ) type openaiOptions struct { - baseURL string disableCache bool reasoningEffort string - extraHeaders map[string]string } type OpenAIOption func(*openaiOptions) @@ -47,12 +45,11 @@ func newOpenAIClient(opts providerClientOptions) OpenAIClient { if opts.apiKey != "" { openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey)) } - if openaiOpts.baseURL != "" { - openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(openaiOpts.baseURL)) + if opts.baseURL != "" { + openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(opts.baseURL)) } - - if openaiOpts.extraHeaders != nil { - for key, value := range openaiOpts.extraHeaders { + if opts.headers != nil { + for key, value := range opts.headers { openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value)) } } @@ -393,18 +390,6 @@ func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage { } } -func WithOpenAIBaseURL(baseURL string) OpenAIOption { - return func(options *openaiOptions) { - options.baseURL = baseURL - } -} - -func WithOpenAIExtraHeaders(headers map[string]string) OpenAIOption { - return func(options *openaiOptions) { - options.extraHeaders = headers - } -} - func WithOpenAIDisableCache() OpenAIOption { return func(options *openaiOptions) { options.disableCache 
= true diff --git a/internal/llm/provider/provider.go b/internal/llm/provider/provider.go index 08175450..1ccdcd9e 100644 --- a/internal/llm/provider/provider.go +++ b/internal/llm/provider/provider.go @@ -3,6 +3,7 @@ package provider import ( "context" "fmt" + "net/http" "os" "github.com/opencode-ai/opencode/internal/llm/models" @@ -63,6 +64,8 @@ type providerClientOptions struct { model models.Model maxTokens int64 systemMessage string + baseURL string + headers map[string]string anthropicOptions []AnthropicOption openaiOptions []OpenAIOption @@ -70,6 +73,17 @@ type providerClientOptions struct { bedrockOptions []BedrockOption } +func (opts *providerClientOptions) asHeader() *http.Header { + header := http.Header{} + if opts.headers == nil { + return &header + } + for k, v := range opts.headers { + header.Add(k, v) + } + return &header +} + type ProviderClientOption func(*providerClientOptions) type ProviderClient interface { @@ -109,9 +123,9 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption client: newBedrockClient(clientOptions), }, nil case models.ProviderGROQ: - clientOptions.openaiOptions = append(clientOptions.openaiOptions, - WithOpenAIBaseURL("https://api.groq.com/openai/v1"), - ) + if clientOptions.baseURL == "" { + clientOptions.baseURL = "https://api.groq.com/openai/v1" + } return &baseProvider[OpenAIClient]{ options: clientOptions, client: newOpenAIClient(clientOptions), }, nil @@ -127,29 +141,27 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption client: newVertexAIClient(clientOptions), }, nil case models.ProviderOpenRouter: - clientOptions.openaiOptions = append(clientOptions.openaiOptions, - WithOpenAIBaseURL("https://openrouter.ai/api/v1"), - WithOpenAIExtraHeaders(map[string]string{ - "HTTP-Referer": "opencode.ai", - "X-Title": "OpenCode", - }), - ) + if clientOptions.baseURL == "" { + clientOptions.baseURL = "https://openrouter.ai/api/v1" + } + if clientOptions.headers == nil { + // headers may be nil when no custom headers are configured; avoid a nil-map write + clientOptions.headers = map[string]string{} + } + clientOptions.headers["HTTP-Referer"] = "opencode.ai" + clientOptions.headers["X-Title"] = "OpenCode" return &baseProvider[OpenAIClient]{ options: clientOptions, client: newOpenAIClient(clientOptions), }, nil case models.ProviderXAI: - clientOptions.openaiOptions = append(clientOptions.openaiOptions, - WithOpenAIBaseURL("https://api.x.ai/v1"), - ) + if clientOptions.baseURL == "" { + clientOptions.baseURL = "https://api.x.ai/v1" + } return &baseProvider[OpenAIClient]{ options: clientOptions, client: newOpenAIClient(clientOptions), }, nil case models.ProviderLocal: - clientOptions.openaiOptions = append(clientOptions.openaiOptions, - WithOpenAIBaseURL(os.Getenv("LOCAL_ENDPOINT")), - ) + if clientOptions.baseURL == "" { + clientOptions.baseURL = os.Getenv("LOCAL_ENDPOINT") + } return &baseProvider[OpenAIClient]{ options: clientOptions, client: newOpenAIClient(clientOptions), }, nil @@ -186,6 +198,18 @@ func (p *baseProvider[C]) StreamResponse(ctx context.Context, messages []message return p.client.stream(ctx, messages, tools) } +func WithBaseURL(baseURL string) ProviderClientOption { + return func(options *providerClientOptions) { + options.baseURL = baseURL + } +} + +func WithHeaders(headers map[string]string) ProviderClientOption { + return func(options *providerClientOptions) { + options.headers = headers + } +} + func WithAPIKey(apiKey string) ProviderClientOption { return func(options *providerClientOptions) { options.apiKey = apiKey diff --git a/internal/llm/provider/vertexai.go b/internal/llm/provider/vertexai.go index 2a13a957..170f0863 100644 --- 
a/internal/llm/provider/vertexai.go +++ b/internal/llm/provider/vertexai.go @@ -1,34 +1,133 @@ package provider import ( + "bytes" "context" + "fmt" + "io" + "net/http" "os" + "strings" + "cloud.google.com/go/auth" + sdkoption "github.com/anthropics/anthropic-sdk-go/option" + "github.com/anthropics/anthropic-sdk-go/vertex" + "github.com/opencode-ai/opencode/internal/llm/models" "github.com/opencode-ai/opencode/internal/logging" + + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" + "golang.org/x/oauth2" "google.golang.org/genai" ) type VertexAIClient ProviderClient +type vertexOptions struct { + projectID string + location string +} + +type mockTokenSoure struct{} + +func (s *mockTokenSoure) Token() (*oauth2.Token, error) { + return &oauth2.Token{}, nil +} + func newVertexAIClient(opts providerClientOptions) VertexAIClient { + for k := range models.VertexAIAnthropicModels { + if k == opts.model.ID { + logging.Info("Using Anthropic client with VertexAI provider", "model", k) + opts.anthropicOptions = []AnthropicOption{ + WithVertexAI(os.Getenv("VERTEXAI_PROJECT"), os.Getenv("VERTEXAI_LOCATION")), + } + return newAnthropicClient(opts) + } + } + geminiOpts := geminiOptions{} for _, o := range opts.geminiOptions { o(&geminiOpts) } - - client, err := genai.NewClient(context.Background(), &genai.ClientConfig{ + genaiConfig := &genai.ClientConfig{ Project: os.Getenv("VERTEXAI_PROJECT"), Location: os.Getenv("VERTEXAI_LOCATION"), Backend: genai.BackendVertexAI, - }) + } + + // HACK: assume litellm proxy, provide an excplicit way to define proxy-type + if opts.baseURL != "" { + genaiConfig.HTTPOptions = genai.HTTPOptions{ + BaseURL: opts.baseURL, + Headers: *opts.asHeader(), + } + genaiConfig.Credentials = &auth.Credentials{} + } + + client, err := genai.NewClient(context.Background(), genaiConfig) if err != nil { logging.Error("Failed to create VertexAI client", "error", err) return nil } + logging.Info("Using Gemini client with VertexAI provider", "model", opts.model.ID) return &geminiClient{ providerOptions: opts, options: geminiOpts, client: client, } } + +// NOTE: copied from (here)[github.com/anthropics/anthropic-sdk-go/vertex] +func vertexMiddleware(region, projectID string) sdkoption.Middleware { + return func(r *http.Request, next sdkoption.MiddlewareNext) (*http.Response, error) { + if r.Body != nil { + body, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + r.Body.Close() + + if !gjson.GetBytes(body, "anthropic_version").Exists() { + body, _ = sjson.SetBytes(body, "anthropic_version", vertex.DefaultVersion) + } + if strings.HasSuffix(r.URL.Path, "/v1/messages") && r.Method == http.MethodPost { + logging.Debug("vertext_ai message path", "path", r.URL.Path) + if projectID == "" { + return nil, fmt.Errorf("no projectId was given and it could not be resolved from credentials") + } + + model := gjson.GetBytes(body, "model").String() + stream := gjson.GetBytes(body, "stream").Bool() + + body, _ = sjson.DeleteBytes(body, "model") + + specifier := "rawPredict" + if stream { + specifier = "streamRawPredict" + } + newPath := fmt.Sprintf("/v1/projects/%s/locations/%s/publishers/anthropic/models/%s:%s", projectID, region, model, specifier) + r.URL.Path = strings.ReplaceAll(r.URL.Path, "/v1/messages", newPath) + } + + if strings.HasSuffix(r.URL.Path, "/v1/messages/count_tokensg") && r.Method == http.MethodPost { + if projectID == "" { + return nil, fmt.Errorf("no projectId was given and it could not be resolved from credentials") + } + + newPath := 
fmt.Sprintf("/v1/projects/%s/locations/%s/publishers/anthropic/models/count-tokens:rawPredict", projectID, region) + r.URL.Path = strings.ReplaceAll(r.URL.Path, "/v1/messages/count_tokensg", newPath) + } + + reader := bytes.NewReader(body) + r.Body = io.NopCloser(reader) + r.GetBody = func() (io.ReadCloser, error) { + _, err := reader.Seek(0, 0) + return io.NopCloser(reader), err + } + r.ContentLength = int64(len(body)) + } + + return next(r) + } +} From 95513ba7edc5a091389c2cde7c3a8560860a46c6 Mon Sep 17 00:00:00 2001 From: Artem Obukhov Date: Fri, 20 Jun 2025 21:07:37 +0400 Subject: [PATCH 3/8] fix(vertexai): make proxy intention clear --- README.md | 2 +- internal/llm/provider/vertexai.go | 29 ++++++++++++++++------------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index c4704414..a5cfcd91 100644 --- a/README.md +++ b/README.md @@ -599,7 +599,7 @@ It is possible to use LiteLLM as a passthrough proxy by providing `baseURL` and { "providers": { "vertexai": { - "apiKey": "", + "apiKey": "litellm-api-key", "disabled": false, "baseURL": "https://localhost/vertex_ai" "headers": { diff --git a/internal/llm/provider/vertexai.go b/internal/llm/provider/vertexai.go index 170f0863..9d0c3515 100644 --- a/internal/llm/provider/vertexai.go +++ b/internal/llm/provider/vertexai.go @@ -17,7 +17,6 @@ import ( "github.com/tidwall/gjson" "github.com/tidwall/sjson" - "golang.org/x/oauth2" "google.golang.org/genai" ) @@ -28,12 +27,6 @@ type vertexOptions struct { location string } -type mockTokenSoure struct{} - -func (s *mockTokenSoure) Token() (*oauth2.Token, error) { - return &oauth2.Token{}, nil -} - func newVertexAIClient(opts providerClientOptions) VertexAIClient { for k := range models.VertexAIAnthropicModels { if k == opts.model.ID { @@ -55,13 +48,23 @@ func newVertexAIClient(opts providerClientOptions) VertexAIClient { Backend: genai.BackendVertexAI, } - // HACK: assume litellm proxy, provide an excplicit way to define proxy-type if opts.baseURL != "" { - genaiConfig.HTTPOptions = genai.HTTPOptions{ - BaseURL: opts.baseURL, - Headers: *opts.asHeader(), + if opts.headers != nil { + header := opts.asHeader() + // HACK: assume litellm proxy, provide an excplicit way to define proxy-type + if h := header.Get("x-litellm-api-key"); h != "" { + // has to be empty to pass genai validation check with empty creds, auth handled by LiteLLM + genaiConfig.Credentials = &auth.Credentials{} + } + genaiConfig.HTTPOptions = genai.HTTPOptions{ + BaseURL: opts.baseURL, + Headers: *header, + } + } else { + genaiConfig.HTTPOptions = genai.HTTPOptions{ + BaseURL: opts.baseURL, + } } - genaiConfig.Credentials = &auth.Credentials{} } client, err := genai.NewClient(context.Background(), genaiConfig) @@ -78,7 +81,7 @@ func newVertexAIClient(opts providerClientOptions) VertexAIClient { } } -// NOTE: copied from (here)[github.com/anthropics/anthropic-sdk-go/vertex] +// NOTE: copied from (here)[github.com/anthropics/anthropic-sdk-go/vertex] to make LiteLLM passthrough work func vertexMiddleware(region, projectID string) sdkoption.Middleware { return func(r *http.Request, next sdkoption.MiddlewareNext) (*http.Response, error) { if r.Body != nil { From af5c2e1f745c6a5c09903fb7241bc0a38e947a59 Mon Sep 17 00:00:00 2001 From: Artem Obukhov Date: Mon, 23 Jun 2025 08:28:07 +0400 Subject: [PATCH 4/8] chore(schema):add missing fields --- cmd/schema/main.go | 11 +++ opencode-schema.json | 211 +++++++++++++++++++++++-------------------- 2 files changed, 124 insertions(+), 98 deletions(-) diff 
--git a/cmd/schema/main.go b/cmd/schema/main.go index 429267bc..f9b7505d 100644 --- a/cmd/schema/main.go +++ b/cmd/schema/main.go @@ -186,6 +186,17 @@ func generateSchema() map[string]any { "description": "Whether the provider is disabled", "default": false, }, + "baseURL": map[string]any{ + "type": "string", + "description": "Base URL for the provider instead of default one", + }, + "headers": map[string]any{ + "type": "object", + "description": "Extra headers to attach to request", + "additionalProperties": map[string]any{ + "type": "string", + }, + }, }, }, } diff --git a/opencode-schema.json b/opencode-schema.json index dc139fda..035cf141 100644 --- a/opencode-schema.json +++ b/opencode-schema.json @@ -12,72 +12,74 @@ "model": { "description": "Model ID for the agent", "enum": [ - "gpt-4.1", - "llama-3.3-70b-versatile", - "azure.gpt-4.1", + "meta-llama/llama-4-scout-17b-16e-instruct", + "azure.o1-mini", + "azure.gpt-4.1-nano", + "openrouter.o3-mini", + "azure.gpt-4o-mini", "openrouter.gpt-4o", - "openrouter.o1-mini", "openrouter.claude-3-haiku", + "openrouter.gpt-4.1-nano", + "grok-3-mini-beta", "claude-3-opus", - "gpt-4o", + "o4-mini", "gpt-4o-mini", "o1", - "meta-llama/llama-4-maverick-17b-128e-instruct", - "azure.o3-mini", + "gpt-4.5-preview", + "o3-mini", + "gemini-2.5", "openrouter.gpt-4o-mini", - "openrouter.o1", + "grok-3-mini-fast-beta", "claude-3.5-haiku", - "o4-mini", - "azure.gpt-4.1-mini", - "openrouter.o3", - "grok-3-beta", - "o3-mini", - "qwen-qwq", - "azure.o1", - "openrouter.gemini-2.5-flash", - "openrouter.gemini-2.5", "o1-mini", - "azure.gpt-4o", - "openrouter.gpt-4.1-mini", - "openrouter.claude-3.5-sonnet", - "openrouter.o3-mini", - "gpt-4.1-mini", - "gpt-4.5-preview", - "gpt-4.1-nano", - "deepseek-r1-distill-llama-70b", - "azure.gpt-4o-mini", - "openrouter.gpt-4.1", - "bedrock.claude-3.7-sonnet", + "gpt-4.1", + "openrouter.claude-3.5-haiku", "claude-3-haiku", - "o3", - "gemini-2.0-flash-lite", + "gemini-2.0-flash", "azure.o3", "azure.gpt-4.5-preview", + "openrouter.o4-mini", + "vertexai.claude-sonnet-4", + "bedrock.claude-3.7-sonnet", + "claude-4-opus", + "azure.gpt-4o", + "azure.gpt-4.1", "openrouter.claude-3-opus", - "grok-3-mini-fast-beta", - "claude-4-sonnet", + "openrouter.gemini-2.5", + "vertexai.gemini-2.5", + "deepseek-r1-distill-llama-70b", + "openrouter.gpt-4.1", + "openrouter.gemini-2.5-flash", + "azure.o1", + "openrouter.gpt-4.5-preview", + "vertexai.gemini-2.5-flash", + "vertexai.claude-opus-4", "azure.o4-mini", + "openrouter.o3", + "grok-3-beta", "grok-3-fast-beta", - "claude-3.5-sonnet", - "azure.o1-mini", + "o3", + "gpt-4.1-mini", + "openrouter.claude-3.5-sonnet", + "openrouter.o1-mini", + "o1-pro", + "gpt-4.1-nano", + "llama-3.3-70b-versatile", + "azure.o3-mini", + "azure.gpt-4.1-mini", + "openrouter.gpt-4.1-mini", + "openrouter.o1-pro", "openrouter.claude-3.7-sonnet", - "openrouter.gpt-4.5-preview", - "grok-3-mini-beta", + "claude-3.5-sonnet", + "gemini-2.5-flash", + "meta-llama/llama-4-maverick-17b-128e-instruct", + "openrouter.o1", + "gpt-4o", + "claude-4-sonnet", "claude-3.7-sonnet", - "gemini-2.0-flash", + "qwen-qwq", "openrouter.deepseek-r1-free", - "vertexai.gemini-2.5-flash", - "vertexai.gemini-2.5", - "o1-pro", - "gemini-2.5", - "meta-llama/llama-4-scout-17b-16e-instruct", - "azure.gpt-4.1-nano", - "openrouter.gpt-4.1-nano", - "gemini-2.5-flash", - "openrouter.o4-mini", - "openrouter.claude-3.5-haiku", - "claude-4-opus", - "openrouter.o1-pro" + "gemini-2.0-flash-lite" ], "type": "string" }, @@ -111,72 +113,74 @@ "model": { "description": 
"Model ID for the agent", "enum": [ - "gpt-4.1", - "llama-3.3-70b-versatile", - "azure.gpt-4.1", + "meta-llama/llama-4-scout-17b-16e-instruct", + "azure.o1-mini", + "azure.gpt-4.1-nano", + "openrouter.o3-mini", + "azure.gpt-4o-mini", "openrouter.gpt-4o", - "openrouter.o1-mini", "openrouter.claude-3-haiku", + "openrouter.gpt-4.1-nano", + "grok-3-mini-beta", "claude-3-opus", - "gpt-4o", + "o4-mini", "gpt-4o-mini", "o1", - "meta-llama/llama-4-maverick-17b-128e-instruct", - "azure.o3-mini", + "gpt-4.5-preview", + "o3-mini", + "gemini-2.5", "openrouter.gpt-4o-mini", - "openrouter.o1", + "grok-3-mini-fast-beta", "claude-3.5-haiku", - "o4-mini", - "azure.gpt-4.1-mini", - "openrouter.o3", - "grok-3-beta", - "o3-mini", - "qwen-qwq", - "azure.o1", - "openrouter.gemini-2.5-flash", - "openrouter.gemini-2.5", "o1-mini", - "azure.gpt-4o", - "openrouter.gpt-4.1-mini", - "openrouter.claude-3.5-sonnet", - "openrouter.o3-mini", - "gpt-4.1-mini", - "gpt-4.5-preview", - "gpt-4.1-nano", - "deepseek-r1-distill-llama-70b", - "azure.gpt-4o-mini", - "openrouter.gpt-4.1", - "bedrock.claude-3.7-sonnet", + "gpt-4.1", + "openrouter.claude-3.5-haiku", "claude-3-haiku", - "o3", - "gemini-2.0-flash-lite", + "gemini-2.0-flash", "azure.o3", "azure.gpt-4.5-preview", + "openrouter.o4-mini", + "vertexai.claude-sonnet-4", + "bedrock.claude-3.7-sonnet", + "claude-4-opus", + "azure.gpt-4o", + "azure.gpt-4.1", "openrouter.claude-3-opus", - "grok-3-mini-fast-beta", - "claude-4-sonnet", + "openrouter.gemini-2.5", + "vertexai.gemini-2.5", + "deepseek-r1-distill-llama-70b", + "openrouter.gpt-4.1", + "openrouter.gemini-2.5-flash", + "azure.o1", + "openrouter.gpt-4.5-preview", + "vertexai.gemini-2.5-flash", + "vertexai.claude-opus-4", "azure.o4-mini", + "openrouter.o3", + "grok-3-beta", "grok-3-fast-beta", - "claude-3.5-sonnet", - "azure.o1-mini", + "o3", + "gpt-4.1-mini", + "openrouter.claude-3.5-sonnet", + "openrouter.o1-mini", + "o1-pro", + "gpt-4.1-nano", + "llama-3.3-70b-versatile", + "azure.o3-mini", + "azure.gpt-4.1-mini", + "openrouter.gpt-4.1-mini", + "openrouter.o1-pro", "openrouter.claude-3.7-sonnet", - "openrouter.gpt-4.5-preview", - "grok-3-mini-beta", + "claude-3.5-sonnet", + "gemini-2.5-flash", + "meta-llama/llama-4-maverick-17b-128e-instruct", + "openrouter.o1", + "gpt-4o", + "claude-4-sonnet", "claude-3.7-sonnet", - "gemini-2.0-flash", + "qwen-qwq", "openrouter.deepseek-r1-free", - "vertexai.gemini-2.5-flash", - "vertexai.gemini-2.5", - "o1-pro", - "gemini-2.5", - "meta-llama/llama-4-scout-17b-16e-instruct", - "azure.gpt-4.1-nano", - "openrouter.gpt-4.1-nano", - "gemini-2.5-flash", - "openrouter.o4-mini", - "openrouter.claude-3.5-haiku", - "claude-4-opus", - "openrouter.o1-pro" + "gemini-2.0-flash-lite" ], "type": "string" }, @@ -345,11 +349,22 @@ "description": "API key for the provider", "type": "string" }, + "baseURL": { + "description": "Base URL for the provider instead of default one", + "type": "string" + }, "disabled": { "default": false, "description": "Whether the provider is disabled", "type": "boolean" }, + "headers": { + "additionalProperties": { + "type": "string" + }, + "description": "Extra headers to attach to request", + "type": "object" + }, "provider": { "description": "Provider type", "enum": [ From 2f71f46b248d842d4e04b643778cf542f90b7a82 Mon Sep 17 00:00:00 2001 From: Artem Obukhov Date: Fri, 27 Jun 2025 14:09:55 +0400 Subject: [PATCH 5/8] fix(gitignore): remove conflict --- .gitignore | 3 --- OpenCode.md | 23 ----------------------- 2 files changed, 26 deletions(-) delete mode 100644 OpenCode.md 
diff --git a/.gitignore b/.gitignore index 7273b9f8..f2ed9ae1 100644 --- a/.gitignore +++ b/.gitignore @@ -44,8 +44,5 @@ Thumbs.db .opencode/ opencode -<<<<<<< HEAD .aider* -======= opencode.md ->>>>>>> upstream/main diff --git a/OpenCode.md b/OpenCode.md deleted file mode 100644 index 10dbea3a..00000000 --- a/OpenCode.md +++ /dev/null @@ -1,23 +0,0 @@ -# OpenCode Development Guide - -## Build/Test Commands -- `go build -o opencode` - Build the project -- `go test ./...` - Run all tests -- `go test ./internal/llm/prompt` - Run specific package tests -- `go test -v ./internal/llm/prompt` - Run single test with verbose output -- `go test -run TestFunctionName` - Run specific test function -- `go vet ./...` - Static analysis -- `go mod tidy` - Clean up dependencies -- `./opencode` - Run the built binary - -## Code Style Guidelines -- **Imports**: Group stdlib, 3rd party, then internal packages with blank lines between groups -- **Naming**: Use camelCase for private, PascalCase for public; descriptive names preferred -- **Types**: Define custom types for clarity (e.g., `type AgentName string`, `type MCPType string`) -- **Constants**: Group related constants in const blocks with descriptive comments -- **Error Handling**: Always handle errors explicitly; use `require.NoError(t, err)` in tests -- **Testing**: Use testify/assert and testify/require; include `t.Parallel()` for parallel tests -- **Comments**: Package comments start with "Package name"; use descriptive function comments -- **Structure**: Follow standard Go project layout with `internal/` for private packages -- **JSON Tags**: Always include json tags for structs that marshal/unmarshal -- **Context**: Pass context.Context as first parameter for functions that need it From 9b052993489578aab3e6f6d4c1b1a910255d7561 Mon Sep 17 00:00:00 2001 From: Artem Obukhov Date: Fri, 27 Jun 2025 14:12:39 +0400 Subject: [PATCH 6/8] fix(models):add copilot to boot --- internal/llm/models/bootstrap.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/llm/models/bootstrap.go b/internal/llm/models/bootstrap.go index 5f535f3b..c4eb0a88 100644 --- a/internal/llm/models/bootstrap.go +++ b/internal/llm/models/bootstrap.go @@ -12,6 +12,7 @@ func init() { maps.Copy(SupportedModels, XAIModels) maps.Copy(SupportedModels, VertexAIGeminiModels) maps.Copy(SupportedModels, VertexAIAnthropicModels) + maps.Copy(SupportedModels, CopilotModels) initLocalModels() } From ccc8b12d9e9876d4f3d1d440ac23b3a742e8680b Mon Sep 17 00:00:00 2001 From: "Artem Obukhov (aider)" Date: Fri, 27 Jun 2025 14:58:24 +0400 Subject: [PATCH 7/8] fix(local): adjust logging --- internal/llm/models/local.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/llm/models/local.go b/internal/llm/models/local.go index db6cfe82..d35043a8 100644 --- a/internal/llm/models/local.go +++ b/internal/llm/models/local.go @@ -91,7 +91,7 @@ func listLocalModels(modelsEndpoint string) []localModel { if token != "" { req, reqErr := http.NewRequest("GET", modelsEndpoint, nil) if reqErr != nil { - logging.Warn("Failed to create local models request", + logging.Debug("Failed to create local models request", "error", reqErr, "endpoint", modelsEndpoint, ) @@ -103,7 +103,7 @@ func listLocalModels(modelsEndpoint string) []localModel { res, err = http.Get(modelsEndpoint) } if err != nil || res == nil { - logging.Warn("Failed to list local models", + logging.Debug("Failed to list local models", "error", err, "endpoint", modelsEndpoint, ) @@ -112,7 +112,7 @@ func 
listLocalModels(modelsEndpoint string) []localModel { defer res.Body.Close() if res.StatusCode != http.StatusOK { - logging.Warn("Failed to list local models", + logging.Debug("Failed to list local models", "status", res.StatusCode, "endpoint", modelsEndpoint, ) @@ -121,7 +121,7 @@ func listLocalModels(modelsEndpoint string) []localModel { var modelList localModelList if err = json.NewDecoder(res.Body).Decode(&modelList); err != nil { - logging.Warn("Failed to list local models", + logging.Debug("Failed to list local models", "error", err, "endpoint", modelsEndpoint, ) From 633d2d619ffc5b06d713f9460ff464d3e196dcc9 Mon Sep 17 00:00:00 2001 From: Artem Obukhov Date: Sun, 29 Jun 2025 15:56:42 +0400 Subject: [PATCH 8/8] fix(vertex):gemini pass token to avoid npe --- internal/llm/provider/gemini.go | 9 +++++++++ internal/llm/provider/vertexai.go | 11 ++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/internal/llm/provider/gemini.go b/internal/llm/provider/gemini.go index c7835c77..fa7cfd27 100644 --- a/internal/llm/provider/gemini.go +++ b/internal/llm/provider/gemini.go @@ -9,6 +9,7 @@ import ( "strings" "time" + "cloud.google.com/go/auth" "github.com/google/uuid" "github.com/opencode-ai/opencode/internal/config" "github.com/opencode-ai/opencode/internal/llm/tools" @@ -31,6 +32,14 @@ type geminiClient struct { type GeminiClient ProviderClient +type tokenProvider struct { + value string +} + +func (p *tokenProvider) Token(context.Context) (*auth.Token, error) { + return &auth.Token{Value: p.value}, nil +} + func newGeminiClient(opts providerClientOptions) GeminiClient { geminiOpts := geminiOptions{} for _, o := range opts.geminiOptions { diff --git a/internal/llm/provider/vertexai.go b/internal/llm/provider/vertexai.go index 9d0c3515..c1d9f981 100644 --- a/internal/llm/provider/vertexai.go +++ b/internal/llm/provider/vertexai.go @@ -51,11 +51,6 @@ func newVertexAIClient(opts providerClientOptions) VertexAIClient { if opts.baseURL != "" { if opts.headers != nil { header := opts.asHeader() - // HACK: assume litellm proxy, provide an excplicit way to define proxy-type - if h := header.Get("x-litellm-api-key"); h != "" { - // has to be empty to pass genai validation check with empty creds, auth handled by LiteLLM - genaiConfig.Credentials = &auth.Credentials{} - } genaiConfig.HTTPOptions = genai.HTTPOptions{ BaseURL: opts.baseURL, Headers: *header, @@ -67,6 +62,12 @@ func newVertexAIClient(opts providerClientOptions) VertexAIClient { } } + if opts.apiKey != "" { + genaiConfig.Credentials = &auth.Credentials{ + TokenProvider: &tokenProvider{value: opts.apiKey}, + } + } + client, err := genai.NewClient(context.Background(), genaiConfig) if err != nil { logging.Error("Failed to create VertexAI client", "error", err)