diff --git a/.gitignore b/.gitignore
index 3a206a7f..f2ed9ae1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -44,4 +44,5 @@ Thumbs.db
 .opencode/
 opencode
+.aider*
 opencode.md
diff --git a/README.md b/README.md
index eee06acd..9d35add2 100644
--- a/README.md
+++ b/README.md
@@ -269,6 +269,8 @@ OpenCode supports a variety of AI models from different providers:
 - Gemini 2.5
 - Gemini 2.5 Flash
+- Claude Sonnet 4
+- Claude Opus 4
 
 ## Usage
@@ -335,6 +337,7 @@ The output format is implemented as a strongly-typed `OutputFormat` in the codeb
 | `?` | Toggle help dialog (when not in editing mode) |
 | `Ctrl+L` | View logs |
 | `Ctrl+A` | Switch session |
+| `Ctrl+P` | Prune session |
 | `Ctrl+K` | Command dialog |
 | `Ctrl+O` | Toggle model selection dialog |
 | `Esc` | Close current overlay/dialog or return to previous mode |
@@ -626,11 +629,30 @@ This is useful for developers who want to experiment with custom models.
 
 ### Configuring a self-hosted provider
 
-You can use a self-hosted model by setting the `LOCAL_ENDPOINT` environment variable.
-This will cause OpenCode to load and use the models from the specified endpoint.
+You can use a self-hosted model by setting the `LOCAL_ENDPOINT` and `LOCAL_ENDPOINT_API_KEY` environment variables.
+This will cause OpenCode to load and use the models from the specified endpoint. When it loads a model, it tries to inherit settings
+from a predefined one where possible.
 
 ```bash
 LOCAL_ENDPOINT=http://localhost:1235/v1
+LOCAL_ENDPOINT_API_KEY=secret
+```
+
+### Using a LiteLLM proxy
+It is possible to use LiteLLM as a passthrough proxy by providing a `baseURL` and an auth header in the provider configuration:
+```json
+{
+  "providers": {
+    "vertexai": {
+      "apiKey": "litellm-api-key",
+      "disabled": false,
+      "baseURL": "https://localhost/vertex_ai",
+      "headers": {
+        "x-litellm-api-key": "litellm-api-key"
+      }
+    }
+  }
+}
 ```
 
 ### Configuring a self-hosted model
diff --git a/cmd/schema/main.go b/cmd/schema/main.go
index 429267bc..f9b7505d 100644
--- a/cmd/schema/main.go
+++ b/cmd/schema/main.go
@@ -186,6 +186,17 @@ func generateSchema() map[string]any {
				"description": "Whether the provider is disabled",
				"default":     false,
			},
+			"baseURL": map[string]any{
+				"type":        "string",
+				"description": "Base URL for the provider instead of the default one",
+			},
+			"headers": map[string]any{
+				"type":        "object",
+				"description": "Extra headers to attach to requests",
+				"additionalProperties": map[string]any{
+					"type": "string",
+				},
+			},
 		},
 	},
 }
diff --git a/go.mod b/go.mod
index 82994450..f63b8a03 100644
--- a/go.mod
+++ b/go.mod
@@ -33,9 +33,17 @@ require (
 	github.com/stretchr/testify v1.10.0
 )
 
+require (
+	cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
+	golang.org/x/oauth2 v0.25.0 // indirect
+	golang.org/x/time v0.8.0 // indirect
+	google.golang.org/api v0.215.0 // indirect
+)
+
 require (
 	cloud.google.com/go v0.116.0 // indirect
-	cloud.google.com/go/auth v0.13.0 // indirect
+	cloud.google.com/go/auth v0.13.0
 	cloud.google.com/go/compute/metadata v0.6.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
diff --git a/go.sum b/go.sum
index 8b7e3074..cf58a701 100644
--- a/go.sum
+++ b/go.sum
@@ -2,6 +2,8 @@ cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
 cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
 cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
 cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
+cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
+cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
 cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
 cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
@@ -250,6 +252,8 @@ github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC
 github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
 go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
 go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
@@ -289,6 +293,8 @@ golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
 golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -329,11 +335,15 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
 golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.215.0 h1:jdYF4qnyczlEz2ReWIsosNLDuzXyvFHJtI5gcr0J7t0=
+google.golang.org/api v0.215.0/go.mod h1:fta3CVtuJYOEdugLNWm6WodzOS8KdFckABwN4I40hzY=
 google.golang.org/genai v1.3.0 h1:tXhPJF30skOjnnDY7ZnjK3q7IKy4PuAlEA0fk7uEaEI=
 google.golang.org/genai v1.3.0/go.mod h1:TyfOKRz/QyCaj6f/ZDt505x+YreXnY40l2I6k8TvgqY=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
diff --git a/internal/config/config.go b/internal/config/config.go
index 630fac9b..277f3958 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -52,8 +52,10 @@ type Agent struct {
 
 // Provider defines configuration for an LLM provider.
 type Provider struct {
-	APIKey   string `json:"apiKey"`
-	Disabled bool   `json:"disabled"`
+	APIKey   string            `json:"apiKey"`
+	Disabled bool              `json:"disabled"`
+	BaseURL  string            `json:"baseURL"`
+	Headers  map[string]string `json:"headers,omitempty"`
 }
 
 // Data defines storage configuration.
diff --git a/internal/llm/agent/agent.go b/internal/llm/agent/agent.go
index 20b10fd3..fc436eb1 100644
--- a/internal/llm/agent/agent.go
+++ b/internal/llm/agent/agent.go
@@ -725,12 +725,20 @@ func createAgentProvider(agentName config.AgentName) (provider.Provider, error)
 	if agentConfig.MaxTokens > 0 {
 		maxTokens = agentConfig.MaxTokens
 	}
+
 	opts := []provider.ProviderClientOption{
 		provider.WithAPIKey(providerCfg.APIKey),
 		provider.WithModel(model),
 		provider.WithSystemMessage(prompt.GetAgentPrompt(agentName, model.Provider)),
 		provider.WithMaxTokens(maxTokens),
 	}
+	if providerCfg.BaseURL != "" {
+		opts = append(opts, provider.WithBaseURL(providerCfg.BaseURL))
+	}
+	if len(providerCfg.Headers) != 0 {
+		opts = append(opts, provider.WithHeaders(providerCfg.Headers))
+	}
+
 	if model.Provider == models.ProviderOpenAI || model.Provider == models.ProviderLocal && model.CanReason {
 		opts = append(
 			opts,
diff --git a/internal/llm/models/anthropic.go b/internal/llm/models/anthropic.go
index 9da03a83..3bb31ffe 100644
--- a/internal/llm/models/anthropic.go
+++ b/internal/llm/models/anthropic.go
@@ -91,7 +91,7 @@ var AnthropicModels = map[ModelID]Model{
 		CostPer1MOutCached:  0.30,
 		CostPer1MOut:        15.0,
 		ContextWindow:       200000,
-		DefaultMaxTokens:    50000,
+		DefaultMaxTokens:    64000,
 		CanReason:           true,
 		SupportsAttachments: true,
 	},
@@ -105,7 +105,7 @@ var AnthropicModels = map[ModelID]Model{
 		CostPer1MOutCached:  1.50,
 		CostPer1MOut:        75.0,
 		ContextWindow:       200000,
-		DefaultMaxTokens:    4096,
+		DefaultMaxTokens:    32000,
 		SupportsAttachments: true,
 	},
 }
diff --git a/internal/llm/models/bootstrap.go b/internal/llm/models/bootstrap.go
new file mode 100644
index 00000000..c4eb0a88
--- /dev/null
+++ b/internal/llm/models/bootstrap.go
@@ -0,0 +1,18 @@
+package models
+
+import "maps"
+
+func init() {
+	maps.Copy(SupportedModels, AnthropicModels)
+	maps.Copy(SupportedModels, OpenAIModels)
+	maps.Copy(SupportedModels, GeminiModels)
+	maps.Copy(SupportedModels, GroqModels)
+	maps.Copy(SupportedModels, AzureModels)
+	maps.Copy(SupportedModels, OpenRouterModels)
+	maps.Copy(SupportedModels, XAIModels)
+	maps.Copy(SupportedModels, VertexAIGeminiModels)
+	maps.Copy(SupportedModels, VertexAIAnthropicModels)
+	maps.Copy(SupportedModels, CopilotModels)
+
+	initLocalModels()
+}
diff --git a/internal/llm/models/local.go b/internal/llm/models/local.go
index db0ea11c..d35043a8 100644
--- a/internal/llm/models/local.go
+++ b/internal/llm/models/local.go
@@ -3,6 +3,7 @@ package models
 import (
 	"cmp"
 	"encoding/json"
+	"fmt"
 	"net/http"
 	"net/url"
 	"os"
@@ -21,7 +22,7 @@ const (
 	lmStudioBetaModelsPath = "api/v0/models"
 )
 
-func init() {
+func initLocalModels() {
 	if endpoint := os.Getenv("LOCAL_ENDPOINT"); endpoint != "" {
 		localEndpoint, err := url.Parse(endpoint)
 		if err != nil {
@@ -33,7 +34,8 @@ func init() {
 		}
 
 		load := func(url *url.URL, path string) []localModel {
-			url.Path = path
+			url = url.JoinPath(path)
+			logging.Debug(fmt.Sprintf("Trying to load models from %s", url))
 			return listLocalModels(url.String())
 		}
 
@@ -43,16 +45,22 @@ func init() {
 			models = load(localEndpoint, localModelsPath)
 		}
 
-		if len(models) == 0 {
+		if c := len(models); c == 0 {
 			logging.Debug("No local models found",
 				"endpoint", endpoint,
 			)
 			return
+		} else {
+			logging.Debug(fmt.Sprintf("%d local models found", c))
 		}
 
 		loadLocalModels(models)
 
-		viper.SetDefault("providers.local.apiKey", "dummy")
+		if token, ok := os.LookupEnv("LOCAL_ENDPOINT_API_KEY"); ok {
+			viper.SetDefault("providers.local.apiKey", token)
+		} else {
+			viper.SetDefault("providers.local.apiKey", "dummy")
+		}
 		ProviderPopularity[ProviderLocal] = 0
 	}
 }
@@ -75,8 +83,26 @@ type localModel struct {
 }
 
 func listLocalModels(modelsEndpoint string) []localModel {
-	res, err := http.Get(modelsEndpoint)
-	if err != nil {
+	token := os.Getenv("LOCAL_ENDPOINT_API_KEY")
+	var (
+		res *http.Response
+		err error
+	)
+	if token != "" {
+		req, reqErr := http.NewRequest("GET", modelsEndpoint, nil)
+		if reqErr != nil {
+			logging.Debug("Failed to create local models request",
+				"error", reqErr,
+				"endpoint", modelsEndpoint,
+			)
+			return nil
+		}
+		req.Header.Set("Authorization", "Bearer "+token)
+		res, err = http.DefaultClient.Do(req)
+	} else {
+		res, err = http.Get(modelsEndpoint)
+	}
+	if err != nil || res == nil {
 		logging.Debug("Failed to list local models",
 			"error", err,
 			"endpoint", modelsEndpoint,
@@ -125,7 +151,8 @@ func listLocalModels(modelsEndpoint string) []localModel {
 
 func loadLocalModels(models []localModel) {
 	for i, m := range models {
-		model := convertLocalModel(m)
+		source := tryResolveSource(m.ID)
+		model := convertLocalModel(m, source)
 		SupportedModels[model.ID] = model
 
 		if i == 0 || m.State == "loaded" {
@@ -137,16 +164,34 @@ func loadLocalModels(models []localModel) {
 	}
 }
 
-func convertLocalModel(model localModel) Model {
-	return Model{
-		ID:                  ModelID("local." + model.ID),
-		Name:                friendlyModelName(model.ID),
-		Provider:            ProviderLocal,
-		APIModel:            model.ID,
-		ContextWindow:       cmp.Or(model.LoadedContextLength, 4096),
-		DefaultMaxTokens:    cmp.Or(model.LoadedContextLength, 4096),
-		CanReason:           true,
-		SupportsAttachments: true,
+func tryResolveSource(localID string) *Model {
+	for _, model := range SupportedModels {
+		if strings.Contains(localID, model.APIModel) {
+			return &model
+		}
+	}
+	return nil
+}
+
+func convertLocalModel(model localModel, source *Model) Model {
+	if source != nil {
+		m := *source
+		m.ID = ModelID("local." + model.ID)
+		m.Name = source.Name
+		m.APIModel = model.ID
+		m.Provider = ProviderLocal
+		return m
+	} else {
+		return Model{
+			ID:                  ModelID("local." + model.ID),
+			Name:                friendlyModelName(model.ID),
+			Provider:            ProviderLocal,
+			APIModel:            model.ID,
+			ContextWindow:       cmp.Or(model.LoadedContextLength, 4096),
+			DefaultMaxTokens:    cmp.Or(model.LoadedContextLength, 4096),
+			CanReason:           false,
+			SupportsAttachments: false,
+		}
 	}
 }
diff --git a/internal/llm/models/vertexai.go b/internal/llm/models/vertexai.go
index d71dfc0b..0b9ee953 100644
--- a/internal/llm/models/vertexai.go
+++ b/internal/llm/models/vertexai.go
@@ -6,6 +6,8 @@ const (
 	// Models
 	VertexAIGemini25Flash ModelID = "vertexai.gemini-2.5-flash"
 	VertexAIGemini25      ModelID = "vertexai.gemini-2.5"
+	VertexAISonnet4       ModelID = "vertexai.claude-sonnet-4"
+	VertexAIOpus4         ModelID = "vertexai.claude-opus-4"
 )
 
 var VertexAIGeminiModels = map[ModelID]Model{
@@ -36,3 +38,34 @@ var VertexAIGeminiModels = map[ModelID]Model{
 		SupportsAttachments: true,
 	},
 }
+
+var VertexAIAnthropicModels = map[ModelID]Model{
+	VertexAISonnet4: {
+		ID:                  VertexAISonnet4,
+		Name:                "VertexAI: Claude Sonnet 4",
+		Provider:            ProviderVertexAI,
+		APIModel:            "claude-sonnet-4",
+		CostPer1MIn:         AnthropicModels[Claude4Sonnet].CostPer1MIn,
+		CostPer1MInCached:   AnthropicModels[Claude4Sonnet].CostPer1MInCached,
+		CostPer1MOut:        AnthropicModels[Claude4Sonnet].CostPer1MOut,
+		CostPer1MOutCached:  AnthropicModels[Claude4Sonnet].CostPer1MOutCached,
+		ContextWindow:       AnthropicModels[Claude4Sonnet].ContextWindow,
+		DefaultMaxTokens:    AnthropicModels[Claude4Sonnet].DefaultMaxTokens,
+		SupportsAttachments: AnthropicModels[Claude4Sonnet].SupportsAttachments,
+		CanReason:           AnthropicModels[Claude4Sonnet].CanReason,
+	},
+	VertexAIOpus4: {
+		ID:                  VertexAIOpus4,
+		Name:                "VertexAI: Claude Opus 4",
+		Provider:            ProviderVertexAI,
+		APIModel:            "claude-opus-4@20250514",
+		CostPer1MIn:         AnthropicModels[Claude4Opus].CostPer1MIn,
+		CostPer1MInCached:   AnthropicModels[Claude4Opus].CostPer1MInCached,
+		CostPer1MOut:        AnthropicModels[Claude4Opus].CostPer1MOut,
+		CostPer1MOutCached:  AnthropicModels[Claude4Opus].CostPer1MOutCached,
+		ContextWindow:       AnthropicModels[Claude4Opus].ContextWindow,
+		DefaultMaxTokens:    AnthropicModels[Claude4Opus].DefaultMaxTokens,
+		SupportsAttachments: AnthropicModels[Claude4Opus].SupportsAttachments,
+		CanReason:           AnthropicModels[Claude4Opus].CanReason,
+	},
+}
diff --git a/internal/llm/provider/anthropic.go b/internal/llm/provider/anthropic.go
index 213d4b94..0f5a51c6 100644
--- a/internal/llm/provider/anthropic.go
+++ b/internal/llm/provider/anthropic.go
@@ -12,6 +12,7 @@ import (
 	"github.com/anthropics/anthropic-sdk-go"
 	"github.com/anthropics/anthropic-sdk-go/bedrock"
 	"github.com/anthropics/anthropic-sdk-go/option"
+	sdkoption "github.com/anthropics/anthropic-sdk-go/option"
 	"github.com/opencode-ai/opencode/internal/config"
 	"github.com/opencode-ai/opencode/internal/llm/models"
 	toolsPkg "github.com/opencode-ai/opencode/internal/llm/tools"
@@ -20,9 +21,11 @@ import (
 )
 
 type anthropicOptions struct {
-	useBedrock   bool
-	disableCache bool
-	shouldThink  func(userMessage string) bool
+	useBedrock    bool
+	useVertex     bool
+	vertexOptions vertexOptions
+	disableCache  bool
+	shouldThink   func(userMessage string) bool
 }
 
 type AnthropicOption func(*anthropicOptions)
@@ -40,14 +43,43 @@ func newAnthropicClient(opts providerClientOptions) AnthropicClient {
 	for _, o := range opts.anthropicOptions {
 		o(&anthropicOpts)
 	}
+	resolvedBaseURL := ""
 
 	anthropicClientOptions := []option.RequestOption{}
-	if opts.apiKey != "" {
-		anthropicClientOptions = append(anthropicClientOptions, option.WithAPIKey(opts.apiKey))
-	}
 	if anthropicOpts.useBedrock {
 		anthropicClientOptions = append(anthropicClientOptions, bedrock.WithLoadDefaultConfig(context.Background()))
 	}
+	if anthropicOpts.useVertex {
+		middleware := vertexMiddleware(
+			anthropicOpts.vertexOptions.location,
+			anthropicOpts.vertexOptions.projectID,
+		)
+		anthropicClientOptions = append(
+			anthropicClientOptions,
+			sdkoption.WithMiddleware(middleware),
+		)
+		if opts.baseURL == "" {
+			resolvedBaseURL = fmt.Sprintf("https://%s-aiplatform.googleapis.com/", anthropicOpts.vertexOptions.location)
+		} else {
+			resolvedBaseURL = opts.baseURL
+		}
+	}
+
+	if opts.headers != nil {
+		for k, v := range opts.headers {
+			anthropicClientOptions = append(anthropicClientOptions, option.WithHeader(k, v))
+		}
+	}
+	if resolvedBaseURL != "" {
+		anthropicClientOptions = append(anthropicClientOptions, option.WithBaseURL(resolvedBaseURL))
+	} else if opts.baseURL != "" {
+		anthropicClientOptions = append(anthropicClientOptions, option.WithBaseURL(opts.baseURL))
+		if opts.apiKey != "" {
+			anthropicClientOptions = append(anthropicClientOptions, option.WithAuthToken(opts.apiKey))
+		}
+	} else if opts.apiKey != "" {
+		anthropicClientOptions = append(anthropicClientOptions, option.WithAPIKey(opts.apiKey))
+	}
 
 	client := anthropic.NewClient(anthropicClientOptions...)
 	return &anthropicClient{
@@ -261,7 +293,6 @@ func (a *anthropicClient) stream(ctx context.Context, messages []message.Message
 		} else {
 			logging.Debug("Prepared messages", "messages", string(jsonData))
 		}
-
 	}
 	attempts := 0
 	eventChan := make(chan ProviderEvent)
@@ -451,6 +482,9 @@ func (a *anthropicClient) usage(msg anthropic.Message) TokenUsage {
 
 func WithAnthropicBedrock(useBedrock bool) AnthropicOption {
 	return func(options *anthropicOptions) {
+		if useBedrock {
+			options.useVertex = false
+		}
 		options.useBedrock = useBedrock
 	}
 }
@@ -470,3 +504,11 @@ func WithAnthropicShouldThinkFn(fn func(string) bool) AnthropicOption {
 		options.shouldThink = fn
 	}
 }
+
+func WithVertexAI(projectID, location string) AnthropicOption {
+	return func(options *anthropicOptions) {
+		options.useVertex = true
+		options.useBedrock = false
+		options.vertexOptions = vertexOptions{projectID: projectID, location: location}
+	}
+}
diff --git a/internal/llm/provider/gemini.go b/internal/llm/provider/gemini.go
index ebc36119..fa7cfd27 100644
--- a/internal/llm/provider/gemini.go
+++ b/internal/llm/provider/gemini.go
@@ -9,6 +9,7 @@ import (
 	"strings"
 	"time"
 
+	"cloud.google.com/go/auth"
 	"github.com/google/uuid"
 	"github.com/opencode-ai/opencode/internal/config"
 	"github.com/opencode-ai/opencode/internal/llm/tools"
@@ -31,6 +32,14 @@ type geminiClient struct {
 
 type GeminiClient ProviderClient
 
+type tokenProvider struct {
+	value string
+}
+
+func (p *tokenProvider) Token(context.Context) (*auth.Token, error) {
+	return &auth.Token{Value: p.value}, nil
+}
+
 func newGeminiClient(opts providerClientOptions) GeminiClient {
 	geminiOpts := geminiOptions{}
 	for _, o := range opts.geminiOptions {
@@ -183,6 +192,11 @@ func (g *geminiClient) send(ctx context.Context, messages []message.Message, too
 			Parts: []*genai.Part{{Text: g.providerOptions.systemMessage}},
 		},
 	}
+	if len(g.providerOptions.headers) != 0 {
+		config.HTTPOptions = &genai.HTTPOptions{
+			Headers: *g.providerOptions.asHeader(),
+		}
+	}
 	if len(tools) > 0 {
 		config.Tools = g.convertTools(tools)
 	}
@@ -281,6 +295,9 @@ func (g *geminiClient) stream(ctx context.Context, messages []message.Message, t
 
 	go func() {
 		defer close(eventChan)
+		defer logging.RecoverPanic("gemini-client", func() {
+			logging.ErrorPersist("gemini client has failed")
+		})
 		for {
 			attempts++
diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go
index 8a561c77..6310ac7d 100644
--- a/internal/llm/provider/openai.go
+++ b/internal/llm/provider/openai.go
@@ -19,10 +19,8 @@ import (
 )
 
 type openaiOptions struct {
-	baseURL         string
 	disableCache    bool
 	reasoningEffort string
-	extraHeaders    map[string]string
 }
 
 type OpenAIOption func(*openaiOptions)
@@ -47,12 +45,11 @@ func newOpenAIClient(opts providerClientOptions) OpenAIClient {
 	if opts.apiKey != "" {
 		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
 	}
-	if openaiOpts.baseURL != "" {
-		openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(openaiOpts.baseURL))
+	if opts.baseURL != "" {
+		openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(opts.baseURL))
 	}
-
-	if openaiOpts.extraHeaders != nil {
-		for key, value := range openaiOpts.extraHeaders {
+	if opts.headers != nil {
+		for key, value := range opts.headers {
 			openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
 		}
 	}
@@ -393,18 +390,6 @@ func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
 	}
 }
 
-func WithOpenAIBaseURL(baseURL string) OpenAIOption {
-	return func(options *openaiOptions) {
-		options.baseURL = baseURL
-	}
-}
-
-func WithOpenAIExtraHeaders(headers map[string]string) OpenAIOption {
-	return func(options *openaiOptions) {
-		options.extraHeaders = headers
-	}
-}
-
 func WithOpenAIDisableCache() OpenAIOption {
 	return func(options *openaiOptions) {
 		options.disableCache = true
diff --git a/internal/llm/provider/provider.go b/internal/llm/provider/provider.go
index d5be0ba0..521d4529 100644
--- a/internal/llm/provider/provider.go
+++ b/internal/llm/provider/provider.go
@@ -3,6 +3,7 @@ package provider
 import (
 	"context"
 	"fmt"
+	"net/http"
 	"os"
 
 	"github.com/opencode-ai/opencode/internal/llm/models"
@@ -63,6 +64,8 @@ type providerClientOptions struct {
 	model         models.Model
 	maxTokens     int64
 	systemMessage string
+	baseURL       string
+	headers       map[string]string
 
 	anthropicOptions []AnthropicOption
 	openaiOptions    []OpenAIOption
@@ -71,6 +74,17 @@ type providerClientOptions struct {
 	copilotOptions   []CopilotOption
 }
 
+func (opts *providerClientOptions) asHeader() *http.Header {
+	header := http.Header{}
+	if opts.headers == nil {
+		return &header
+	}
+	for k, v := range opts.headers {
+		header.Add(k, v)
+	}
+	return &header
+}
+
 type ProviderClientOption func(*providerClientOptions)
 
 type ProviderClient interface {
@@ -115,9 +129,9 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 			client: newBedrockClient(clientOptions),
 		}, nil
 	case models.ProviderGROQ:
-		clientOptions.openaiOptions = append(clientOptions.openaiOptions,
-			WithOpenAIBaseURL("https://api.groq.com/openai/v1"),
-		)
+		if clientOptions.baseURL == "" {
+			clientOptions.baseURL = "https://api.groq.com/openai/v1"
+		}
 		return &baseProvider[OpenAIClient]{
 			options: clientOptions,
 			client:  newOpenAIClient(clientOptions),
@@ -133,29 +147,30 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 			client: newVertexAIClient(clientOptions),
 		}, nil
 	case models.ProviderOpenRouter:
-		clientOptions.openaiOptions = append(clientOptions.openaiOptions,
-			WithOpenAIBaseURL("https://openrouter.ai/api/v1"),
-			WithOpenAIExtraHeaders(map[string]string{
-				"HTTP-Referer": "opencode.ai",
-				"X-Title":      "OpenCode",
-			}),
-		)
+		if clientOptions.baseURL == "" {
+			clientOptions.baseURL = "https://openrouter.ai/api/v1"
+		}
+		if clientOptions.headers == nil {
+			clientOptions.headers = map[string]string{}
+		}
+		clientOptions.headers["HTTP-Referer"] = "opencode.ai"
clientOptions.headers["X-Title"] = "OpenCode" return &baseProvider[OpenAIClient]{ options: clientOptions, client: newOpenAIClient(clientOptions), }, nil case models.ProviderXAI: - clientOptions.openaiOptions = append(clientOptions.openaiOptions, - WithOpenAIBaseURL("https://api.x.ai/v1"), - ) + if clientOptions.baseURL == "" { + clientOptions.baseURL = "https://api.x.ai/v1" + } return &baseProvider[OpenAIClient]{ options: clientOptions, client: newOpenAIClient(clientOptions), }, nil case models.ProviderLocal: - clientOptions.openaiOptions = append(clientOptions.openaiOptions, - WithOpenAIBaseURL(os.Getenv("LOCAL_ENDPOINT")), - ) + if clientOptions.baseURL == "" { + clientOptions.baseURL = os.Getenv("LOCAL_ENDPOINT") + } return &baseProvider[OpenAIClient]{ options: clientOptions, client: newOpenAIClient(clientOptions), @@ -192,6 +204,18 @@ func (p *baseProvider[C]) StreamResponse(ctx context.Context, messages []message return p.client.stream(ctx, messages, tools) } +func WithBaseURL(baseURL string) ProviderClientOption { + return func(options *providerClientOptions) { + options.baseURL = baseURL + } +} + +func WithHeaders(headers map[string]string) ProviderClientOption { + return func(options *providerClientOptions) { + options.headers = headers + } +} + func WithAPIKey(apiKey string) ProviderClientOption { return func(options *providerClientOptions) { options.apiKey = apiKey diff --git a/internal/llm/provider/vertexai.go b/internal/llm/provider/vertexai.go index 2a13a957..c1d9f981 100644 --- a/internal/llm/provider/vertexai.go +++ b/internal/llm/provider/vertexai.go @@ -1,34 +1,137 @@ package provider import ( + "bytes" "context" + "fmt" + "io" + "net/http" "os" + "strings" + "cloud.google.com/go/auth" + sdkoption "github.com/anthropics/anthropic-sdk-go/option" + "github.com/anthropics/anthropic-sdk-go/vertex" + "github.com/opencode-ai/opencode/internal/llm/models" "github.com/opencode-ai/opencode/internal/logging" + + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" "google.golang.org/genai" ) type VertexAIClient ProviderClient +type vertexOptions struct { + projectID string + location string +} + func newVertexAIClient(opts providerClientOptions) VertexAIClient { + for k := range models.VertexAIAnthropicModels { + if k == opts.model.ID { + logging.Info("Using Anthropic client with VertexAI provider", "model", k) + opts.anthropicOptions = []AnthropicOption{ + WithVertexAI(os.Getenv("VERTEXAI_PROJECT"), os.Getenv("VERTEXAI_LOCATION")), + } + return newAnthropicClient(opts) + } + } + geminiOpts := geminiOptions{} for _, o := range opts.geminiOptions { o(&geminiOpts) } - - client, err := genai.NewClient(context.Background(), &genai.ClientConfig{ + genaiConfig := &genai.ClientConfig{ Project: os.Getenv("VERTEXAI_PROJECT"), Location: os.Getenv("VERTEXAI_LOCATION"), Backend: genai.BackendVertexAI, - }) + } + + if opts.baseURL != "" { + if opts.headers != nil { + header := opts.asHeader() + genaiConfig.HTTPOptions = genai.HTTPOptions{ + BaseURL: opts.baseURL, + Headers: *header, + } + } else { + genaiConfig.HTTPOptions = genai.HTTPOptions{ + BaseURL: opts.baseURL, + } + } + } + + if opts.apiKey != "" { + genaiConfig.Credentials = &auth.Credentials{ + TokenProvider: &tokenProvider{value: opts.apiKey}, + } + } + + client, err := genai.NewClient(context.Background(), genaiConfig) if err != nil { logging.Error("Failed to create VertexAI client", "error", err) return nil } + logging.Info("Using Gemini client with VertexAI provider", "model", opts.model.ID) return &geminiClient{ 
 		providerOptions: opts,
 		options:         geminiOpts,
 		client:          client,
 	}
 }
+
+// NOTE: copied from [here](github.com/anthropics/anthropic-sdk-go/vertex) to make the LiteLLM passthrough work
+func vertexMiddleware(region, projectID string) sdkoption.Middleware {
+	return func(r *http.Request, next sdkoption.MiddlewareNext) (*http.Response, error) {
+		if r.Body != nil {
+			body, err := io.ReadAll(r.Body)
+			if err != nil {
+				return nil, err
+			}
+			r.Body.Close()
+
+			if !gjson.GetBytes(body, "anthropic_version").Exists() {
+				body, _ = sjson.SetBytes(body, "anthropic_version", vertex.DefaultVersion)
+			}
+			if strings.HasSuffix(r.URL.Path, "/v1/messages") && r.Method == http.MethodPost {
+				logging.Debug("vertex_ai message path", "path", r.URL.Path)
+				if projectID == "" {
+					return nil, fmt.Errorf("no projectId was given and it could not be resolved from credentials")
+				}
+
+				model := gjson.GetBytes(body, "model").String()
+				stream := gjson.GetBytes(body, "stream").Bool()
+
+				body, _ = sjson.DeleteBytes(body, "model")
+
+				specifier := "rawPredict"
+				if stream {
+					specifier = "streamRawPredict"
+				}
+				newPath := fmt.Sprintf("/v1/projects/%s/locations/%s/publishers/anthropic/models/%s:%s", projectID, region, model, specifier)
+				r.URL.Path = strings.ReplaceAll(r.URL.Path, "/v1/messages", newPath)
+			}
+
+			if strings.HasSuffix(r.URL.Path, "/v1/messages/count_tokens") && r.Method == http.MethodPost {
+				if projectID == "" {
+					return nil, fmt.Errorf("no projectId was given and it could not be resolved from credentials")
+				}
+
+				newPath := fmt.Sprintf("/v1/projects/%s/locations/%s/publishers/anthropic/models/count-tokens:rawPredict", projectID, region)
+				r.URL.Path = strings.ReplaceAll(r.URL.Path, "/v1/messages/count_tokens", newPath)
+			}
+
+			reader := bytes.NewReader(body)
+			r.Body = io.NopCloser(reader)
+			r.GetBody = func() (io.ReadCloser, error) {
+				_, err := reader.Seek(0, 0)
+				return io.NopCloser(reader), err
+			}
+			r.ContentLength = int64(len(body))
+		}
+
+		return next(r)
+	}
+}
diff --git a/internal/tui/tui.go b/internal/tui/tui.go
index 1c9c2f03..4380af8e 100644
--- a/internal/tui/tui.go
+++ b/internal/tui/tui.go
@@ -33,6 +33,7 @@ type keyMap struct {
 	Filepicker   key.Binding
 	Models       key.Binding
 	SwitchTheme  key.Binding
+	PruneSession key.Binding
 }
 
 type startCompactSessionMsg struct{}
@@ -78,6 +79,11 @@ var keys = keyMap{
 		key.WithKeys("ctrl+t"),
 		key.WithHelp("ctrl+t", "switch theme"),
 	),
+
+	PruneSession: key.NewBinding(
+		key.WithKeys("ctrl+p"),
+		key.WithHelp("ctrl+p", "prune session"),
+	),
 }
 
 var helpEsc = key.NewBinding(
@@ -117,6 +123,9 @@ type appModel struct {
 	showSessionDialog bool
 	sessionDialog     dialog.SessionDialog
 
+	showDeleteSessionDialog bool
+	deleteSessionDialog     dialog.SessionDialog
+
 	showCommandDialog bool
 	commandDialog     dialog.CommandDialog
 	commands          []dialog.Command
@@ -163,6 +172,8 @@ func (a appModel) Init() tea.Cmd {
 	cmds = append(cmds, cmd)
 	cmd = a.themeDialog.Init()
 	cmds = append(cmds, cmd)
+	cmd = a.deleteSessionDialog.Init()
+	cmds = append(cmds, cmd)
 
 	// Check if we should show the init dialog
 	cmds = append(cmds, func() tea.Msg {
@@ -296,6 +307,10 @@ func (a appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		return a, nil
 
 	case dialog.CloseSessionDialogMsg:
+		if a.showDeleteSessionDialog {
+			a.showDeleteSessionDialog = false
+			return a, nil
+		}
 		a.showSessionDialog = false
 		return a, nil
 
@@ -400,6 +415,18 @@ func (a appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			a.selectedSession = msg.Payload
 		}
 	case dialog.SessionSelectedMsg:
+		// if we're in "delete" mode, delete instead of switch
+		if a.showDeleteSessionDialog {
+			a.showDeleteSessionDialog = false
+			return a, func() tea.Msg {
+				ctx := context.Background()
+				if err := a.app.Sessions.Delete(ctx, msg.Session.ID); err != nil {
+					return util.InfoMsg{Type: util.InfoTypeError, Msg: "Delete failed: " + err.Error()}
+				}
+				return util.InfoMsg{Type: util.InfoTypeInfo, Msg: "Session deleted"}
+			}
+		}
+		// otherwise fall through to normal "switch session"
 		a.showSessionDialog = false
 		if a.currentPage == page.ChatPage {
 			return a, util.CmdHandler(chat.SessionSelectedMsg(msg.Session))
@@ -518,6 +545,20 @@ func (a appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 				return a, a.themeDialog.Init()
 			}
 			return a, nil
+		case key.Matches(msg, keys.PruneSession):
+			if a.currentPage == page.ChatPage && !a.showQuit && !a.showPermissions &&
+				!a.showSessionDialog && !a.showCommandDialog {
+				sessions, err := a.app.Sessions.List(context.Background())
+				if err != nil {
+					return a, util.ReportError(err)
+				}
+				if len(sessions) == 0 {
+					return a, util.ReportWarn("No sessions available")
+				}
+				a.deleteSessionDialog.SetSessions(sessions)
+				a.showDeleteSessionDialog = true
+			}
+			return a, nil
 		case key.Matches(msg, returnKey) || key.Matches(msg):
 			if msg.String() == quitKey {
 				if a.currentPage == page.LogsPage {
@@ -616,6 +657,16 @@ func (a appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		}
 	}
 
+	if a.showDeleteSessionDialog {
+		d, cmd := a.deleteSessionDialog.Update(msg)
+		a.deleteSessionDialog = d.(dialog.SessionDialog)
+		cmds = append(cmds, cmd)
+		// block other tea.KeyMsgs
+		if _, ok := msg.(tea.KeyMsg); ok {
+			return a, tea.Batch(cmds...)
+		}
+	}
+
 	if a.showCommandDialog {
 		d, commandCmd := a.commandDialog.Update(msg)
 		a.commandDialog = d.(dialog.CommandDialog)
@@ -824,6 +875,21 @@ func (a appModel) View() string {
 		)
 	}
 
+	if a.showDeleteSessionDialog {
+		overlay := a.deleteSessionDialog.View()
+		row := lipgloss.Height(appView) / 2
+		row -= lipgloss.Height(overlay) / 2
+		col := lipgloss.Width(appView) / 2
+		col -= lipgloss.Width(overlay) / 2
+		appView = layout.PlaceOverlay(
+			col,
+			row,
+			overlay,
+			appView,
+			true,
+		)
+	}
+
 	if a.showModelDialog {
 		overlay := a.modelDialog.View()
 		row := lipgloss.Height(appView) / 2
@@ -906,8 +972,9 @@ func New(app *app.App) tea.Model {
 		status:        core.NewStatusCmp(app.LSPClients),
 		help:          dialog.NewHelpCmp(),
 		quit:          dialog.NewQuitCmp(),
-		sessionDialog: dialog.NewSessionDialogCmp(),
-		commandDialog: dialog.NewCommandDialogCmp(),
+		sessionDialog:       dialog.NewSessionDialogCmp(),
+		deleteSessionDialog: dialog.NewSessionDialogCmp(),
+		commandDialog:       dialog.NewCommandDialogCmp(),
 		modelDialog:   dialog.NewModelDialogCmp(),
 		permissions:   dialog.NewPermissionDialogCmp(),
 		initDialog:    dialog.NewInitDialogCmp(),
diff --git a/opencode-schema.json b/opencode-schema.json
index 406c75f8..757548bb 100644
--- a/opencode-schema.json
+++ b/opencode-schema.json
@@ -12,71 +12,73 @@
       "model": {
         "description": "Model ID for the agent",
         "enum": [
-          "gpt-4.1",
-          "llama-3.3-70b-versatile",
-          "azure.gpt-4.1",
+          "meta-llama/llama-4-scout-17b-16e-instruct",
+          "azure.o1-mini",
+          "azure.gpt-4.1-nano",
+          "openrouter.o3-mini",
+          "azure.gpt-4o-mini",
           "openrouter.gpt-4o",
-          "openrouter.o1-mini",
           "openrouter.claude-3-haiku",
+          "openrouter.gpt-4.1-nano",
+          "grok-3-mini-beta",
           "claude-3-opus",
-          "gpt-4o",
+          "o4-mini",
           "gpt-4o-mini",
           "o1",
-          "meta-llama/llama-4-maverick-17b-128e-instruct",
-          "azure.o3-mini",
+          "gpt-4.5-preview",
+          "o3-mini",
+          "gemini-2.5",
           "openrouter.gpt-4o-mini",
-          "openrouter.o1",
+          "grok-3-mini-fast-beta",
           "claude-3.5-haiku",
-          "o4-mini",
"azure.gpt-4.1-mini", - "openrouter.o3", - "grok-3-beta", - "o3-mini", - "qwen-qwq", - "azure.o1", - "openrouter.gemini-2.5-flash", - "openrouter.gemini-2.5", "o1-mini", - "azure.gpt-4o", - "openrouter.gpt-4.1-mini", - "openrouter.claude-3.5-sonnet", - "openrouter.o3-mini", - "gpt-4.1-mini", - "gpt-4.5-preview", - "gpt-4.1-nano", - "deepseek-r1-distill-llama-70b", - "azure.gpt-4o-mini", - "openrouter.gpt-4.1", - "bedrock.claude-3.7-sonnet", + "gpt-4.1", + "openrouter.claude-3.5-haiku", "claude-3-haiku", - "o3", - "gemini-2.0-flash-lite", + "gemini-2.0-flash", "azure.o3", "azure.gpt-4.5-preview", + "openrouter.o4-mini", + "vertexai.claude-sonnet-4", + "bedrock.claude-3.7-sonnet", + "claude-4-opus", + "azure.gpt-4o", + "azure.gpt-4.1", "openrouter.claude-3-opus", - "grok-3-mini-fast-beta", - "claude-4-sonnet", + "openrouter.gemini-2.5", + "vertexai.gemini-2.5", + "deepseek-r1-distill-llama-70b", + "openrouter.gpt-4.1", + "openrouter.gemini-2.5-flash", + "azure.o1", + "openrouter.gpt-4.5-preview", + "vertexai.gemini-2.5-flash", + "vertexai.claude-opus-4", "azure.o4-mini", + "openrouter.o3", + "grok-3-beta", "grok-3-fast-beta", - "claude-3.5-sonnet", - "azure.o1-mini", + "o3", + "gpt-4.1-mini", + "openrouter.claude-3.5-sonnet", + "openrouter.o1-mini", + "o1-pro", + "gpt-4.1-nano", + "llama-3.3-70b-versatile", + "azure.o3-mini", + "azure.gpt-4.1-mini", + "openrouter.gpt-4.1-mini", "openrouter.claude-3.7-sonnet", - "openrouter.gpt-4.5-preview", - "grok-3-mini-beta", + "claude-3.5-sonnet", + "gemini-2.5-flash", + "meta-llama/llama-4-maverick-17b-128e-instruct", + "openrouter.o1", + "gpt-4o", + "claude-4-sonnet", "claude-3.7-sonnet", - "gemini-2.0-flash", + "qwen-qwq", "openrouter.deepseek-r1-free", - "vertexai.gemini-2.5-flash", - "vertexai.gemini-2.5", - "o1-pro", - "gemini-2.5", - "meta-llama/llama-4-scout-17b-16e-instruct", - "azure.gpt-4.1-nano", - "openrouter.gpt-4.1-nano", - "gemini-2.5-flash", - "openrouter.o4-mini", - "openrouter.claude-3.5-haiku", - "claude-4-opus", + "gemini-2.0-flash-lite", "openrouter.o1-pro", "copilot.gpt-4o", "copilot.gpt-4o-mini", @@ -122,71 +124,73 @@ "model": { "description": "Model ID for the agent", "enum": [ - "gpt-4.1", - "llama-3.3-70b-versatile", - "azure.gpt-4.1", + "meta-llama/llama-4-scout-17b-16e-instruct", + "azure.o1-mini", + "azure.gpt-4.1-nano", + "openrouter.o3-mini", + "azure.gpt-4o-mini", "openrouter.gpt-4o", - "openrouter.o1-mini", "openrouter.claude-3-haiku", + "openrouter.gpt-4.1-nano", + "grok-3-mini-beta", "claude-3-opus", - "gpt-4o", + "o4-mini", "gpt-4o-mini", "o1", - "meta-llama/llama-4-maverick-17b-128e-instruct", - "azure.o3-mini", + "gpt-4.5-preview", + "o3-mini", + "gemini-2.5", "openrouter.gpt-4o-mini", - "openrouter.o1", + "grok-3-mini-fast-beta", "claude-3.5-haiku", - "o4-mini", - "azure.gpt-4.1-mini", - "openrouter.o3", - "grok-3-beta", - "o3-mini", - "qwen-qwq", - "azure.o1", - "openrouter.gemini-2.5-flash", - "openrouter.gemini-2.5", "o1-mini", - "azure.gpt-4o", - "openrouter.gpt-4.1-mini", - "openrouter.claude-3.5-sonnet", - "openrouter.o3-mini", - "gpt-4.1-mini", - "gpt-4.5-preview", - "gpt-4.1-nano", - "deepseek-r1-distill-llama-70b", - "azure.gpt-4o-mini", - "openrouter.gpt-4.1", - "bedrock.claude-3.7-sonnet", + "gpt-4.1", + "openrouter.claude-3.5-haiku", "claude-3-haiku", - "o3", - "gemini-2.0-flash-lite", + "gemini-2.0-flash", "azure.o3", "azure.gpt-4.5-preview", + "openrouter.o4-mini", + "vertexai.claude-sonnet-4", + "bedrock.claude-3.7-sonnet", + "claude-4-opus", + "azure.gpt-4o", + "azure.gpt-4.1", 
"openrouter.claude-3-opus", - "grok-3-mini-fast-beta", - "claude-4-sonnet", + "openrouter.gemini-2.5", + "vertexai.gemini-2.5", + "deepseek-r1-distill-llama-70b", + "openrouter.gpt-4.1", + "openrouter.gemini-2.5-flash", + "azure.o1", + "openrouter.gpt-4.5-preview", + "vertexai.gemini-2.5-flash", + "vertexai.claude-opus-4", "azure.o4-mini", + "openrouter.o3", + "grok-3-beta", "grok-3-fast-beta", - "claude-3.5-sonnet", - "azure.o1-mini", + "o3", + "gpt-4.1-mini", + "openrouter.claude-3.5-sonnet", + "openrouter.o1-mini", + "o1-pro", + "gpt-4.1-nano", + "llama-3.3-70b-versatile", + "azure.o3-mini", + "azure.gpt-4.1-mini", + "openrouter.gpt-4.1-mini", "openrouter.claude-3.7-sonnet", - "openrouter.gpt-4.5-preview", - "grok-3-mini-beta", + "claude-3.5-sonnet", + "gemini-2.5-flash", + "meta-llama/llama-4-maverick-17b-128e-instruct", + "openrouter.o1", + "gpt-4o", + "claude-4-sonnet", "claude-3.7-sonnet", - "gemini-2.0-flash", + "qwen-qwq", "openrouter.deepseek-r1-free", - "vertexai.gemini-2.5-flash", - "vertexai.gemini-2.5", - "o1-pro", - "gemini-2.5", - "meta-llama/llama-4-scout-17b-16e-instruct", - "azure.gpt-4.1-nano", - "openrouter.gpt-4.1-nano", - "gemini-2.5-flash", - "openrouter.o4-mini", - "openrouter.claude-3.5-haiku", - "claude-4-opus", + "gemini-2.0-flash-lite", "openrouter.o1-pro", "copilot.gpt-4o", "copilot.gpt-4o-mini", @@ -367,11 +371,22 @@ "description": "API key for the provider", "type": "string" }, + "baseURL": { + "description": "Base URL for the provider instead of default one", + "type": "string" + }, "disabled": { "default": false, "description": "Whether the provider is disabled", "type": "boolean" }, + "headers": { + "additionalProperties": { + "type": "string" + }, + "description": "Extra headers to attach to request", + "type": "object" + }, "provider": { "description": "Provider type", "enum": [