diff --git a/README.md b/README.md
index 1e5433c2e..e94c6cb3a 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ OpenCode is a Go-based CLI application that brings AI assistance to your termina
 ## Features
 
 - **Interactive TUI**: Built with [Bubble Tea](https://github.com/charmbracelet/bubbletea) for a smooth terminal experience
-- **Multiple AI Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, AWS Bedrock, Groq, and Azure OpenAI
+- **Multiple AI Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, AWS Bedrock, Groq, Azure OpenAI, and OpenRouter
 - **Session Management**: Save and manage multiple conversation sessions
 - **Tool Integration**: AI can execute commands, search files, and modify code
 - **Vim-like Editor**: Integrated editor with text input capabilities
@@ -97,8 +97,12 @@ You can configure OpenCode using environment variables:
       "disabled": false
     },
     "groq": {
-      "apiKey": "your-api-key",
-      "disabled": false
+      "apiKey": "your-api-key",
+      "disabled": false
+    },
+    "openrouter": {
+      "apiKey": "your-api-key",
+      "disabled": false
     }
   },
   "agents": {
diff --git a/cmd/schema/main.go b/cmd/schema/main.go
index af9533cf3..cd550d3fe 100644
--- a/cmd/schema/main.go
+++ b/cmd/schema/main.go
@@ -173,6 +173,7 @@ func generateSchema() map[string]any {
 		string(models.ProviderOpenAI),
 		string(models.ProviderGemini),
 		string(models.ProviderGROQ),
+		string(models.ProviderOpenRouter),
 		string(models.ProviderBedrock),
 		string(models.ProviderAzure),
 	}
diff --git a/internal/config/config.go b/internal/config/config.go
index 9aa22bd4e..22781e189 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -267,6 +267,15 @@ func setProviderDefaults() {
 		return
 	}
 
+	// OpenRouter configuration
+	if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" {
+		viper.SetDefault("providers.openrouter.apiKey", apiKey)
+		viper.SetDefault("agents.coder.model", models.OpenRouterClaude37Sonnet)
+		viper.SetDefault("agents.task.model", models.OpenRouterClaude37Sonnet)
+		viper.SetDefault("agents.title.model", models.OpenRouterClaude35Haiku)
+		return
+	}
+
 	// AWS Bedrock configuration
 	if hasAWSCredentials() {
 		viper.SetDefault("agents.coder.model", models.BedrockClaude37Sonnet)
@@ -527,6 +536,8 @@ func getProviderAPIKey(provider models.ModelProvider) string {
 		return os.Getenv("GROQ_API_KEY")
 	case models.ProviderAzure:
 		return os.Getenv("AZURE_OPENAI_API_KEY")
+	case models.ProviderOpenRouter:
+		return os.Getenv("OPENROUTER_API_KEY")
 	case models.ProviderBedrock:
 		if hasAWSCredentials() {
 			return "aws-credentials-available"
@@ -578,6 +589,34 @@ func setDefaultModelForAgent(agent AgentName) bool {
 		return true
 	}
 
+	if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" {
+		var model models.ModelID
+		maxTokens := int64(5000)
+		reasoningEffort := ""
+
+		switch agent {
+		case AgentTitle:
+			model = models.OpenRouterClaude35Haiku
+			maxTokens = 80
+		case AgentTask:
+			model = models.OpenRouterClaude37Sonnet
+		default:
+			model = models.OpenRouterClaude37Sonnet
+		}
+
+		// Check if model supports reasoning
+		if modelInfo, ok := models.SupportedModels[model]; ok && modelInfo.CanReason {
+			reasoningEffort = "medium"
+		}
+
+		cfg.Agents[agent] = Agent{
+			Model:           model,
+			MaxTokens:       maxTokens,
+			ReasoningEffort: reasoningEffort,
+		}
+		return true
+	}
+
 	if apiKey := os.Getenv("GEMINI_API_KEY"); apiKey != "" {
 		var model models.ModelID
 		maxTokens := int64(5000)
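Note on the `setProviderDefaults` hunk above: precedence is ordering-based. The first provider whose API key is present in the environment wins, because each branch returns immediately after seeding its defaults, so OpenRouter now takes priority over Bedrock, Azure, and anything checked later. A minimal, self-contained sketch of that pattern using viper (key names mirror the patch; the API key value is a placeholder):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	// Placeholder key; any non-empty value triggers the OpenRouter branch.
	os.Setenv("OPENROUTER_API_KEY", "sk-or-example")

	// Mirrors setProviderDefaults: the early return means providers checked
	// later (Bedrock, Azure, ...) never override these defaults.
	if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" {
		viper.SetDefault("providers.openrouter.apiKey", apiKey)
		viper.SetDefault("agents.coder.model", "openrouter.claude-3.7-sonnet")
		fmt.Println(viper.GetString("agents.coder.model"))
		return
	}
}
```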
diff --git a/internal/llm/models/models.go b/internal/llm/models/models.go
index 5dd28359f..2c5e61d7e 100644
--- a/internal/llm/models/models.go
+++ b/internal/llm/models/models.go
@@ -86,4 +86,5 @@ func init() {
 	maps.Copy(SupportedModels, GeminiModels)
 	maps.Copy(SupportedModels, GroqModels)
 	maps.Copy(SupportedModels, AzureModels)
+	maps.Copy(SupportedModels, OpenRouterModels)
 }
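For context, `maps.Copy` (Go 1.21+) overwrites duplicate keys in the destination, so registration order would matter if two provider tables ever shared a ModelID; the `openrouter.` prefix on every new ID rules that out. A standalone sketch with simplified stand-in types:

```go
package main

import (
	"fmt"
	"maps"
)

type Model struct{ Name string }

func main() {
	// Simplified stand-ins for the models package's per-provider tables.
	SupportedModels := map[string]Model{"gpt-4o": {Name: "GPT-4o"}}
	OpenRouterModels := map[string]Model{"openrouter.gpt-4o": {Name: "OpenRouter GPT 4o"}}

	// Same call as the init() hunk above: merge OpenRouter into the registry.
	maps.Copy(SupportedModels, OpenRouterModels)
	fmt.Println(len(SupportedModels)) // 2: prefixed IDs cannot collide
}
```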
diff --git a/internal/llm/models/openrouter.go b/internal/llm/models/openrouter.go
new file mode 100644
index 000000000..6fc2a2119
--- /dev/null
+++ b/internal/llm/models/openrouter.go
@@ -0,0 +1,263 @@
+package models
+
+const (
+	ProviderOpenRouter ModelProvider = "openrouter"
+
+	OpenRouterGPT41          ModelID = "openrouter.gpt-4.1"
+	OpenRouterGPT41Mini      ModelID = "openrouter.gpt-4.1-mini"
+	OpenRouterGPT41Nano      ModelID = "openrouter.gpt-4.1-nano"
+	OpenRouterGPT45Preview   ModelID = "openrouter.gpt-4.5-preview"
+	OpenRouterGPT4o          ModelID = "openrouter.gpt-4o"
+	OpenRouterGPT4oMini      ModelID = "openrouter.gpt-4o-mini"
+	OpenRouterO1             ModelID = "openrouter.o1"
+	OpenRouterO1Pro          ModelID = "openrouter.o1-pro"
+	OpenRouterO1Mini         ModelID = "openrouter.o1-mini"
+	OpenRouterO3             ModelID = "openrouter.o3"
+	OpenRouterO3Mini         ModelID = "openrouter.o3-mini"
+	OpenRouterO4Mini         ModelID = "openrouter.o4-mini"
+	OpenRouterGemini25Flash  ModelID = "openrouter.gemini-2.5-flash"
+	OpenRouterGemini25       ModelID = "openrouter.gemini-2.5"
+	OpenRouterClaude35Sonnet ModelID = "openrouter.claude-3.5-sonnet"
+	OpenRouterClaude3Haiku   ModelID = "openrouter.claude-3-haiku"
+	OpenRouterClaude37Sonnet ModelID = "openrouter.claude-3.7-sonnet"
+	OpenRouterClaude35Haiku  ModelID = "openrouter.claude-3.5-haiku"
+	OpenRouterClaude3Opus    ModelID = "openrouter.claude-3-opus"
+)
+
+var OpenRouterModels = map[ModelID]Model{
+	OpenRouterGPT41: {
+		ID:                 OpenRouterGPT41,
+		Name:               "OpenRouter – GPT 4.1",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/gpt-4.1",
+		CostPer1MIn:        OpenAIModels[GPT41].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT41].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT41].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT41].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT41].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT41].DefaultMaxTokens,
+	},
+	OpenRouterGPT41Mini: {
+		ID:                 OpenRouterGPT41Mini,
+		Name:               "OpenRouter – GPT 4.1 mini",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/gpt-4.1-mini",
+		CostPer1MIn:        OpenAIModels[GPT41Mini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT41Mini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT41Mini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT41Mini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT41Mini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT41Mini].DefaultMaxTokens,
+	},
+	OpenRouterGPT41Nano: {
+		ID:                 OpenRouterGPT41Nano,
+		Name:               "OpenRouter – GPT 4.1 nano",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/gpt-4.1-nano",
+		CostPer1MIn:        OpenAIModels[GPT41Nano].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT41Nano].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT41Nano].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT41Nano].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT41Nano].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT41Nano].DefaultMaxTokens,
+	},
+	OpenRouterGPT45Preview: {
+		ID:                 OpenRouterGPT45Preview,
+		Name:               "OpenRouter – GPT 4.5 preview",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/gpt-4.5-preview",
+		CostPer1MIn:        OpenAIModels[GPT45Preview].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT45Preview].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT45Preview].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT45Preview].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT45Preview].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT45Preview].DefaultMaxTokens,
+	},
+	OpenRouterGPT4o: {
+		ID:                 OpenRouterGPT4o,
+		Name:               "OpenRouter – GPT 4o",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/gpt-4o",
+		CostPer1MIn:        OpenAIModels[GPT4o].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT4o].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT4o].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT4o].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT4o].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT4o].DefaultMaxTokens,
+	},
+	OpenRouterGPT4oMini: {
+		ID:                 OpenRouterGPT4oMini,
+		Name:               "OpenRouter – GPT 4o mini",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/gpt-4o-mini",
+		CostPer1MIn:        OpenAIModels[GPT4oMini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT4oMini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT4oMini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT4oMini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT4oMini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT4oMini].DefaultMaxTokens,
+	},
+	OpenRouterO1: {
+		ID:                 OpenRouterO1,
+		Name:               "OpenRouter – O1",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/o1",
+		CostPer1MIn:        OpenAIModels[O1].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O1].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O1].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O1].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O1].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O1].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O1].CanReason,
+	},
+	OpenRouterO1Pro: {
+		ID:                 OpenRouterO1Pro,
+		Name:               "OpenRouter – o1 pro",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/o1-pro",
+		CostPer1MIn:        OpenAIModels[O1Pro].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O1Pro].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O1Pro].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O1Pro].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O1Pro].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O1Pro].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O1Pro].CanReason,
+	},
+	OpenRouterO1Mini: {
+		ID:                 OpenRouterO1Mini,
+		Name:               "OpenRouter – o1 mini",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/o1-mini",
+		CostPer1MIn:        OpenAIModels[O1Mini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O1Mini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O1Mini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O1Mini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O1Mini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O1Mini].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O1Mini].CanReason,
+	},
+	OpenRouterO3: {
+		ID:                 OpenRouterO3,
+		Name:               "OpenRouter – o3",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/o3",
+		CostPer1MIn:        OpenAIModels[O3].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O3].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O3].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O3].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O3].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O3].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O3].CanReason,
+	},
+	OpenRouterO3Mini: {
+		ID:                 OpenRouterO3Mini,
+		Name:               "OpenRouter – o3 mini",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/o3-mini-high",
+		CostPer1MIn:        OpenAIModels[O3Mini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O3Mini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O3Mini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O3Mini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O3Mini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O3Mini].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O3Mini].CanReason,
+	},
+	OpenRouterO4Mini: {
+		ID:                 OpenRouterO4Mini,
+		Name:               "OpenRouter – o4 mini",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "openai/o4-mini-high",
+		CostPer1MIn:        OpenAIModels[O4Mini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O4Mini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O4Mini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O4Mini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O4Mini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O4Mini].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O4Mini].CanReason,
+	},
+	OpenRouterGemini25Flash: {
+		ID:                 OpenRouterGemini25Flash,
+		Name:               "OpenRouter – Gemini 2.5 Flash",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "google/gemini-2.5-flash-preview:thinking",
+		CostPer1MIn:        GeminiModels[Gemini25Flash].CostPer1MIn,
+		CostPer1MInCached:  GeminiModels[Gemini25Flash].CostPer1MInCached,
+		CostPer1MOut:       GeminiModels[Gemini25Flash].CostPer1MOut,
+		CostPer1MOutCached: GeminiModels[Gemini25Flash].CostPer1MOutCached,
+		ContextWindow:      GeminiModels[Gemini25Flash].ContextWindow,
+		DefaultMaxTokens:   GeminiModels[Gemini25Flash].DefaultMaxTokens,
+	},
+	OpenRouterGemini25: {
+		ID:                 OpenRouterGemini25,
+		Name:               "OpenRouter – Gemini 2.5 Pro",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "google/gemini-2.5-pro-preview-03-25",
+		CostPer1MIn:        GeminiModels[Gemini25].CostPer1MIn,
+		CostPer1MInCached:  GeminiModels[Gemini25].CostPer1MInCached,
+		CostPer1MOut:       GeminiModels[Gemini25].CostPer1MOut,
+		CostPer1MOutCached: GeminiModels[Gemini25].CostPer1MOutCached,
+		ContextWindow:      GeminiModels[Gemini25].ContextWindow,
+		DefaultMaxTokens:   GeminiModels[Gemini25].DefaultMaxTokens,
+	},
+	OpenRouterClaude35Sonnet: {
+		ID:                 OpenRouterClaude35Sonnet,
+		Name:               "OpenRouter – Claude 3.5 Sonnet",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "anthropic/claude-3.5-sonnet",
+		CostPer1MIn:        AnthropicModels[Claude35Sonnet].CostPer1MIn,
+		CostPer1MInCached:  AnthropicModels[Claude35Sonnet].CostPer1MInCached,
+		CostPer1MOut:       AnthropicModels[Claude35Sonnet].CostPer1MOut,
+		CostPer1MOutCached: AnthropicModels[Claude35Sonnet].CostPer1MOutCached,
+		ContextWindow:      AnthropicModels[Claude35Sonnet].ContextWindow,
+		DefaultMaxTokens:   AnthropicModels[Claude35Sonnet].DefaultMaxTokens,
+	},
+	OpenRouterClaude3Haiku: {
+		ID:                 OpenRouterClaude3Haiku,
+		Name:               "OpenRouter – Claude 3 Haiku",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "anthropic/claude-3-haiku",
+		CostPer1MIn:        AnthropicModels[Claude3Haiku].CostPer1MIn,
+		CostPer1MInCached:  AnthropicModels[Claude3Haiku].CostPer1MInCached,
+		CostPer1MOut:       AnthropicModels[Claude3Haiku].CostPer1MOut,
+		CostPer1MOutCached: AnthropicModels[Claude3Haiku].CostPer1MOutCached,
+		ContextWindow:      AnthropicModels[Claude3Haiku].ContextWindow,
+		DefaultMaxTokens:   AnthropicModels[Claude3Haiku].DefaultMaxTokens,
+	},
+	OpenRouterClaude37Sonnet: {
+		ID:                 OpenRouterClaude37Sonnet,
+		Name:               "OpenRouter – Claude 3.7 Sonnet",
+		Provider:           ProviderOpenRouter,
+		APIModel:           "anthropic/claude-3.7-sonnet",
+		CostPer1MIn:        AnthropicModels[Claude37Sonnet].CostPer1MIn,
+		CostPer1MInCached:  AnthropicModels[Claude37Sonnet].CostPer1MInCached,
+		CostPer1MOut:       AnthropicModels[Claude37Sonnet].CostPer1MOut,
+		CostPer1MOutCached: AnthropicModels[Claude37Sonnet].CostPer1MOutCached,
+		ContextWindow:      AnthropicModels[Claude37Sonnet].ContextWindow,
+		DefaultMaxTokens:   AnthropicModels[Claude37Sonnet].DefaultMaxTokens,
+		CanReason:          AnthropicModels[Claude37Sonnet].CanReason,
+	},
+	OpenRouterClaude35Haiku: {
+		ID:                 OpenRouterClaude35Haiku,
+		Name:               "OpenRouter – Claude 3.5 Haiku",
Haiku", + Provider: ProviderOpenRouter, + APIModel: "anthropic/claude-3.5-haiku", + CostPer1MIn: AnthropicModels[Claude35Haiku].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude35Haiku].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude35Haiku].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude35Haiku].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude35Haiku].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude35Haiku].DefaultMaxTokens, + }, + OpenRouterClaude3Opus: { + ID: OpenRouterClaude3Opus, + Name: "OpenRouter – Claude 3 Opus", + Provider: ProviderOpenRouter, + APIModel: "anthropic/claude-3-opus", + CostPer1MIn: AnthropicModels[Claude3Opus].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude3Opus].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude3Opus].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude3Opus].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude3Opus].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude3Opus].DefaultMaxTokens, + }, +} diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go index 4d45aebfa..b557df535 100644 --- a/internal/llm/provider/openai.go +++ b/internal/llm/provider/openai.go @@ -21,6 +21,7 @@ type openaiOptions struct { baseURL string disableCache bool reasoningEffort string + extraHeaders map[string]string } type OpenAIOption func(*openaiOptions) @@ -49,6 +50,12 @@ func newOpenAIClient(opts providerClientOptions) OpenAIClient { openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(openaiOpts.baseURL)) } + if openaiOpts.extraHeaders != nil { + for key, value := range openaiOpts.extraHeaders { + openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value)) + } + } + client := openai.NewClient(openaiClientOptions...) 
diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go
index 4d45aebfa..b557df535 100644
--- a/internal/llm/provider/openai.go
+++ b/internal/llm/provider/openai.go
@@ -21,6 +21,7 @@ type openaiOptions struct {
 	baseURL         string
 	disableCache    bool
 	reasoningEffort string
+	extraHeaders    map[string]string
 }
 
 type OpenAIOption func(*openaiOptions)
@@ -49,6 +50,12 @@ func newOpenAIClient(opts providerClientOptions) OpenAIClient {
 		openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(openaiOpts.baseURL))
 	}
 
+	if openaiOpts.extraHeaders != nil {
+		for key, value := range openaiOpts.extraHeaders {
+			openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
+		}
+	}
+
 	client := openai.NewClient(openaiClientOptions...)
 
 	return &openaiClient{
 		providerOptions: opts,
@@ -204,11 +211,18 @@ func (o *openaiClient) send(ctx context.Context, messages []message.Message, too
 			content = openaiResponse.Choices[0].Message.Content
 		}
 
+		toolCalls := o.toolCalls(*openaiResponse)
+		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))
+
+		if len(toolCalls) > 0 {
+			finishReason = message.FinishReasonToolUse
+		}
+
 		return &ProviderResponse{
 			Content:      content,
-			ToolCalls:    o.toolCalls(*openaiResponse),
+			ToolCalls:    toolCalls,
 			Usage:        o.usage(*openaiResponse),
-			FinishReason: o.finishReason(string(openaiResponse.Choices[0].FinishReason)),
+			FinishReason: finishReason,
 		}, nil
 	}
 }
@@ -267,13 +281,19 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 			err := openaiStream.Err()
 			if err == nil || errors.Is(err, io.EOF) {
 				// Stream completed successfully
+				finishReason := o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason))
+
+				if len(toolCalls) > 0 {
+					finishReason = message.FinishReasonToolUse
+				}
+
 				eventChan <- ProviderEvent{
 					Type: EventComplete,
 					Response: &ProviderResponse{
 						Content:      currentContent,
 						ToolCalls:    toolCalls,
 						Usage:        o.usage(acc.ChatCompletion),
-						FinishReason: o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason)),
+						FinishReason: finishReason,
 					},
 				}
 				close(eventChan)
@@ -375,6 +395,12 @@ func WithOpenAIBaseURL(baseURL string) OpenAIOption {
 	}
 }
 
+func WithOpenAIExtraHeaders(headers map[string]string) OpenAIOption {
+	return func(options *openaiOptions) {
+		options.extraHeaders = headers
+	}
+}
+
 func WithOpenAIDisableCache() OpenAIOption {
 	return func(options *openaiOptions) {
 		options.disableCache = true
diff --git a/internal/llm/provider/provider.go b/internal/llm/provider/provider.go
index 737b6fb00..1545bc27a 100644
--- a/internal/llm/provider/provider.go
+++ b/internal/llm/provider/provider.go
@@ -120,6 +120,18 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 			options: clientOptions,
 			client:  newAzureClient(clientOptions),
 		}, nil
+	case models.ProviderOpenRouter:
+		clientOptions.openaiOptions = append(clientOptions.openaiOptions,
+			WithOpenAIBaseURL("https://openrouter.ai/api/v1"),
+			WithOpenAIExtraHeaders(map[string]string{
+				"HTTP-Referer": "opencode.ai",
+				"X-Title":      "OpenCode",
+			}),
+		)
+		return &baseProvider[OpenAIClient]{
+			options: clientOptions,
+			client:  newOpenAIClient(clientOptions),
+		}, nil
 	case models.ProviderMock:
 		// TODO: implement mock client for test
 		panic("not implemented")
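The provider case above reuses the OpenAI client wholesale: OpenRouter exposes an OpenAI-compatible API, so only the base URL and two attribution headers differ. (The finishReason override earlier in openai.go also matters here: when tool calls are present the response is reported as FinishReasonToolUse, presumably because some OpenAI-compatible backends routed through OpenRouter label such turns "stop".) A roughly equivalent standalone construction, using the same openai-go options the patch threads through newOpenAIClient:

```go
package main

import (
	"os"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
)

func main() {
	// Same endpoint and headers NewProvider injects for ProviderOpenRouter.
	client := openai.NewClient(
		option.WithAPIKey(os.Getenv("OPENROUTER_API_KEY")),
		option.WithBaseURL("https://openrouter.ai/api/v1"),
		option.WithHeader("HTTP-Referer", "opencode.ai"),
		option.WithHeader("X-Title", "OpenCode"),
	)
	_ = client // ready for chat completion calls against OpenRouter
}
```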
[ - "low", - "medium", - "high" - ], + "enum": ["low", "medium", "high"], "type": "string" } }, - "required": [ - "model" - ], + "required": ["model"], "type": "object" } }, @@ -120,23 +135,38 @@ "gpt-4.1-mini", "azure.gpt-4.1-mini", "gemini-2.5", - "meta-llama/llama-4-scout-17b-16e-instruct" + "meta-llama/llama-4-scout-17b-16e-instruct", + "openrouter.deepseek-chat-free", + "openrouter.deepseek-r1-free", + "openrouter.gpt-4.1", + "openrouter.gpt-4.1-mini", + "openrouter.gpt-4.1-nano", + "openrouter.gpt-4.5-preview", + "openrouter.gpt-4o", + "openrouter.gpt-4o-mini", + "openrouter.o1", + "openrouter.o1-pro", + "openrouter.o1-mini", + "openrouter.o3", + "openrouter.o3-mini", + "openrouter.o4-mini", + "openrouter.gemini-2.5-flash", + "openrouter.gemini-2.5", + "openrouter.claude-3.5-sonnet", + "openrouter.claude-3-haiku", + "openrouter.claude-3.7-sonnet", + "openrouter.claude-3.5-haiku", + "openrouter.claude-3-opus" ], "type": "string" }, "reasoningEffort": { "description": "Reasoning effort for models that support it (OpenAI, Anthropic)", - "enum": [ - "low", - "medium", - "high" - ], + "enum": ["low", "medium", "high"], "type": "string" } }, - "required": [ - "model" - ], + "required": ["model"], "type": "object" }, "description": "Agent configurations", @@ -182,9 +212,7 @@ "type": "string" } }, - "required": [ - "directory" - ], + "required": ["directory"], "type": "object" }, "debug": { @@ -222,9 +250,7 @@ "type": "object" } }, - "required": [ - "command" - ], + "required": ["command"], "type": "object" }, "description": "Language Server Protocol configurations", @@ -262,10 +288,7 @@ "type": { "default": "stdio", "description": "Type of MCP server", - "enum": [ - "stdio", - "sse" - ], + "enum": ["stdio", "sse"], "type": "string" }, "url": { @@ -273,9 +296,7 @@ "type": "string" } }, - "required": [ - "command" - ], + "required": ["command"], "type": "object" }, "description": "Model Control Protocol server configurations", @@ -302,7 +323,8 @@ "gemini", "groq", "bedrock", - "azure" + "azure", + "openrouter" ], "type": "string" }