Skip to content
This repository was archived by the owner on Jul 29, 2025. It is now read-only.
Merged
10 changes: 7 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ OpenCode is a Go-based CLI application that brings AI assistance to your termina
## Features

- **Interactive TUI**: Built with [Bubble Tea](https://github.com/charmbracelet/bubbletea) for a smooth terminal experience
- **Multiple AI Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, AWS Bedrock, Groq, and Azure OpenAI
- **Multiple AI Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, AWS Bedrock, Groq, Azure OpenAI, and OpenRouter
- **Session Management**: Save and manage multiple conversation sessions
- **Tool Integration**: AI can execute commands, search files, and modify code
- **Vim-like Editor**: Integrated editor with text input capabilities
Expand Down Expand Up @@ -97,8 +97,12 @@ You can configure OpenCode using environment variables:
"disabled": false
},
"groq": {
"apiKey": "your-api-key",
"disabled": false
"apiKey": "your-api-key",
"disabled": false
},
"openrouter": {
"apiKey": "your-api-key",
"disabled": false
}
},
"agents": {
Expand Down
1 change: 1 addition & 0 deletions cmd/schema/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,7 @@ func generateSchema() map[string]any {
string(models.ProviderOpenAI),
string(models.ProviderGemini),
string(models.ProviderGROQ),
string(models.ProviderOpenRouter),
string(models.ProviderBedrock),
string(models.ProviderAzure),
}
Expand Down
39 changes: 39 additions & 0 deletions internal/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,15 @@ func setProviderDefaults() {
return
}

// OpenRouter configuration
if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" {
viper.SetDefault("providers.openrouter.apiKey", apiKey)
viper.SetDefault("agents.coder.model", models.OpenRouterClaude37Sonnet)
viper.SetDefault("agents.task.model", models.OpenRouterClaude37Sonnet)
viper.SetDefault("agents.title.model", models.OpenRouterClaude35Haiku)
return
}

// AWS Bedrock configuration
if hasAWSCredentials() {
viper.SetDefault("agents.coder.model", models.BedrockClaude37Sonnet)
Expand Down Expand Up @@ -527,6 +536,8 @@ func getProviderAPIKey(provider models.ModelProvider) string {
return os.Getenv("GROQ_API_KEY")
case models.ProviderAzure:
return os.Getenv("AZURE_OPENAI_API_KEY")
case models.ProviderOpenRouter:
return os.Getenv("OPENROUTER_API_KEY")
case models.ProviderBedrock:
if hasAWSCredentials() {
return "aws-credentials-available"
Expand Down Expand Up @@ -578,6 +589,34 @@ func setDefaultModelForAgent(agent AgentName) bool {
return true
}

if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" {
var model models.ModelID
maxTokens := int64(5000)
reasoningEffort := ""

switch agent {
case AgentTitle:
model = models.OpenRouterClaude35Haiku
maxTokens = 80
case AgentTask:
model = models.OpenRouterClaude37Sonnet
default:
model = models.OpenRouterClaude37Sonnet
}

// Check if model supports reasoning
if modelInfo, ok := models.SupportedModels[model]; ok && modelInfo.CanReason {
reasoningEffort = "medium"
}

cfg.Agents[agent] = Agent{
Model: model,
MaxTokens: maxTokens,
ReasoningEffort: reasoningEffort,
}
return true
}

if apiKey := os.Getenv("GEMINI_API_KEY"); apiKey != "" {
var model models.ModelID
maxTokens := int64(5000)
Expand Down
1 change: 1 addition & 0 deletions internal/llm/models/models.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,4 +86,5 @@ func init() {
maps.Copy(SupportedModels, GeminiModels)
maps.Copy(SupportedModels, GroqModels)
maps.Copy(SupportedModels, AzureModels)
maps.Copy(SupportedModels, OpenRouterModels)
}
262 changes: 262 additions & 0 deletions internal/llm/models/openrouter.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,262 @@
package models

// ProviderOpenRouter identifies models routed through the OpenRouter API.
const ProviderOpenRouter ModelProvider = "openrouter"

// OpenAI models available via OpenRouter.
const (
	OpenRouterGPT41        ModelID = "openrouter.gpt-4.1"
	OpenRouterGPT41Mini    ModelID = "openrouter.gpt-4.1-mini"
	OpenRouterGPT41Nano    ModelID = "openrouter.gpt-4.1-nano"
	OpenRouterGPT45Preview ModelID = "openrouter.gpt-4.5-preview"
	OpenRouterGPT4o        ModelID = "openrouter.gpt-4o"
	OpenRouterGPT4oMini    ModelID = "openrouter.gpt-4o-mini"
	OpenRouterO1           ModelID = "openrouter.o1"
	OpenRouterO1Pro        ModelID = "openrouter.o1-pro"
	OpenRouterO1Mini       ModelID = "openrouter.o1-mini"
	OpenRouterO3           ModelID = "openrouter.o3"
	OpenRouterO3Mini       ModelID = "openrouter.o3-mini"
	OpenRouterO4Mini       ModelID = "openrouter.o4-mini"
)

// Google models available via OpenRouter.
const (
	OpenRouterGemini25Flash ModelID = "openrouter.gemini-2.5-flash"
	OpenRouterGemini25      ModelID = "openrouter.gemini-2.5"
)

// Anthropic models available via OpenRouter.
const (
	OpenRouterClaude35Sonnet ModelID = "openrouter.claude-3.5-sonnet"
	OpenRouterClaude3Haiku   ModelID = "openrouter.claude-3-haiku"
	OpenRouterClaude37Sonnet ModelID = "openrouter.claude-3.7-sonnet"
	OpenRouterClaude35Haiku  ModelID = "openrouter.claude-3.5-haiku"
	OpenRouterClaude3Opus    ModelID = "openrouter.claude-3-opus"
)

// openRouterModel builds an OpenRouter catalog entry that proxies an upstream
// model: it reuses the base model's pricing, context window, and default token
// limit, and only overrides the identity fields (ID, display name, provider,
// and the OpenRouter API model slug).
//
// canReason is passed explicitly rather than copied from base, because the
// original table only propagated the flag for a subset of entries; keeping it
// explicit preserves that behavior exactly.
func openRouterModel(id ModelID, name, apiModel string, base Model, canReason bool) Model {
	return Model{
		ID:                 id,
		Name:               name,
		Provider:           ProviderOpenRouter,
		APIModel:           apiModel,
		CostPer1MIn:        base.CostPer1MIn,
		CostPer1MInCached:  base.CostPer1MInCached,
		CostPer1MOut:       base.CostPer1MOut,
		CostPer1MOutCached: base.CostPer1MOutCached,
		ContextWindow:      base.ContextWindow,
		// Bug fix: the hand-written table omitted DefaultMaxTokens for
		// GPT-4o mini, leaving it at the zero value; the constructor now
		// inherits it from the base model for every entry.
		DefaultMaxTokens: base.DefaultMaxTokens,
		CanReason:        canReason,
	}
}

// OpenRouterModels maps each OpenRouter model ID to its metadata. Every entry
// mirrors the cost and limit data of the corresponding native provider entry
// (OpenAIModels, GeminiModels, AnthropicModels).
var OpenRouterModels = map[ModelID]Model{
	OpenRouterGPT41:        openRouterModel(OpenRouterGPT41, "OpenRouter – GPT 4.1", "openai/gpt-4.1", OpenAIModels[GPT41], false),
	OpenRouterGPT41Mini:    openRouterModel(OpenRouterGPT41Mini, "OpenRouter – GPT 4.1 mini", "openai/gpt-4.1-mini", OpenAIModels[GPT41Mini], false),
	OpenRouterGPT41Nano:    openRouterModel(OpenRouterGPT41Nano, "OpenRouter – GPT 4.1 nano", "openai/gpt-4.1-nano", OpenAIModels[GPT41Nano], false),
	OpenRouterGPT45Preview: openRouterModel(OpenRouterGPT45Preview, "OpenRouter – GPT 4.5 preview", "openai/gpt-4.5-preview", OpenAIModels[GPT45Preview], false),
	OpenRouterGPT4o:        openRouterModel(OpenRouterGPT4o, "OpenRouter – GPT 4o", "openai/gpt-4o", OpenAIModels[GPT4o], false),
	OpenRouterGPT4oMini:    openRouterModel(OpenRouterGPT4oMini, "OpenRouter – GPT 4o mini", "openai/gpt-4o-mini", OpenAIModels[GPT4oMini], false),
	OpenRouterO1:           openRouterModel(OpenRouterO1, "OpenRouter – O1", "openai/o1", OpenAIModels[O1], OpenAIModels[O1].CanReason),
	OpenRouterO1Pro:        openRouterModel(OpenRouterO1Pro, "OpenRouter – o1 pro", "openai/o1-pro", OpenAIModels[O1Pro], OpenAIModels[O1Pro].CanReason),
	OpenRouterO1Mini:       openRouterModel(OpenRouterO1Mini, "OpenRouter – o1 mini", "openai/o1-mini", OpenAIModels[O1Mini], OpenAIModels[O1Mini].CanReason),
	OpenRouterO3:           openRouterModel(OpenRouterO3, "OpenRouter – o3", "openai/o3", OpenAIModels[O3], OpenAIModels[O3].CanReason),
	// NOTE(review): the o3/o4 mini slugs deliberately point at the "-high"
	// reasoning-effort variants on OpenRouter — confirm this is intended.
	OpenRouterO3Mini: openRouterModel(OpenRouterO3Mini, "OpenRouter – o3 mini", "openai/o3-mini-high", OpenAIModels[O3Mini], OpenAIModels[O3Mini].CanReason),
	OpenRouterO4Mini: openRouterModel(OpenRouterO4Mini, "OpenRouter – o4 mini", "openai/o4-mini-high", OpenAIModels[O4Mini], OpenAIModels[O4Mini].CanReason),
	// NOTE(review): the Flash ":thinking" variant does not set CanReason in
	// the original table even though the slug is the thinking route — kept
	// as-is here; verify whether it should inherit the base flag.
	OpenRouterGemini25Flash:  openRouterModel(OpenRouterGemini25Flash, "OpenRouter – Gemini 2.5 Flash", "google/gemini-2.5-flash-preview:thinking", GeminiModels[Gemini25Flash], false),
	OpenRouterGemini25:       openRouterModel(OpenRouterGemini25, "OpenRouter – Gemini 2.5 Pro", "google/gemini-2.5-pro-preview-03-25", GeminiModels[Gemini25], false),
	OpenRouterClaude35Sonnet: openRouterModel(OpenRouterClaude35Sonnet, "OpenRouter – Claude 3.5 Sonnet", "anthropic/claude-3.5-sonnet", AnthropicModels[Claude35Sonnet], false),
	OpenRouterClaude3Haiku:   openRouterModel(OpenRouterClaude3Haiku, "OpenRouter – Claude 3 Haiku", "anthropic/claude-3-haiku", AnthropicModels[Claude3Haiku], false),
	OpenRouterClaude37Sonnet: openRouterModel(OpenRouterClaude37Sonnet, "OpenRouter – Claude 3.7 Sonnet", "anthropic/claude-3.7-sonnet", AnthropicModels[Claude37Sonnet], AnthropicModels[Claude37Sonnet].CanReason),
	OpenRouterClaude35Haiku:  openRouterModel(OpenRouterClaude35Haiku, "OpenRouter – Claude 3.5 Haiku", "anthropic/claude-3.5-haiku", AnthropicModels[Claude35Haiku], false),
	OpenRouterClaude3Opus:    openRouterModel(OpenRouterClaude3Opus, "OpenRouter – Claude 3 Opus", "anthropic/claude-3-opus", AnthropicModels[Claude3Opus], false),
}
Loading