diff --git a/apps/agent-tars/src/main/ipcRoutes/llm.ts b/apps/agent-tars/src/main/ipcRoutes/llm.ts
index 4f33e3d8b..ad26b1cf4 100644
--- a/apps/agent-tars/src/main/ipcRoutes/llm.ts
+++ b/apps/agent-tars/src/main/ipcRoutes/llm.ts
@@ -52,6 +52,7 @@ export const llmRoute = t.router({
         messages,
         requestId: input.requestId,
       });
+      logger.info('[llmRoute.askLLMText] response', response);
       return response;
     }),
@@ -65,7 +66,6 @@ export const llmRoute = t.router({
     .handle(async ({ input }) => {
       logger.info('[llmRoute.askLLMTool] input', input);
       const messages = input.messages.map((msg) => new Message(msg));
-      const llm = createLLM(currentLLMConfigRef.current);
       logger.info(
         '[llmRoute.askLLMTool] Current LLM Config',
         maskSensitiveData(currentLLMConfigRef.current),
       );
@@ -74,6 +74,7 @@
         '[llmRoute.askLLMTool] Current Search Config',
         maskSensitiveData(SettingStore.get('search')),
       );
+      const llm = createLLM(currentLLMConfigRef.current);
       logger.info('[llmRoute.askLLMTool] tools', extractToolNames(input.tools));
       const response = await llm.askTool({
         messages,
@@ -81,6 +82,7 @@ export const llmRoute = t.router({
         mcpServerKeys: input.mcpServerKeys,
         requestId: input.requestId,
       });
+      logger.info('[llmRoute.askLLMTool] response', response);
       return response;
     }),
@@ -104,6 +106,7 @@ export const llmRoute = t.router({
       const windows = BrowserWindow.getAllWindows();
       try {
         const stream = llm.askLLMTextStream({ messages, requestId });
+        logger.info('[llmRoute.askLLMTextStream] stream', !!stream);
 
         for await (const chunk of stream) {
           if (!windows.length) {
diff --git a/apps/agent-tars/src/main/ipcRoutes/settings.ts b/apps/agent-tars/src/main/ipcRoutes/settings.ts
index bcc09942d..b7f3e0706 100644
--- a/apps/agent-tars/src/main/ipcRoutes/settings.ts
+++ b/apps/agent-tars/src/main/ipcRoutes/settings.ts
@@ -1,25 +1,46 @@
 import { AppSettings } from '@agent-infra/shared';
 import { SettingStore } from '@main/store/setting';
 import { initIpc } from '@ui-tars/electron-ipc/main';
+import { logger } from '@main/utils/logger';
+import { maskSensitiveData } from '@main/utils/maskSensitiveData';
 
 const t = initIpc.create();
 
 export const settingsRoute = t.router({
   getSettings: t.procedure.input().handle(async () => {
-    return SettingStore.getStore();
+    const settings = SettingStore.getStore();
+    // Too many calls; keep this log commented out to avoid redundant entries.
+    // logger.info(
+    //   '[settingsRoute.getSettings] result',
+    //   maskSensitiveData(settings),
+    // );
+    return settings;
   }),
   getFileSystemSettings: t.procedure.input().handle(async () => {
-    return SettingStore.get('fileSystem');
+    const settings = SettingStore.get('fileSystem');
+    logger.info(
+      '[settingsRoute.getFileSystemSettings] result',
+      maskSensitiveData(settings),
+    );
+    return settings;
   }),
   updateAppSettings: t.procedure
     .input<AppSettings>()
     .handle(async ({ input }) => {
+      logger.info(
+        '[settingsRoute.updateAppSettings]',
+        maskSensitiveData(input),
+      );
       SettingStore.setStore(input);
       return true;
     }),
   updateFileSystemSettings: t.procedure
     .input()
     .handle(async ({ input }) => {
+      logger.info(
+        '[settingsRoute.updateFileSystemSettings]',
+        maskSensitiveData(input),
+      );
       SettingStore.set('fileSystem', input);
       return true;
     }),
diff --git a/apps/agent-tars/src/main/llmProvider/ProviderFactory.ts b/apps/agent-tars/src/main/llmProvider/ProviderFactory.ts
index 510e1f343..3b36ca5c5 100644
--- a/apps/agent-tars/src/main/llmProvider/ProviderFactory.ts
+++ b/apps/agent-tars/src/main/llmProvider/ProviderFactory.ts
@@ -5,6 +5,7 @@ import { AzureOpenAIProvider } from './providers/AzureOpenAIProvider';
 import { GeminiProvider } from './providers/GeminiProvider';
 import { MistralProvider } from './providers/MistralProvider';
 import { logger } from '@main/utils/logger';
+import { maskSensitiveData } from '@main/utils/maskSensitiveData';
 
 // Define model prefixes that will be used to determine the provider
 const MODEL_PREFIXES = {
@@ -80,6 +81,11 @@ export class ProviderFactory {
     providerName: string,
     config: LLMConfig,
   ): LLMProvider {
+    logger.info(
+      '[createProviderByName]',
+      providerName,
+      maskSensitiveData(config),
+    );
     switch (providerName.toLowerCase()) {
       case 'openai':
         return new OpenAIProvider(config);
diff --git a/apps/agent-tars/src/main/llmProvider/index.ts b/apps/agent-tars/src/main/llmProvider/index.ts
index e55c3887b..d83663ff1 100644
--- a/apps/agent-tars/src/main/llmProvider/index.ts
+++ b/apps/agent-tars/src/main/llmProvider/index.ts
@@ -34,6 +34,7 @@ export class LLM {
 
   constructor(config: LLMConfig = {}) {
     this.config = config;
+    // FIXME: config.configName does not exist !!!
     // Create provider instance based on the model or explicit provider setting
     this.provider = ProviderFactory.createProvider(config, config.configName);
   }
diff --git a/apps/agent-tars/src/main/llmProvider/providers/AnthropicProvider.ts b/apps/agent-tars/src/main/llmProvider/providers/AnthropicProvider.ts
index dedbcec2c..f14a5b0e9 100644
--- a/apps/agent-tars/src/main/llmProvider/providers/AnthropicProvider.ts
+++ b/apps/agent-tars/src/main/llmProvider/providers/AnthropicProvider.ts
@@ -3,6 +3,8 @@ import { Message, ToolCall } from '@agent-infra/shared';
 import { ChatCompletionTool } from 'openai/resources';
 import { BaseProvider } from './BaseProvider';
 import { LLMConfig, LLMResponse, ToolChoice } from '../interfaces/LLMProvider';
+import { logger } from '@main/utils/logger';
+import { maskSensitiveData } from '@main/utils/maskSensitiveData';
 
 /**
  * Helper to convert OpenAI tool format to Anthropic tool format
@@ -42,6 +44,7 @@ export class AnthropicProvider extends BaseProvider {
       );
     }
 
+    logger.info('[AnthropicProvider]', maskSensitiveData({ apiKey, baseURL }));
     this.client = new Anthropic({
       apiKey,
       ...(baseURL && { baseURL }),
diff --git a/apps/agent-tars/src/main/llmProvider/providers/AzureOpenAIProvider.ts b/apps/agent-tars/src/main/llmProvider/providers/AzureOpenAIProvider.ts
index 2d47363fa..89f9f7b82 100644
--- a/apps/agent-tars/src/main/llmProvider/providers/AzureOpenAIProvider.ts
+++ b/apps/agent-tars/src/main/llmProvider/providers/AzureOpenAIProvider.ts
@@ -5,6 +5,9 @@ import { ChatCompletionTool } from 'openai/resources';
 import { BaseProvider } from './BaseProvider';
 import { LLMConfig, LLMResponse, ToolChoice } from '../interfaces/LLMProvider';
 
+import { logger } from '@main/utils/logger';
+import { maskSensitiveData } from '@main/utils/maskSensitiveData';
+
 /**
  * Azure OpenAI provider implementation
  */
@@ -37,6 +40,11 @@ export class AzureOpenAIProvider extends BaseProvider {
       );
     }
 
+    logger.info(
+      '[AzureOpenAIProvider]',
+      maskSensitiveData({ apiKey, apiVersion, endpoint }),
+    );
+
     this.client = new AzureOpenAI({ apiKey, apiVersion, endpoint });
     this.model = config.model || process.env.AZURE_OPENAI_MODEL || 'gpt-4o';
   }
diff --git a/apps/agent-tars/src/main/llmProvider/providers/GeminiProvider.ts b/apps/agent-tars/src/main/llmProvider/providers/GeminiProvider.ts
index eea65b2c8..5527e6f1c 100644
--- a/apps/agent-tars/src/main/llmProvider/providers/GeminiProvider.ts
+++ b/apps/agent-tars/src/main/llmProvider/providers/GeminiProvider.ts
@@ -8,6 +8,8 @@ import {
 } from '@google/generative-ai';
 import { BaseProvider } from './BaseProvider';
 import { LLMConfig, LLMResponse, ToolChoice } from '../interfaces/LLMProvider';
+import { logger } from '@main/utils/logger';
+import { maskSensitiveData } from '@main/utils/maskSensitiveData';
 
 /**
  * Google Gemini provider implementation
@@ -28,12 +30,19 @@ export class GeminiProvider extends BaseProvider {
       );
     }
 
+    logger.info('[GeminiProvider]', maskSensitiveData({ apiKey }));
+
     // Initialize client with the actual SDK
     this.client = new GoogleGenerativeAI(apiKey);
 
     // Set default model or use provided one
     this.model =
       config.model || process.env.GEMINI_DEFAULT_MODEL || 'gemini-1.5-pro';
+
+    logger.info(
+      '[GeminiProvider]',
+      maskSensitiveData({ apiKey, model: this.model }),
+    );
   }
 
   /**
diff --git a/apps/agent-tars/src/main/llmProvider/providers/MistralProvider.ts b/apps/agent-tars/src/main/llmProvider/providers/MistralProvider.ts
index 8b54c977c..8da3efa1a 100644
--- a/apps/agent-tars/src/main/llmProvider/providers/MistralProvider.ts
+++ b/apps/agent-tars/src/main/llmProvider/providers/MistralProvider.ts
@@ -7,6 +7,8 @@ import { Mistral } from '@mistralai/mistralai';
 import { BaseProvider } from './BaseProvider';
 import { LLMConfig, LLMResponse, ToolChoice } from '../interfaces/LLMProvider';
 import { ChatCompletionResponse } from '@mistralai/mistralai/models/components';
+import { logger } from '@main/utils/logger';
+import { maskSensitiveData } from '@main/utils/maskSensitiveData';
 
 /**
  * Mistral provider implementation
@@ -37,6 +39,11 @@ export class MistralProvider extends BaseProvider {
       config.model ||
       process.env.MISTRAL_DEFAULT_MODEL ||
       'mistral-large-latest';
+
+    logger.info(
+      '[MistralProvider]',
+      maskSensitiveData({ apiKey, model: this.model }),
+    );
   }
 
   /**
diff --git a/apps/agent-tars/src/main/llmProvider/providers/OpenAIProvider.ts b/apps/agent-tars/src/main/llmProvider/providers/OpenAIProvider.ts
index 1f7974f50..76a21d197 100644
--- a/apps/agent-tars/src/main/llmProvider/providers/OpenAIProvider.ts
+++ b/apps/agent-tars/src/main/llmProvider/providers/OpenAIProvider.ts
@@ -3,6 +3,8 @@ import { Message, ToolCall } from '@agent-infra/shared';
 import { ChatCompletionTool } from 'openai/resources';
 import { BaseProvider } from './BaseProvider';
 import { LLMConfig, LLMResponse, ToolChoice } from '../interfaces/LLMProvider';
+import { logger } from '@main/utils/logger';
+import { maskSensitiveData } from '@main/utils/maskSensitiveData';
 
 /**
  * OpenAI provider implementation
@@ -24,6 +26,7 @@ export class OpenAIProvider extends BaseProvider {
       );
     }
 
+    logger.info('[OpenAIProvider]', maskSensitiveData({ apiKey, baseURL }));
     this.client = new OpenAI({ apiKey, baseURL });
     this.model = config.model || process.env.OPENAI_DEFAULT_MODEL || 'gpt-4o';
   }
diff --git a/apps/agent-tars/src/main/store/setting.ts b/apps/agent-tars/src/main/store/setting.ts
index d77c0f8ec..c49995a1d 100644
--- a/apps/agent-tars/src/main/store/setting.ts
+++ b/apps/agent-tars/src/main/store/setting.ts
@@ -13,6 +13,7 @@ import {
   AppSettings,
 } from '@agent-infra/shared';
 import { logger } from '@main/utils/logger';
+import { maskSensitiveData } from '@main/utils/maskSensitiveData';
 
 const DEFAULT_MODEL_SETTINGS: ModelSettings = {
   provider: ModelProvider.OPENAI,
@@ -49,7 +50,10 @@ export class SettingStore {
 
     SettingStore.instance.onDidAnyChange((newValue, oldValue) => {
       logger.info(
-        `SettingStore: ${JSON.stringify(oldValue)} changed to ${JSON.stringify(newValue)}`,
+        `[SettingStore] Before: ${JSON.stringify(maskSensitiveData(oldValue!))}`,
+      );
+      logger.info(
+        `[SettingStore] After: ${JSON.stringify(maskSensitiveData(newValue!))}`,
      );
       // Notify that value updated
       BrowserWindow.getAllWindows().forEach((win) => {
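
Note: every log statement added in this patch routes its payload through maskSensitiveData
from '@main/utils/maskSensitiveData', but that helper's implementation is not part of the
diff. The sketch below is a minimal TypeScript illustration of what such a helper might
look like, assuming shallow, key-name-based redaction; the key list and the mask format
are assumptions for illustration, not the project's actual code.

    // Hypothetical sketch only -- not the implementation shipped in this patch.
    const SENSITIVE_KEYS = ['apikey', 'api_key', 'token', 'secret', 'password'];

    export function maskSensitiveData<T extends Record<string, unknown>>(data: T): T {
      // Shallow copy so logging never mutates the caller's object.
      const masked: Record<string, unknown> = { ...data };
      for (const key of Object.keys(masked)) {
        if (SENSITIVE_KEYS.some((s) => key.toLowerCase().includes(s))) {
          const value = masked[key];
          // Keep a short prefix so distinct credentials stay distinguishable in logs.
          masked[key] =
            typeof value === 'string' && value.length > 4
              ? `${value.slice(0, 4)}***`
              : '***';
        }
      }
      return masked as T;
    }

Under these assumptions, a call like logger.info('[OpenAIProvider]',
maskSensitiveData({ apiKey, baseURL })) logs the baseURL unchanged while reducing the API
key to its first four characters plus '***'.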