From fb90c9e337d93407896d5ec9dc933eac0381d41d Mon Sep 17 00:00:00 2001
From: wenzhengjiang
Date: Fri, 29 Aug 2025 16:31:49 +0900
Subject: [PATCH 01/32] Add memory setting

---
 src/constants.ts                              |  2 ++
 src/settings/model.ts                         | 17 ++++++++++++++
 .../v2/components/CopilotPlusSettings.tsx     | 23 +++++++++++++++++++
 3 files changed, 42 insertions(+)

diff --git a/src/constants.ts b/src/constants.ts
index 97c971b4a..a9a687ede 100644
--- a/src/constants.ts
+++ b/src/constants.ts
@@ -749,6 +749,8 @@ export const DEFAULT_SETTINGS: CopilotSettings = {
   ],
   reasoningEffort: DEFAULT_MODEL_SETTING.REASONING_EFFORT,
   verbosity: DEFAULT_MODEL_SETTING.VERBOSITY,
+  memoryFolderName: "copilot-memory",
+  enableMemory: false,
 };
 
 export const EVENT_NAMES = {
diff --git a/src/settings/model.ts b/src/settings/model.ts
index 2dd131245..c4eee53c7 100644
--- a/src/settings/model.ts
+++ b/src/settings/model.ts
@@ -129,6 +129,10 @@ export interface CopilotSettings {
   reasoningEffort: "minimal" | "low" | "medium" | "high";
   /** Default verbosity level for models that support it */
   verbosity: "low" | "medium" | "high";
+  /** Folder where memory data is stored */
+  memoryFolderName: string;
+  /** Enable memory feature to build user memory from conversation history */
+  enableMemory: boolean;
 }
 
 export const settingsStore = createStore();
@@ -298,6 +302,19 @@ export function sanitizeSettings(settings: CopilotSettings): CopilotSettings {
       DEFAULT_SETTINGS.autonomousAgentEnabledToolIds;
   }
 
+  // Ensure memoryFolderName has a default value
+  if (
+    !sanitizedSettings.memoryFolderName ||
+    typeof sanitizedSettings.memoryFolderName !== "string"
+  ) {
+    sanitizedSettings.memoryFolderName = DEFAULT_SETTINGS.memoryFolderName;
+  }
+
+  // Ensure enableMemory has a default value
+  if (typeof sanitizedSettings.enableMemory !== "boolean") {
+    sanitizedSettings.enableMemory = DEFAULT_SETTINGS.enableMemory;
+  }
+
   return sanitizedSettings;
 }
 
diff --git a/src/settings/v2/components/CopilotPlusSettings.tsx b/src/settings/v2/components/CopilotPlusSettings.tsx
index abcb74fd7..b230cd228 100644
--- a/src/settings/v2/components/CopilotPlusSettings.tsx
+++ b/src/settings/v2/components/CopilotPlusSettings.tsx
@@ -116,6 +116,29 @@ export const CopilotPlusSettings: React.FC = () => {
         )}
       </section>
 
+      <section className="tw-space-y-4">
+        <div className="tw-text-xl tw-font-bold">Memory</div>
+        <SettingItem
+          type="switch"
+          title="Enable Memory"
+          description="Enable memory feature to build user memory from conversation history"
+          checked={settings.enableMemory}
+          onCheckedChange={(checked) => {
+            updateSetting("enableMemory", checked);
+          }}
+        />
+        <SettingItem
+          type="text"
+          title="Memory Folder Name"
+          description="Folder where memory data is stored"
+          value={settings.memoryFolderName}
+          onChange={(value) => {
+            updateSetting("memoryFolderName", value);
+          }}
+          placeholder="copilot-memory"
+        />
+      </section>
+
       <section className="tw-space-y-4">
         <div className="tw-text-xl tw-font-bold">Autocomplete</div>

From: wenzhengjiang
Date: Fri, 29 Aug 2025 18:31:34 +0900
Subject: [PATCH 02/32] Support recent conversation in memory

---
 src/components/Chat.tsx       |  12 +++
 src/main.ts                   |  10 ++
 src/memory/MemoryManager.ts   | 185 ++++++++++++++++++++++++++++++++++
 src/settings/SettingsPage.tsx |  14 ++-
 4 files changed, 220 insertions(+), 1 deletion(-)
 create mode 100644 src/memory/MemoryManager.ts

diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx
index 7ec012c24..3ecc68ecc 100644
--- a/src/components/Chat.tsx
+++ b/src/components/Chat.tsx
@@ -11,6 +11,7 @@ import {
 import { ChainType } from "@/chainFactory";
 import { useProjectContextStatus } from "@/hooks/useProjectContextStatus";
 import { logInfo } from "@/logger";
+import { updateMemoryWithConversation } from "@/memory/MemoryManager";
 
 import { ChatControls, reloadCurrentProject } from "@/components/chat-components/ChatControls";
 import ChatInput from "@/components/chat-components/ChatInput";
@@ -538,6 +539,15 @@ const Chat: React.FC = ({
   const handleNewChat = useCallback(async () => {
     handleStopGenerating(ABORT_REASON.NEW_CHAT);
 
+    // Analyze chat messages for memory if enabled
+    if (settings.enableMemory) {
+      try {
+        await updateMemoryWithConversation(app, chatUIState.getMessages());
+      } catch (error) {
+        logInfo("Failed to analyze chat messages for memory:", error);
+      }
+    }
+
     // First autosave the current chat if the setting is enabled
     if (settings.autosaveChat) {
       await handleSaveAsNote();
@@ -561,10 +571,12 @@ const Chat: React.FC = ({
     handleStopGenerating,
     chatUIState,
     settings.autosaveChat,
+    settings.enableMemory,
     settings.includeActiveNoteAsContext,
     selectedChain,
     handleSaveAsNote,
     safeSet,
+    app,
   ]);
 
   const handleLoadHistory = useCallback(() => {
diff --git a/src/main.ts b/src/main.ts
index 90da3b82d..244279817 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -18,6 +18,7 @@ import { ChatManager } from "@/core/ChatManager";
 import { MessageRepository } from "@/core/MessageRepository";
 import { encryptAllKeys } from "@/encryptionService";
 import { logInfo } from "@/logger";
+import { updateMemoryWithConversation } from "@/memory/MemoryManager";
 import { checkIsPlusUser } from "@/plusUtils";
 import VectorStoreManager from "@/search/vectorStoreManager";
 import { CopilotSettingTab } from "@/settings/SettingsPage";
@@ -378,6 +379,15 @@ export default class CopilotPlugin extends Plugin {
   }
 
   async handleNewChat() {
+    // Analyze chat messages for memory if enabled
+    if (getSettings().enableMemory) {
+      try {
+        await updateMemoryWithConversation(this.app, this.chatUIState.getMessages());
+      } catch (error) {
+        logInfo("Failed to analyze chat messages for memory:", error);
+      }
+    }
+
     // First autosave the current chat if the setting is enabled
     await this.autosaveCurrentChat();
 
diff --git a/src/memory/MemoryManager.ts b/src/memory/MemoryManager.ts
new file mode 100644
index 000000000..b934c47ec
--- /dev/null
+++ b/src/memory/MemoryManager.ts
@@ -0,0 +1,185 @@
+import { App, TFile } from "obsidian";
+import { ChatMessage } from "@/types/message";
+import { logInfo, logError } from "@/logger";
+import { USER_SENDER } from "@/constants";
+import { getSettings } from "@/settings/model";
+
+/**
+ * Memory Management Functions
+ *
+ * Pure functions for analyzing chat messages and storing useful information in memory files.
+ * Main exported function: updateMemoryWithConversation
+ */
+
+/**
+ * Analyze chat messages and store useful information in memory files
+ */
+export async function updateMemoryWithConversation(
+  app: App,
+  messages: ChatMessage[]
+): Promise<void> {
+  try {
+    const settings = getSettings();
+
+    // Only proceed if memory is enabled
+    if (!settings.enableMemory) {
+      logInfo("[MemoryManager] Memory is disabled, skipping analysis");
+      return;
+    }
+
+    if (messages.length === 0) {
+      logInfo("[MemoryManager] No messages to analyze for memory");
+      return;
+    }
+
+    // Extract only user messages for analysis
+    const userMessages = messages.filter((message) => message.sender === USER_SENDER);
+
+    if (userMessages.length === 0) {
+      logInfo("[MemoryManager] No user messages found to analyze for memory");
+      return;
+    }
+
+    // Format user messages for memory storage
+    const conversationLine = createConversationContent(userMessages);
+
+    // Ensure memory folder exists
+    await ensureMemoryFolderExists(app);
+
+    // Save to memory file
+    await saveToMemoryFile(app, conversationLine);
+  } catch (error) {
+    logError("[MemoryManager] Error analyzing chat messages for memory:", error);
+  }
+}
+
+/**
+ * Create conversation content
+ * Format: - timestamp||||message1||||message2||||message3...
+ */
+function createConversationContent(userMessages: ChatMessage[]): string {
+  const timestamp = new Date().toISOString().split(".")[0]; // Remove milliseconds and Z
+
+  // Extract just the message text from user messages
+  const messageTexts = userMessages.map((message) => message.message);
+
+  // Join with |||| separator as specified
+  return `- ${timestamp}||||${messageTexts.join("||||")}`;
+}
+
+/**
+ * Ensure the memory folder exists
+ */
+async function ensureMemoryFolderExists(app: App): Promise<void> {
+  const settings = getSettings();
+  const memoryFolderPath = settings.memoryFolderName;
+
+  const folder = app.vault.getAbstractFileByPath(memoryFolderPath);
+  if (!folder) {
+    await app.vault.createFolder(memoryFolderPath);
+    logInfo(`[MemoryManager] Created memory folder: ${memoryFolderPath}`);
+  }
+}
+
+/**
+ * Save content to the memory file by appending new conversation
+ */
+async function saveToMemoryFile(app: App, newConversationLine: string): Promise<void> {
+  const settings = getSettings();
+  const memoryFolderPath = settings.memoryFolderName;
+  const memoryFilePath = `${memoryFolderPath}/recent_conversation_content.md`;
+
+  try {
+    const existingFile = app.vault.getAbstractFileByPath(memoryFilePath);
+
+    if (existingFile instanceof TFile) {
+      // Read existing content and append new conversation
+      const existingContent = await app.vault.read(existingFile);
+      const updatedContent = existingContent.trim() + "\n" + newConversationLine;
+      await app.vault.modify(existingFile, updatedContent);
+      logInfo(`[MemoryManager] Appended conversation to existing memory file: ${memoryFilePath}`);
+    } else {
+      // Create new file with first conversation
+      await app.vault.create(memoryFilePath, newConversationLine);
+      logInfo(`[MemoryManager] Created new memory file: ${memoryFilePath}`);
+    }
+  } catch (error) {
+    logError("[MemoryManager] Error saving to memory file:", error);
+    throw error;
+  }
+}
+
+/**
+ * Get the path to the memory file
+ */
+function getMemoryFilePath(): string {
+  const settings = getSettings();
+  return `${settings.memoryFolderName}/recent_conversation_context.txt`;
+}
+
+/**
+ * Check if memory functionality is enabled
+ */
+export function isMemoryEnabled(): boolean {
+  return getSettings().enableMemory;
+}
+
+/**
+ * Read existing memory content
+ */
+async function readMemoryContent(app: App): Promise<string | null> {
+  try {
+    const memoryFilePath = getMemoryFilePath();
+    const memoryFile = app.vault.getAbstractFileByPath(memoryFilePath);
+
+    if (memoryFile instanceof TFile) {
+      const content = await app.vault.read(memoryFile);
+      return content;
+    }
+
+    return null;
+  } catch (error) {
+    logError("[MemoryManager] Error reading memory content:", error);
+    return null;
+  }
+}
+
+/**
+ * Parse stored conversation lines into structured format
+ */
+export interface StoredConversation {
+  timestamp: string;
+  userMessages: string[];
+}
+
+/**
+ * Read and parse all stored conversations
+ */
+export async function getStoredConversations(app: App): Promise<StoredConversation[]> {
+  try {
+    const content = await readMemoryContent(app);
+    if (!content) {
+      return [];
+    }
+
+    const lines = content
+      .trim()
+      .split("\n")
+      .filter((line) => line.trim());
+    const conversations: StoredConversation[] = [];
+
+    for (const line of lines) {
+      const parts = line.split("||||");
+      if (parts.length >= 2) {
+        const timestamp = parts[0];
+        const userMessages = parts.slice(1); // All parts after timestamp are user messages
+        conversations.push({ timestamp, userMessages });
+      }
+    }
+
+    return conversations;
+  } catch (error) {
+    logError("[MemoryManager] Error parsing stored conversations:", error);
+    return [];
+  }
+}
diff --git a/src/settings/SettingsPage.tsx b/src/settings/SettingsPage.tsx
index 0c63145da..d2df0e0d9 100644
--- a/src/settings/SettingsPage.tsx
+++ b/src/settings/SettingsPage.tsx
@@ -2,6 +2,8 @@ import CopilotView from "@/components/CopilotView";
 import { CHAT_VIEWTYPE } from "@/constants";
 import CopilotPlugin from "@/main";
 import { getSettings } from "@/settings/model";
+import { updateMemoryWithConversation } from "@/memory/MemoryManager";
+import { logInfo } from "@/logger";
 import { App, Notice, PluginSettingTab } from "obsidian";
 import React from "react";
 import { createRoot } from "react-dom/client";
@@ -18,8 +20,18 @@ export class CopilotSettingTab extends PluginSettingTab {
 
   async reloadPlugin() {
     try {
-      // Autosave the current chat before reloading
       const chatView = this.app.workspace.getLeavesOfType(CHAT_VIEWTYPE)[0]?.view as CopilotView;
+
+      // Analyze chat messages for memory if enabled
+      if (chatView && getSettings().enableMemory) {
+        try {
+          await updateMemoryWithConversation(this.app, this.plugin.chatUIState.getMessages());
+        } catch (error) {
+          logInfo("Failed to analyze chat messages for memory:", error);
+        }
+      }
+
+      // Autosave the current chat before reloading
       if (chatView && getSettings().autosaveChat) {
         await this.plugin.autosaveCurrentChat();
       }

From a51bbbec39c0125c6d17efc8f636f9312c1a0076 Mon Sep 17 00:00:00 2001
From: wenzhengjiang
Date: Sat, 30 Aug 2025 18:08:14 +0900
Subject: [PATCH 03/32] Add conversation summary to recent conversations

---
 src/components/Chat.tsx         |   7 +-
 src/main.ts                     |  11 +-
 src/memory/MemoryManager.ts     | 185 ----------------------------
 src/memory/UserMemoryManager.ts | 208 ++++++++++++++++++++++++++++++++
 src/settings/SettingsPage.tsx   |  11 +-
 5 files changed, 231 insertions(+), 191 deletions(-)
 delete mode 100644 src/memory/MemoryManager.ts
 create mode 100644 src/memory/UserMemoryManager.ts

diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx
index 3ecc68ecc..feed63435 100644
--- a/src/components/Chat.tsx
+++ b/src/components/Chat.tsx
@@ -11,7 +11,7 @@ import {
 import { ChainType } from "@/chainFactory";
 import { useProjectContextStatus } from "@/hooks/useProjectContextStatus";
 import { logInfo } from "@/logger";
-import { updateMemoryWithConversation } from "@/memory/MemoryManager";
+import { UserMemoryManager } from "@/memory/UserMemoryManager";
 
 import { ChatControls, reloadCurrentProject } from "@/components/chat-components/ChatControls";
 import ChatInput from "@/components/chat-components/ChatInput";
@@ -542,7 +542,9 @@ const Chat: React.FC = ({
     // Analyze chat messages for memory if enabled
     if (settings.enableMemory) {
       try {
-        await updateMemoryWithConversation(app, chatUIState.getMessages());
+        // Get the current chat model from the chain manager
+        const chatModel = chainManager.chatModelManager.getChatModel();
+        UserMemoryManager.updateRecentConversations(app, chatUIState.getMessages(), chatModel);
       } catch (error) {
         logInfo("Failed to analyze chat messages for memory:", error);
       }
@@ -569,6 +571,7 @@ const Chat: React.FC = ({
     }
   }, [
     handleStopGenerating,
+    chainManager.chatModelManager,
     chatUIState,
     settings.autosaveChat,
     settings.enableMemory,
diff --git a/src/main.ts b/src/main.ts
index 244279817..cec3ae25e 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -18,7 +18,7 @@ import { ChatManager } from "@/core/ChatManager";
 import { MessageRepository } from "@/core/MessageRepository";
 import { encryptAllKeys } from "@/encryptionService";
 import { logInfo } from "@/logger";
-import { updateMemoryWithConversation } from "@/memory/MemoryManager";
+import { UserMemoryManager } from "@/memory/UserMemoryManager";
 import { checkIsPlusUser } from "@/plusUtils";
 import VectorStoreManager from "@/search/vectorStoreManager";
 import { CopilotSettingTab } from "@/settings/SettingsPage";
@@ -382,7 +382,14 @@ export default class CopilotPlugin extends Plugin {
     // Analyze chat messages for memory if enabled
     if (getSettings().enableMemory) {
       try {
-        await updateMemoryWithConversation(this.app, this.chatUIState.getMessages());
+        // Get the current chat model from the chain manager
+        const chainManager = this.projectManager.getCurrentChainManager();
+        const chatModel = chainManager.chatModelManager.getChatModel();
+        UserMemoryManager.updateRecentConversations(
+          this.app,
+          this.chatUIState.getMessages(),
+          chatModel
+        );
       } catch (error) {
         logInfo("Failed to analyze chat messages for memory:", error);
       }
diff --git a/src/memory/MemoryManager.ts b/src/memory/MemoryManager.ts
deleted file mode 100644
index b934c47ec..000000000
--- a/src/memory/MemoryManager.ts
+++ /dev/null
@@ -1,185 +0,0 @@
-import { App, TFile } from "obsidian";
-import { ChatMessage } from "@/types/message";
-import { logInfo, logError } from "@/logger";
-import { USER_SENDER } from "@/constants";
-import { getSettings } from "@/settings/model";
-
-/**
- * Memory Management Functions
- *
- * Pure functions for analyzing chat messages and storing useful information in memory files.
- * Main exported function: updateMemoryWithConversation
- */
-
-/**
- * Analyze chat messages and store useful information in memory files
- */
-export async function updateMemoryWithConversation(
-  app: App,
-  messages: ChatMessage[]
-): Promise<void> {
-  try {
-    const settings = getSettings();
-
-    // Only proceed if memory is enabled
-    if (!settings.enableMemory) {
-      logInfo("[MemoryManager] Memory is disabled, skipping analysis");
-      return;
-    }
-
-    if (messages.length === 0) {
-      logInfo("[MemoryManager] No messages to analyze for memory");
-      return;
-    }
-
-    // Extract only user messages for analysis
-    const userMessages = messages.filter((message) => message.sender === USER_SENDER);
-
-    if (userMessages.length === 0) {
-      logInfo("[MemoryManager] No user messages found to analyze for memory");
-      return;
-    }
-
-    // Format user messages for memory storage
-    const conversationLine = createConversationContent(userMessages);
-
-    // Ensure memory folder exists
-    await ensureMemoryFolderExists(app);
-
-    // Save to memory file
-    await saveToMemoryFile(app, conversationLine);
-  } catch (error) {
-    logError("[MemoryManager] Error analyzing chat messages for memory:", error);
-  }
-}
-
-/**
- * Create conversation content
- * Format: - timestamp||||message1||||message2||||message3...
- */
-function createConversationContent(userMessages: ChatMessage[]): string {
-  const timestamp = new Date().toISOString().split(".")[0]; // Remove milliseconds and Z
-
-  // Extract just the message text from user messages
-  const messageTexts = userMessages.map((message) => message.message);
-
-  // Join with |||| separator as specified
-  return `- ${timestamp}||||${messageTexts.join("||||")}`;
-}
-
-/**
- * Ensure the memory folder exists
- */
-async function ensureMemoryFolderExists(app: App): Promise<void> {
-  const settings = getSettings();
-  const memoryFolderPath = settings.memoryFolderName;
-
-  const folder = app.vault.getAbstractFileByPath(memoryFolderPath);
-  if (!folder) {
-    await app.vault.createFolder(memoryFolderPath);
-    logInfo(`[MemoryManager] Created memory folder: ${memoryFolderPath}`);
-  }
-}
-
-/**
- * Save content to the memory file by appending new conversation
- */
-async function saveToMemoryFile(app: App, newConversationLine: string): Promise<void> {
-  const settings = getSettings();
-  const memoryFolderPath = settings.memoryFolderName;
-  const memoryFilePath = `${memoryFolderPath}/recent_conversation_content.md`;
-
-  try {
-    const existingFile = app.vault.getAbstractFileByPath(memoryFilePath);
-
-    if (existingFile instanceof TFile) {
-      // Read existing content and append new conversation
-      const existingContent = await app.vault.read(existingFile);
-      const updatedContent = existingContent.trim() + "\n" + newConversationLine;
-      await app.vault.modify(existingFile, updatedContent);
-      logInfo(`[MemoryManager] Appended conversation to existing memory file: ${memoryFilePath}`);
-    } else {
-      // Create new file with first conversation
-      await app.vault.create(memoryFilePath, newConversationLine);
-      logInfo(`[MemoryManager] Created new memory file: ${memoryFilePath}`);
-    }
-  } catch (error) {
-    logError("[MemoryManager] Error saving to memory file:", error);
-    throw error;
-  }
-}
-
-/**
- * Get the path to the memory file
- */
-function getMemoryFilePath(): string {
-  const settings = getSettings();
-  return `${settings.memoryFolderName}/recent_conversation_context.txt`;
-}
-
-/**
- * Check if memory functionality is enabled
- */
-export function isMemoryEnabled(): boolean {
-  return getSettings().enableMemory;
-}
-
-/**
- * Read existing memory content
- */
-async function readMemoryContent(app: App): Promise<string | null> {
-  try {
-    const memoryFilePath = getMemoryFilePath();
-    const memoryFile = app.vault.getAbstractFileByPath(memoryFilePath);
-
-    if (memoryFile instanceof TFile) {
-      const content = await app.vault.read(memoryFile);
-      return content;
-    }
-
-    return null;
-  } catch (error) {
-    logError("[MemoryManager] Error reading memory content:", error);
-    return null;
-  }
-}
-
-/**
- * Parse stored conversation lines into structured format
- */
-export interface StoredConversation {
-  timestamp: string;
-  userMessages: string[];
-}
-
-/**
- * Read and parse all stored conversations
- */
-export async function getStoredConversations(app: App): Promise<StoredConversation[]> {
-  try {
-    const content = await readMemoryContent(app);
-    if (!content) {
-      return [];
-    }
-
-    const lines = content
-      .trim()
-      .split("\n")
-      .filter((line) => line.trim());
-    const conversations: StoredConversation[] = [];
-
-    for (const line of lines) {
-      const parts = line.split("||||");
-      if (parts.length >= 2) {
-        const timestamp = parts[0];
-        const userMessages = parts.slice(1); // All parts after timestamp are user messages
-        conversations.push({ timestamp, userMessages });
-      }
-    }
-
-    return conversations;
-  } catch (error) {
-    logError("[MemoryManager] Error parsing stored conversations:", error);
-    return [];
-  }
-}
diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts
new file mode 100644
index 000000000..b8d517cdd
--- /dev/null
+++ b/src/memory/UserMemoryManager.ts
@@ -0,0 +1,208 @@
+import { App, TFile } from "obsidian";
+import { ChatMessage } from "@/types/message";
+import { logInfo, logError } from "@/logger";
+import { USER_SENDER } from "@/constants";
+import { getSettings } from "@/settings/model";
+import { BaseChatModel } from "@langchain/core/language_models/chat_models";
+import { HumanMessage, SystemMessage } from "@langchain/core/messages";
+
+const MAX_CONVERSATION_LINES = 40;
+/**
+ * User Memory Management Class
+ *
+ * Static methods for building and managing user memory based on conversations.
+ * The UserMemoryManager has methods to add recent conversations, user facts to the user memory
+ * which can then be used to personalize LLM response.
+ */ +export class UserMemoryManager { + /** + * Runs the user memory operation in the background without blocking execution + */ + static updateRecentConversations( + app: App, + messages: ChatMessage[], + chatModel?: BaseChatModel + ): void { + // Fire and forget - run in background + console.log("[UserMemoryManager] Adding to user memory", messages); + updateRecentConversations(app, messages, chatModel) + .catch((error) => { + logError("[UserMemoryManager] Background user memory operation failed:", error); + }) + .finally(() => { + console.log("[UserMemoryManager] Added to user memory"); + }); + } + + /** + * Get user memory prompt + */ + static async getUserMemoryPrompt(app: App): Promise { + try { + const recentConversationFile = app.vault.getAbstractFileByPath( + getRecentConversationFilePath() + ); + let recentConversationContent: string | null = null; + if (recentConversationFile instanceof TFile) { + const content = await app.vault.read(recentConversationFile); + recentConversationContent = content; + } + + return ` + # Recent Conversation Content + ${recentConversationContent} + `; + } catch (error) { + logError("[UserMemoryManager] Error reading user memory content:", error); + return null; + } + } +} + +/** + * Get the path to the user memory file + */ +function getRecentConversationFilePath(): string { + const settings = getSettings(); + return `${settings.memoryFolderName}/recent_conversation_context.md`; +} + +/** + * Ensure the user memory folder exists + */ +async function ensureMemoryFolderExists(app: App): Promise { + const settings = getSettings(); + const memoryFolderPath = settings.memoryFolderName; + + const folder = app.vault.getAbstractFileByPath(memoryFolderPath); + if (!folder) { + await app.vault.createFolder(memoryFolderPath); + logInfo(`[UserMemoryManager] Created user memory folder: ${memoryFolderPath}`); + } +} + +/** + * Save content to the user memory file by appending new conversation + */ +async function addToRecentConversationFile(app: App, newConversation: string): Promise { + const memoryFilePath = getRecentConversationFilePath(); + const newConversationLine = `- ${newConversation}`; + + try { + const existingFile = app.vault.getAbstractFileByPath(memoryFilePath); + + if (existingFile instanceof TFile) { + // Read existing conversation lines, append the new line. + // Make sure the content lines do not exceed 40 lines. If it does, remove the first line. + const fileContent = await app.vault.read(existingFile); + const lines = fileContent.split("\n"); + lines.push(newConversationLine); + + if (lines.length > MAX_CONVERSATION_LINES) { + // Remove the first line to keep within 40 lines limit + lines.shift(); + } + + const updatedContent = lines.join("\n"); + await app.vault.modify(existingFile, updatedContent); + } else { + await app.vault.create(memoryFilePath, newConversationLine); + } + } catch (error) { + logError(`[UserMemoryManager] Error saving to user memory file ${memoryFilePath}:`, error); + throw error; + } +} + +/** + * Summarize conversation using LLM into a few words + */ +async function summarizeConversation( + messageTexts: string[], + chatModel: BaseChatModel +): Promise { + const conversationText = messageTexts.join("\n\n"); + + const systemPrompt = `You are a helpful assistant that creates very brief conversation summaries. +Create a short summary of the following conversation in just a few words (2-5 words maximum). +Focus on the main topic or action. 
+
+  const humanPrompt = `Summarize this conversation in just a few words:
+
+${conversationText}`;
+
+  const messages = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)];
+
+  const response = await chatModel.invoke(messages);
+  return response.content.toString().trim();
+}
+
+/**
+ * Create conversation content with conversation summary and all messages
+ * Format: timestamp conversation_summary||||user_message_1||||user_message_2...
+ */
+async function createConversationContent(
+  messages: ChatMessage[],
+  chatModel?: BaseChatModel
+): Promise<string | null> {
+  if (messages.length === 0) {
+    return null;
+  }
+
+  const timestamp = new Date().toISOString().split(".")[0]; // Remove milliseconds and Z
+
+  // Extract all user message texts
+  const messageTexts = messages
+    .filter((message) => message.sender === USER_SENDER)
+    .map((message) => message.message);
+
+  let summary: string = "No summary";
+
+  // Generate conversation summary
+  if (chatModel) {
+    try {
+      summary = await summarizeConversation(messageTexts, chatModel);
+    } catch (error) {
+      logError("[UserMemoryManager] Failed to generate conversation summary:", error);
+    }
+  }
+  const content = messageTexts.join("||||");
+  return `${timestamp} ${summary}||||${content}`;
+}
+
+/**
+ * Analyze chat messages and store useful information in user memory files
+ */
+async function updateRecentConversations(
+  app: App,
+  messages: ChatMessage[],
+  chatModel?: BaseChatModel
+): Promise<void> {
+  try {
+    const settings = getSettings();
+
+    // Only proceed if memory is enabled
+    if (!settings.enableMemory) {
+      logInfo("[UserMemoryManager] User memory tracking is disabled, skipping analysis");
+      return;
+    }
+
+    if (messages.length === 0) {
+      logInfo("[UserMemoryManager] No messages to analyze for user memory");
+      return;
+    }
+
+    // Format all messages for user memory storage, with conversation summary
+    const conversationLine = await createConversationContent(messages, chatModel);
+
+    // Ensure user memory folder exists
+    await ensureMemoryFolderExists(app);
+
+    // Save to user memory file
+    if (conversationLine) {
+      await addToRecentConversationFile(app, conversationLine);
+    }
+  } catch (error) {
+    logError("[UserMemoryManager] Error analyzing chat messages for user memory:", error);
+  }
+}
diff --git a/src/settings/SettingsPage.tsx b/src/settings/SettingsPage.tsx
index d2df0e0d9..2783b3fa6 100644
--- a/src/settings/SettingsPage.tsx
+++ b/src/settings/SettingsPage.tsx
@@ -2,7 +2,7 @@ import CopilotView from "@/components/CopilotView";
 import { CHAT_VIEWTYPE } from "@/constants";
 import CopilotPlugin from "@/main";
 import { getSettings } from "@/settings/model";
-import { updateMemoryWithConversation } from "@/memory/MemoryManager";
+import { UserMemoryManager } from "@/memory/UserMemoryManager";
 import { logInfo } from "@/logger";
 import { App, Notice, PluginSettingTab } from "obsidian";
 import React from "react";
 import { createRoot } from "react-dom/client";
@@ -25,7 +25,14 @@ export class CopilotSettingTab extends PluginSettingTab {
     // Analyze chat messages for memory if enabled
     if (chatView && getSettings().enableMemory) {
       try {
-        await updateMemoryWithConversation(this.app, this.plugin.chatUIState.getMessages());
+        // Get the current chat model from the chain manager
+        const chainManager = this.plugin.projectManager.getCurrentChainManager();
+        const chatModel = chainManager.chatModelManager.getChatModel();
+        UserMemoryManager.updateRecentConversations(
+          this.app,
+          this.plugin.chatUIState.getMessages(),
+          chatModel
+        );
       } catch (error) {
         logInfo("Failed to analyze chat messages for memory:", error);
       }

From 1ee76ae38f335a1f0551d0c2d5d4e0cb87ce76ac Mon Sep 17 00:00:00 2001
From: wenzhengjiang
Date: Sun, 31 Aug 2025 08:48:31 +0900
Subject: [PATCH 04/32] Load recent conversation to prompt

---
 .../chainRunner/AutonomousAgentChainRunner.ts | 23 ++++++++++++-------
 .../chainRunner/CopilotPlusChainRunner.ts     |  4 ++--
 src/memory/UserMemoryManager.ts               |  2 +-
 src/settings/model.ts                         | 16 +++++++++++++
 4 files changed, 34 insertions(+), 11 deletions(-)

diff --git a/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts b/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts
index 6154d47f5..759b55dfe 100644
--- a/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts
+++ b/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts
@@ -1,7 +1,8 @@
 import { MessageContent } from "@/imageProcessing/imageProcessor";
 import { logError, logInfo, logWarn } from "@/logger";
 import { checkIsPlusUser } from "@/plusUtils";
-import { getSettings, getSystemPrompt } from "@/settings/model";
+import { getSettings, getSystemPromptWithMemory } from "@/settings/model";
+import { App } from "obsidian";
 import { initializeBuiltinTools } from "@/tools/builtinTools";
 import { extractParametersFromZod, SimpleTool } from "@/tools/SimpleTool";
 import { ToolRegistry } from "@/tools/ToolRegistry";
@@ -78,11 +79,12 @@ ${params}
       .join("\n\n");
   }
 
-  public static generateSystemPrompt(
+  public static async generateSystemPrompt(
     availableTools: SimpleTool[],
-    adapter: ModelAdapter
-  ): string {
-    const basePrompt = getSystemPrompt();
+    adapter: ModelAdapter,
+    app?: App
+  ): Promise<string> {
+    const basePrompt = await getSystemPromptWithMemory(app);
     const toolDescriptions = AutonomousAgentChainRunner.generateToolDescriptions(availableTools);
     const toolNames = availableTools.map((tool) => tool.name);
 
@@ -96,14 +98,18 @@ ${params}
     return adapter.enhanceSystemPrompt(basePrompt, toolDescriptions, toolNames, toolMetadata);
   }
 
-  private generateSystemPrompt(): string {
+  private async generateSystemPrompt(): Promise<string> {
     const availableTools = this.getAvailableTools();
 
     // Use model adapter for clean model-specific handling
     const chatModel = this.chainManager.chatModelManager.getChatModel();
     const adapter = ModelAdapterFactory.createAdapter(chatModel);
 
-    return AutonomousAgentChainRunner.generateSystemPrompt(availableTools, adapter);
+    return AutonomousAgentChainRunner.generateSystemPrompt(
+      availableTools,
+      adapter,
+      this.chainManager.app
+    );
   }
 
   private getTemporaryToolCallId(toolName: string, index: number): string {
@@ -144,7 +150,8 @@ ${params}
     const rawHistory = memoryVariables.history || [];
 
     // Build initial conversation messages
-    const customSystemPrompt = this.generateSystemPrompt();
+    const customSystemPrompt = await this.generateSystemPrompt();
+    console.log("customSystemPrompt", customSystemPrompt);
     const chatModel = this.chainManager.chatModelManager.getChatModel();
     const adapter = ModelAdapterFactory.createAdapter(chatModel);
 
diff --git a/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts b/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts
index dc2d17278..bf0f6db13 100644
--- a/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts
+++ b/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts
@@ -14,7 +14,7 @@
 } from "@/imageProcessing/imageProcessor";
 import { BrevilabsClient } from "@/LLMProviders/brevilabsClient";
 import { logError, logInfo, logWarn } from "@/logger";
"@/logger"; -import { getSettings, getSystemPrompt } from "@/settings/model"; +import { getSettings, getSystemPromptWithMemory } from "@/settings/model"; import { ToolManager } from "@/tools/toolManager"; import { writeToFileTool } from "@/tools/ComposerTools"; import { ChatMessage } from "@/types/message"; @@ -746,6 +746,6 @@ export class CopilotPlusChainRunner extends BaseChainRunner { } protected async getSystemPrompt(): Promise { - return getSystemPrompt(); + return getSystemPromptWithMemory(this.chainManager.app); } } diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index b8d517cdd..3e9342ead 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -64,7 +64,7 @@ export class UserMemoryManager { */ function getRecentConversationFilePath(): string { const settings = getSettings(); - return `${settings.memoryFolderName}/recent_conversation_context.md`; + return `${settings.memoryFolderName}/recent_conversation_content.md`; } /** diff --git a/src/settings/model.ts b/src/settings/model.ts index c4eee53c7..b4b0b6400 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -12,6 +12,9 @@ import { DEFAULT_SYSTEM_PROMPT, EmbeddingModelProviders, } from "@/constants"; +import { UserMemoryManager } from "@/memory/UserMemoryManager"; +import { App } from "obsidian"; +import { logError } from "@/logger"; /** * We used to store commands in the settings file with the following interface. @@ -331,6 +334,19 @@ ${userPrompt} return basePrompt; } +export async function getSystemPromptWithMemory(app: App | undefined): Promise { + const systemPrompt = getSystemPrompt(); + if (!app) { + logError("No app provided to getSystemPromptWithMemory"); + return getSystemPrompt(); + } + const memoryPrompt = await UserMemoryManager.getUserMemoryPrompt(app); + return `${systemPrompt} + + ${memoryPrompt} + `; +} + function mergeAllActiveModelsWithCoreModels(settings: CopilotSettings): CopilotSettings { settings.activeModels = mergeActiveModels(settings.activeModels, BUILTIN_CHAT_MODELS); settings.activeEmbeddingModels = mergeActiveModels( From 6bb8528197451d97bdd74164cdcd28d9699adb23 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sun, 31 Aug 2025 12:53:15 +0900 Subject: [PATCH 05/32] Implement user insights --- src/components/Chat.tsx | 2 +- src/main.ts | 6 +- src/memory/UserMemoryManager.ts | 263 +++++++++++++++++++------------- src/settings/SettingsPage.tsx | 2 +- 4 files changed, 159 insertions(+), 114 deletions(-) diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx index feed63435..56b40a64e 100644 --- a/src/components/Chat.tsx +++ b/src/components/Chat.tsx @@ -544,7 +544,7 @@ const Chat: React.FC = ({ try { // Get the current chat model from the chain manager const chatModel = chainManager.chatModelManager.getChatModel(); - UserMemoryManager.updateRecentConversations(app, chatUIState.getMessages(), chatModel); + UserMemoryManager.updateUserMemory(app, chatUIState.getMessages(), chatModel); } catch (error) { logInfo("Failed to analyze chat messages for memory:", error); } diff --git a/src/main.ts b/src/main.ts index cec3ae25e..7a572f565 100644 --- a/src/main.ts +++ b/src/main.ts @@ -385,11 +385,7 @@ export default class CopilotPlugin extends Plugin { // Get the current chat model from the chain manager const chainManager = this.projectManager.getCurrentChainManager(); const chatModel = chainManager.chatModelManager.getChatModel(); - UserMemoryManager.updateRecentConversations( - this.app, - this.chatUIState.getMessages(), - 
-          chatModel
-        );
+        UserMemoryManager.updateUserMemory(this.app, this.chatUIState.getMessages(), chatModel);
       } catch (error) {
         logInfo("Failed to analyze chat messages for memory:", error);
       }
diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts
index 3e9342ead..c6bd26daf 100644
--- a/src/memory/UserMemoryManager.ts
+++ b/src/memory/UserMemoryManager.ts
@@ -6,7 +6,7 @@ import { getSettings } from "@/settings/model";
 import { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import { HumanMessage, SystemMessage } from "@langchain/core/messages";
 
-const MAX_CONVERSATION_LINES = 40;
+const MAX_MEMORY_LINES = 40;
 /**
  * User Memory Management Class
  *
@@ -18,40 +18,51 @@ export class UserMemoryManager {
   /**
    * Runs the user memory operation in the background without blocking execution
    */
-  static updateRecentConversations(
-    app: App,
-    messages: ChatMessage[],
-    chatModel?: BaseChatModel
-  ): void {
+  static updateUserMemory(app: App, messages: ChatMessage[], chatModel?: BaseChatModel): void {
+    const settings = getSettings();
+
+    // Only proceed if memory is enabled
+    if (!settings.enableMemory) {
+      logInfo("[UserMemoryManager] User memory tracking is disabled, skipping analysis");
+      return;
+    }
+
+    if (messages.length === 0) {
+      logInfo("[UserMemoryManager] No messages to analyze for user memory");
+      return;
+    }
+
     // Fire and forget - run in background
-    console.log("[UserMemoryManager] Adding to user memory", messages);
-    updateRecentConversations(app, messages, chatModel)
-      .catch((error) => {
-        logError("[UserMemoryManager] Background user memory operation failed:", error);
-      })
-      .finally(() => {
-        console.log("[UserMemoryManager] Added to user memory");
-      });
+    updateMemory(app, messages, chatModel).catch((error) => {
+      logError("[UserMemoryManager] Background user memory operation failed:", error);
+    });
   }
 
   /**
    * Get user memory prompt
   */
   static async getUserMemoryPrompt(app: App): Promise<string | null> {
+    const memoryFileMap = {
+      "Recent Conversation Content": getRecentConversationFilePath(),
+      "User Insights": getUserInsightsFilePath(),
+      "Response Preferences": getResponsePreferencesFilePath(),
+    };
+
     try {
-      const recentConversationFile = app.vault.getAbstractFileByPath(
-        getRecentConversationFilePath()
-      );
-      let recentConversationContent: string | null = null;
-      if (recentConversationFile instanceof TFile) {
-        const content = await app.vault.read(recentConversationFile);
-        recentConversationContent = content;
+      let memoryPrompt = "";
+
+      // Read all memory files using the map
+      for (const [sectionName, filePath] of Object.entries(memoryFileMap)) {
+        const file = app.vault.getAbstractFileByPath(filePath);
+        if (file instanceof TFile) {
+          const content = await app.vault.read(file);
+          if (content) {
+            memoryPrompt += `\n# ${sectionName}\n${content}\n`;
+          }
+        }
       }
 
-      return `
-      # Recent Conversation Content
-      ${recentConversationContent}
-      `;
+      return memoryPrompt.length > 0 ? memoryPrompt : null;
     } catch (error) {
       logError("[UserMemoryManager] Error reading user memory content:", error);
       return null;
     }
   }
 }
@@ -60,11 +71,53 @@
 
 /**
- * Get the path to the user memory file
+ * Analyze chat messages and store useful information in user memory files
  */
-function getRecentConversationFilePath(): string {
-  const settings = getSettings();
-  return `${settings.memoryFolderName}/recent_conversation_content.md`;
+async function updateMemory(
+  app: App,
+  messages: ChatMessage[],
+  chatModel?: BaseChatModel
+): Promise<void> {
+  try {
+    // Ensure user memory folder exists
+    await ensureMemoryFolderExists(app);
+
+    if (!chatModel) {
+      logError("[UserMemoryManager] No chat model available, skipping memory update");
+      return;
+    }
+
+    if (messages.length === 0) {
+      logInfo("[UserMemoryManager] No messages available, skipping memory update");
+      return;
+    }
+
+    // Extract all information in a single LLM call for better performance
+    const extractedInfo = await extractConversationInfo(app, messages, chatModel);
+
+    // 1. Save conversation summary to recent conversations
+    const timestamp = new Date().toISOString().split(".")[0]; // Remove milliseconds and Z
+    const userMessageTexts = messages
+      .filter((message) => message.sender === USER_SENDER)
+      .map((message) => message.message);
+    const content = userMessageTexts.join("||||");
+    const conversationLine = `${timestamp} ${extractedInfo.summary}||||${content}`;
+
+    if (conversationLine) {
+      await addToMemoryFile(app, getRecentConversationFilePath(), conversationLine);
+    }
+
+    // 2. Save user insights (if extracted)
+    if (extractedInfo.userInsights) {
+      try {
+        await addToMemoryFile(app, getUserInsightsFilePath(), extractedInfo.userInsights);
+      } catch (error) {
+        logError("[UserMemoryManager] Error saving user insights:", error);
+      }
+    }
+  } catch (error) {
+    logError("[UserMemoryManager] Error analyzing chat messages for user memory:", error);
+  }
 }
 
 /**
@@ -81,15 +134,24 @@ async function ensureMemoryFolderExists(app: App): Promise<void> {
   }
 }
 
+function getRecentConversationFilePath(): string {
+  const settings = getSettings();
+  return `${settings.memoryFolderName}/recent_conversation_content.md`;
+}
+
+function getUserInsightsFilePath(): string {
+  const settings = getSettings();
+  return `${settings.memoryFolderName}/user_insights.md`;
+}
+
 /**
  * Save content to the user memory file by appending new conversation
  */
-async function addToRecentConversationFile(app: App, newConversation: string): Promise<void> {
-  const memoryFilePath = getRecentConversationFilePath();
-  const newConversationLine = `- ${newConversation}`;
+async function addToMemoryFile(app: App, filePath: string, newContent: string): Promise<void> {
+  const newConversationLine = `- ${newContent}`;
 
   try {
-    const existingFile = app.vault.getAbstractFileByPath(memoryFilePath);
+    const existingFile = app.vault.getAbstractFileByPath(filePath);
 
     if (existingFile instanceof TFile) {
       // Read existing conversation lines, append the new line.
@@ -98,7 +160,7 @@ async function addToRecentConversationFile(app: App, newConversation: string): P
       const lines = fileContent.split("\n");
       lines.push(newConversationLine);
 
-      if (lines.length > MAX_CONVERSATION_LINES) {
+      if (lines.length > MAX_MEMORY_LINES) {
         // Remove the first line to keep within 40 lines limit
         lines.shift();
       }
@@ -106,11 +168,11 @@
       const updatedContent = lines.join("\n");
       await app.vault.modify(existingFile, updatedContent);
     } else {
-      await app.vault.create(memoryFilePath, newConversationLine);
+      await app.vault.create(filePath, newConversationLine);
     }
   } catch (error) {
-    logError(`[UserMemoryManager] Error saving to user memory file ${memoryFilePath}:`, error);
+    logError(`[UserMemoryManager] Error saving to user memory file ${filePath}:`, error);
     throw error;
   }
 }
 
 /**
- * Summarize conversation using LLM into a few words
+ * Extract all conversation information using a single LLM call for better performance
  */
-async function summarizeConversation(
-  messageTexts: string[],
+async function extractConversationInfo(
+  app: App,
+  messages: ChatMessage[],
   chatModel: BaseChatModel
-): Promise<string> {
-  const conversationText = messageTexts.join("\n\n");
+): Promise<{
+  summary: string;
+  userInsights: string | null;
+}> {
+  const conversationText = messages.map((msg) => `${msg.sender}: ${msg.message}`).join("\n\n");
 
-  const systemPrompt = `You are a helpful assistant that creates very brief conversation summaries.
-Create a short summary of the following conversation in just a few words (2-5 words maximum).
-Focus on the main topic or action. Examples: "Code debugging", "API integration", "UI design help", "Data analysis".`;
+  // Read existing memory to avoid duplication
+  let existingInsights = "";
 
-  const humanPrompt = `Summarize this conversation in just a few words:
+  try {
+    const userInsightsFile = app.vault.getAbstractFileByPath(getUserInsightsFilePath());
+    if (userInsightsFile instanceof TFile) {
+      existingInsights = await app.vault.read(userInsightsFile);
+    }
+  } catch (error) {
+    logError("[UserMemoryManager] Error reading existing memory files:", error);
+  }
 
-${conversationText}`;
+  const systemPrompt = `You are an AI assistant that analyzes conversations and extracts three types of information:
 
-  const messages = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)];
+1. CONVERSATION SUMMARY: Create a very brief summary in 2-5 words maximum (e.g., "Travel Plan", "Tokyo Weather")
 
-  const response = await chatModel.invoke(messages);
-  return response.content.toString().trim();
-}
+2. USER INSIGHTS: Extract NEW factual information or preferences about the user such as:
+  - Their role/profession
+  - Technologies they work with
+  - Projects they're working on
+  - Skills and expertise areas
+  - Learning goals or interests
+  - Preferred level of detail (brief vs detailed explanations)
+  - Communication style (formal vs casual)
+  - Explanation depth (beginner vs advanced)
+  - Format preferences (step-by-step vs narrative)
+  - Specific requests about how to present information
+
+IMPORTANT: Only extract NEW information that is NOT already captured in the existing memory below.
+
+
+${existingInsights || "None"}
+
+
+# OUTPUT FORMAT
+Return your analysis in this exact JSON format with below keys:
+* summary: brief 2-5 word summary.
+* userInsights (optional): Only return if there are new insights found.`;
+
+  const humanPrompt = `Analyze this conversation and extract the summary and any NEW user insights not already captured:
+
+${conversationText}`;
+
+  const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)];
+
+  try {
+    const response = await chatModel.invoke(messages_llm);
+    const responseText = response.content.toString().trim();
+
+    // Parse JSON response
+    let parsedResponse;
+    try {
+      parsedResponse = JSON.parse(responseText);
+    } catch (parseError) {
+      logError("[UserMemoryManager] Failed to parse LLM response as JSON:", parseError);
+    }
+
+    return parsedResponse;
+  } catch (error) {
+    logError("[UserMemoryManager] Failed to extract conversation info:", error);
+    return {
+      summary: "No summary",
+      userInsights: null,
+    };
+  }
+}
 
 /**
- * Create conversation content with conversation summary and all messages
- * Format: timestamp conversation_summary||||user_message_1||||user_message_2...
- */ -async function createConversationContent( - messages: ChatMessage[], - chatModel?: BaseChatModel -): Promise { - if (messages.length === 0) { - return null; - } +IMPORTANT: Only extract NEW information that is NOT already captured in the existing memory below. - const timestamp = new Date().toISOString().split(".")[0]; // Remove milliseconds and Z + +${existingInsights || "None"} + - // Extract all user message texts - const messageTexts = messages - .filter((message) => message.sender === USER_SENDER) - .map((message) => message.message); +# OUTPUT FORMAT +Return your analysis in this exact JSON format with below keys: +* summary: brief 2-5 word summary. +* userInsights (optional): Only return if there are new insights found.`; - let summary: string = "No summary"; + const humanPrompt = `Analyze this conversation and extract the summary and any NEW user insights not already captured: - // Generate conversation summary - if (chatModel) { - try { - summary = await summarizeConversation(messageTexts, chatModel); - } catch (error) { - logError("[UserMemoryManager] Failed to generate conversation summary:", error); - } - } - const content = messageTexts.join("||||"); - return `${timestamp} ${summary}||||${content}`; -} +${conversationText}`; -/** - * Analyze chat messages and store useful information in user memory files - */ -async function updateRecentConversations( - app: App, - messages: ChatMessage[], - chatModel?: BaseChatModel -): Promise { - try { - const settings = getSettings(); + const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; - // Only proceed if memory is enabled - if (!settings.enableMemory) { - logInfo("[UserMemoryManager] User memory tracking is disabled, skipping analysis"); - return; - } + try { + const response = await chatModel.invoke(messages_llm); + const responseText = response.content.toString().trim(); - if (messages.length === 0) { - logInfo("[UserMemoryManager] No messages to analyze for user memory"); - return; + // Parse JSON response + let parsedResponse; + try { + parsedResponse = JSON.parse(responseText); + } catch (parseError) { + logError("[UserMemoryManager] Failed to parse LLM response as JSON:", parseError); } - // Format all messages for user memory storage, with conversation summary - const conversationLine = await createConversationContent(messages, chatModel); - - // Ensure user memory folder exists - await ensureMemoryFolderExists(app); - - // Save to user memory file - if (conversationLine) { - await addToRecentConversationFile(app, conversationLine); - } + return parsedResponse; } catch (error) { - logError("[UserMemoryManager] Error analyzing chat messages for user memory:", error); + logError("[UserMemoryManager] Failed to extract conversation info:", error); + return { + summary: "No summary", + userInsights: null, + }; } } diff --git a/src/settings/SettingsPage.tsx b/src/settings/SettingsPage.tsx index 2783b3fa6..a54d78afd 100644 --- a/src/settings/SettingsPage.tsx +++ b/src/settings/SettingsPage.tsx @@ -28,7 +28,7 @@ export class CopilotSettingTab extends PluginSettingTab { // Get the current chat model from the chain manager const chainManager = this.plugin.projectManager.getCurrentChainManager(); const chatModel = chainManager.chatModelManager.getChatModel(); - UserMemoryManager.updateRecentConversations( + UserMemoryManager.updateUserMemory( this.app, this.plugin.chatUIState.getMessages(), chatModel From 0d86d4333b9ee3f1b60793d83b5171ce70436f7f Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: 
Date: Sun, 31 Aug 2025 16:58:44 +0900
Subject: [PATCH 06/32] Fix some bugs

---
 src/memory/UserMemoryManager.ts | 29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts
index c6bd26daf..07cc8488a 100644
--- a/src/memory/UserMemoryManager.ts
+++ b/src/memory/UserMemoryManager.ts
@@ -45,7 +45,6 @@ export class UserMemoryManager {
     const memoryFileMap = {
       "Recent Conversation Content": getRecentConversationFilePath(),
       "User Insights": getUserInsightsFilePath(),
-      "Response Preferences": getResponsePreferencesFilePath(),
     };
 
     try {
@@ -110,6 +109,7 @@ async function updateMemory(
     // 2. Save user insights (if extracted)
     if (extractedInfo.userInsights) {
       try {
+        console.log("[UserMemoryManager] Saving user insights:", extractedInfo.userInsights);
         await addToMemoryFile(app, getUserInsightsFilePath(), extractedInfo.userInsights);
       } catch (error) {
         logError("[UserMemoryManager] Error saving user insights:", error);
       }
@@ -201,11 +201,19 @@ async function extractConversationInfo(
     logError("[UserMemoryManager] Error reading existing memory files:", error);
   }
 
-  const systemPrompt = `You are an AI assistant that analyzes conversations and extracts three types of information:
+  const systemPrompt = `You are an AI assistant that analyzes conversations and extracts two types of information:
 
-1. CONVERSATION SUMMARY: Create a very brief summary in 2-5 words maximum (e.g., "Travel Plan", "Tokyo Weather")
+1. CONVERSATION SUMMARY: A very brief summary in 2-5 words maximum
+
+Examples: "Travel Plan", "Tokyo Weather"
+
+2. USER INSIGHTS: NEW factual information or preferences written in a short sentence.
+
+The insights should have long-term impact on the user's behavior or preferences. Like their name, profession, learning goals, etc.
+
+Examples: "User's name is John", "User is studying software engineering"
 
-2. USER INSIGHTS: Extract NEW factual information or preferences about the user such as:
+ The insights can be about the user such as:
   - Their role/profession
   - Technologies they work with
   - Projects they're working on
@@ -225,8 +233,8 @@ async function extractConversationInfo(
 
 # OUTPUT FORMAT
 Return your analysis in this exact JSON format with below keys:
-* summary: brief 2-5 word summary.
-* userInsights (optional): Only return if there are new insights found.`;
+* summary: String. brief 2-5 word summary.
+* userInsights (optional): String. Only return if there are new insights found.`;
 
   const humanPrompt = `Analyze this conversation and extract the summary and any NEW user insights not already captured:
 
@@ -237,11 +245,18 @@ ${conversationText}`;
   try {
     const response = await chatModel.invoke(messages_llm);
     const responseText = response.content.toString().trim();
+    // Unwrap the response if it's wrapped in code blocks
+    let unwrappedResponse = responseText;
+    if (responseText.startsWith("```json") && responseText.endsWith("```")) {
+      unwrappedResponse = responseText.slice(7, -3).trim();
+    } else if (responseText.startsWith("```") && responseText.endsWith("```")) {
+      unwrappedResponse = responseText.slice(3, -3).trim();
+    }
 
     // Parse JSON response
     let parsedResponse;
     try {
-      parsedResponse = JSON.parse(responseText);
+      parsedResponse = JSON.parse(unwrappedResponse);
     } catch (parseError) {
       logError("[UserMemoryManager] Failed to parse LLM response as JSON:", parseError);
     }

From 9e89ec99332d1d99e494f93d747f8c33dd931909 Mon Sep 17 00:00:00 2001
From: wenzhengjiang
Date: Mon, 1 Sep 2025 11:05:28 +0900
Subject: [PATCH 07/32] Refactor

---
 src/LLMProviders/chainManager.ts              |   5 +-
 .../chainRunner/AutonomousAgentChainRunner.ts |   8 +-
 .../chainRunner/CopilotPlusChainRunner.ts     |  61 ++-
 src/LLMProviders/projectManager.ts            |   2 +-
 src/components/Chat.tsx                       |   5 +-
 src/integration_tests/AgentPrompt.test.ts     |   2 +-
 src/main.ts                                   |   6 +-
 src/memory/UserMemoryManager.ts               | 410 +++++++++++-------
 src/settings/SettingsPage.tsx                 |   4 +-
 src/settings/model.ts                         |  13 +-
 10 files changed, 333 insertions(+), 183 deletions(-)

diff --git a/src/LLMProviders/chainManager.ts b/src/LLMProviders/chainManager.ts
index 003c682e2..559da8049 100644
--- a/src/LLMProviders/chainManager.ts
+++ b/src/LLMProviders/chainManager.ts
@@ -29,6 +29,7 @@ import { App, Notice } from "obsidian";
 import ChatModelManager from "./chatModelManager";
 import MemoryManager from "./memoryManager";
 import PromptManager from "./promptManager";
+import CopilotPlugin from "@/main";
 
 export default class ChainManager {
   // TODO: These chains are deprecated since we now use direct chat model calls in chain runners
@@ -45,10 +46,12 @@ export default class ChainManager {
   public chatModelManager: ChatModelManager;
   public memoryManager: MemoryManager;
   public promptManager: PromptManager;
+  public plugin?: CopilotPlugin;
 
-  constructor(app: App) {
+  constructor(app: App, plugin?: CopilotPlugin) {
     // Instantiate singletons
     this.app = app;
+    this.plugin = plugin;
    this.memoryManager = MemoryManager.getInstance();
     this.chatModelManager = ChatModelManager.getInstance();
     this.promptManager = PromptManager.getInstance();
diff --git a/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts b/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts
index 759b55dfe..a67b271a3 100644
--- a/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts
+++ b/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts
@@ -2,7 +2,7 @@ import { MessageContent } from "@/imageProcessing/imageProcessor";
 import { logError, logInfo, logWarn } from "@/logger";
 import { checkIsPlusUser } from "@/plusUtils";
 import { getSettings, getSystemPromptWithMemory } from "@/settings/model";
-import { App } from "obsidian";
+import { UserMemoryManager } from "@/memory/UserMemoryManager";
 import { initializeBuiltinTools } from "@/tools/builtinTools";
 import { extractParametersFromZod, SimpleTool } from "@/tools/SimpleTool";
 import { ToolRegistry } from "@/tools/ToolRegistry";
@@ -82,9 +82,9 @@ ${params}
   public static async generateSystemPrompt(
     availableTools: SimpleTool[],
     adapter: ModelAdapter,
-    app?: App
+    userMemoryManager?: UserMemoryManager
   ): Promise<string> {
-    const basePrompt = await getSystemPromptWithMemory(app);
+    const basePrompt = await getSystemPromptWithMemory(userMemoryManager);
     const toolDescriptions = AutonomousAgentChainRunner.generateToolDescriptions(availableTools);
     const toolNames = availableTools.map((tool) => tool.name);
 
@@ -108,7 +108,7 @@ ${params}
     return AutonomousAgentChainRunner.generateSystemPrompt(
       availableTools,
       adapter,
-      this.chainManager.app
+      this.chainManager.plugin?.userMemoryManager
     );
   }
 
diff --git a/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts b/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts
index bf0f6db13..6169c0476 100644
--- a/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts
+++ b/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts
@@ -14,9 +14,10 @@
 } from "@/imageProcessing/imageProcessor";
 import { BrevilabsClient } from "@/LLMProviders/brevilabsClient";
 import { logError, logInfo, logWarn } from "@/logger";
+import { checkIsPlusUser } from "@/plusUtils";
 import { getSettings, getSystemPromptWithMemory } from "@/settings/model";
-import { ToolManager } from "@/tools/toolManager";
 import { writeToFileTool } from "@/tools/ComposerTools";
+import { ToolManager } from "@/tools/toolManager";
 import { ChatMessage } from "@/types/message";
 import {
   extractYoutubeUrl,
@@ -28,18 +29,17 @@
 import { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import { COPILOT_TOOL_NAMES, IntentAnalyzer } from "../intentAnalyzer";
 import { BaseChainRunner } from "./BaseChainRunner";
 import { ActionBlockStreamer } from "./utils/ActionBlockStreamer";
-import { ThinkBlockStreamer } from "./utils/ThinkBlockStreamer";
 import {
   addChatHistoryToMessages,
-  processRawChatHistory,
   processedMessagesToTextOnly,
+  processRawChatHistory,
 } from "./utils/chatHistoryUtils";
-import { checkIsPlusUser } from "@/plusUtils";
 import {
+  extractSourcesFromSearchResults,
   formatSearchResultsForLLM,
   formatSearchResultStringForLLM,
-  extractSourcesFromSearchResults,
 } from "./utils/searchResultUtils";
+import { ThinkBlockStreamer } from "./utils/ThinkBlockStreamer";
 import { deduplicateSources } from "./utils/toolExecution";
 
 export class CopilotPlusChainRunner extends BaseChainRunner {
@@ -78,6 +78,38 @@ export class CopilotPlusChainRunner extends BaseChainRunner {
     return processedImages;
   }
 
+  private extractNoteContent(textContent: string): string {
+    // Extract content from both <note_context> and <active_note> blocks, but not from <url_content> blocks
+    const noteContextRegex = /<note_context>([\s\S]*?)<\/note_context>/g;
+    const activeNoteRegex = /<active_note>([\s\S]*?)<\/active_note>/g;
+    const contentRegex = /<content>([\s\S]*?)<\/content>/g;
+
+    let noteContent = "";
+    let match;
+
+    // Find all note_context blocks
+    while ((match = noteContextRegex.exec(textContent)) !== null) {
+      const noteBlock = match[1];
+      // Extract content from within this note_context block
+      let contentMatch;
+      while ((contentMatch = contentRegex.exec(noteBlock)) !== null) {
+        noteContent += contentMatch[1] + "\n\n";
+      }
+    }
+
+    // Find all active_note blocks
+    while ((match = activeNoteRegex.exec(textContent)) !== null) {
+      const noteBlock = match[1];
+      // Extract content from within this active_note block
+      let contentMatch;
+      while ((contentMatch = contentRegex.exec(noteBlock)) !== null) {
+        noteContent += contentMatch[1] + "\n\n";
+      }
+    }
+
+    return noteContent.trim();
+  }
+
   private async extractEmbeddedImages(content: string, sourcePath?: string): Promise<string[]> {
     // Match both wiki-style ![[image.ext]] and standard markdown ![alt](image.ext)
wiki-style ![[image.ext]] and standard markdown ![alt](image.ext) const wikiImageRegex = /!\[\[(.*?\.(png|jpg|jpeg|gif|webp|bmp|svg))\]\]/g; @@ -162,11 +194,8 @@ export class CopilotPlusChainRunner extends BaseChainRunner { // Collect all image sources const imageSources: { urls: string[]; type: string }[] = []; - // Safely check and add context URLs - const contextUrls = userMessage.context?.urls; - if (contextUrls && contextUrls.length > 0) { - imageSources.push({ urls: contextUrls, type: "context" }); - } + // NOTE: Context URLs are web pages we fetched content from, NOT images to process + // Do not add context URLs as image sources // Process embedded images only if setting is enabled if (settings.passMarkdownImages) { @@ -185,9 +214,13 @@ export class CopilotPlusChainRunner extends BaseChainRunner { } } - const embeddedImages = await this.extractEmbeddedImages(textContent, sourcePath); - if (embeddedImages.length > 0) { - imageSources.push({ urls: embeddedImages, type: "embedded" }); + // Extract note content (excluding URL content) for image processing + const noteContent = this.extractNoteContent(textContent); + if (noteContent) { + const embeddedImages = await this.extractEmbeddedImages(noteContent, sourcePath); + if (embeddedImages.length > 0) { + imageSources.push({ urls: embeddedImages, type: "embedded" }); + } } } @@ -746,6 +779,6 @@ export class CopilotPlusChainRunner extends BaseChainRunner { } protected async getSystemPrompt(): Promise { - return getSystemPromptWithMemory(this.chainManager.app); + return getSystemPromptWithMemory(this.chainManager.plugin?.userMemoryManager); } } diff --git a/src/LLMProviders/projectManager.ts b/src/LLMProviders/projectManager.ts index a68fa0bca..b814c833d 100644 --- a/src/LLMProviders/projectManager.ts +++ b/src/LLMProviders/projectManager.ts @@ -39,7 +39,7 @@ export default class ProjectManager { this.app = app; this.plugin = plugin; this.currentProjectId = null; - this.chainMangerInstance = new ChainManager(app); + this.chainMangerInstance = new ChainManager(app, plugin); this.projectContextCache = ProjectContextCache.getInstance(); this.fileParserManager = new FileParserManager( BrevilabsClient.getInstance(), diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx index 56b40a64e..799ac029a 100644 --- a/src/components/Chat.tsx +++ b/src/components/Chat.tsx @@ -11,7 +11,6 @@ import { import { ChainType } from "@/chainFactory"; import { useProjectContextStatus } from "@/hooks/useProjectContextStatus"; import { logInfo } from "@/logger"; -import { UserMemoryManager } from "@/memory/UserMemoryManager"; import { ChatControls, reloadCurrentProject } from "@/components/chat-components/ChatControls"; import ChatInput from "@/components/chat-components/ChatInput"; @@ -544,7 +543,7 @@ const Chat: React.FC = ({ try { // Get the current chat model from the chain manager const chatModel = chainManager.chatModelManager.getChatModel(); - UserMemoryManager.updateUserMemory(app, chatUIState.getMessages(), chatModel); + plugin.userMemoryManager.updateUserMemory(chatUIState.getMessages(), chatModel); } catch (error) { logInfo("Failed to analyze chat messages for memory:", error); } @@ -579,7 +578,7 @@ const Chat: React.FC = ({ selectedChain, handleSaveAsNote, safeSet, - app, + plugin.userMemoryManager, ]); const handleLoadHistory = useCallback(() => { diff --git a/src/integration_tests/AgentPrompt.test.ts b/src/integration_tests/AgentPrompt.test.ts index d70c8e812..32745411c 100644 --- a/src/integration_tests/AgentPrompt.test.ts +++ 
b/src/integration_tests/AgentPrompt.test.ts @@ -140,7 +140,7 @@ async function generateSystemPrompt(availableTools: any[]): Promise<string> { }); const adapter = ModelAdapterFactory.createAdapter(mockModel); - return AutonomousAgentChainRunner.generateSystemPrompt(availableTools, adapter); + return AutonomousAgentChainRunner.generateSystemPrompt(availableTools, adapter, undefined); } // Helper function to mock tool execution diff --git a/src/main.ts b/src/main.ts index 7a572f565..c3c416a7d 100644 --- a/src/main.ts +++ b/src/main.ts @@ -57,6 +57,7 @@ export default class CopilotPlugin extends Plugin { settingsUnsubscriber?: () => void; private autocompleteService: AutocompleteService; chatUIState: ChatUIState; + userMemoryManager: UserMemoryManager; async onload(): Promise<void> { await this.loadSettings(); @@ -95,6 +96,9 @@ export default class CopilotPlugin extends Plugin { const chatManager = new ChatManager(messageRepo, chainManager, this.fileParserManager, this); this.chatUIState = new ChatUIState(chatManager); + // Initialize UserMemoryManager + this.userMemoryManager = new UserMemoryManager(this.app); + this.registerView(CHAT_VIEWTYPE, (leaf: WorkspaceLeaf) => new CopilotView(leaf, this)); this.registerView(APPLY_VIEW_TYPE, (leaf: WorkspaceLeaf) => new ApplyView(leaf)); @@ -385,7 +389,7 @@ export default class CopilotPlugin extends Plugin { // Get the current chat model from the chain manager const chainManager = this.projectManager.getCurrentChainManager(); const chatModel = chainManager.chatModelManager.getChatModel(); - UserMemoryManager.updateUserMemory(this.app, this.chatUIState.getMessages(), chatModel); + this.userMemoryManager.updateUserMemory(this.chatUIState.getMessages(), chatModel); } catch (error) { logInfo("Failed to analyze chat messages for memory:", error); } diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 07cc8488a..b4e56cff6 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -7,18 +7,61 @@ import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { HumanMessage, SystemMessage } from "@langchain/core/messages"; const MAX_MEMORY_LINES = 40; +const INSIGHT_UPDATE_THRESHOLD = 10; // Update insights every 10 new conversations + /** * User Memory Management Class * - * Static methods for building and managing user memory based on conversations. + * Instance-based methods for building and managing user memory based on conversations. * The UserMemoryManager has methods to add recent conversations and user facts to the user memory, * which can then be used to personalize LLM responses.
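* Memory is persisted as markdown files (recent conversations and user insights) in the folder configured by the memoryFolderName setting.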
*/ export class UserMemoryManager { + private app: App; + private recentConversationsContent: string = ""; + private userInsightsContent: string = ""; + private newestUserInsightTimestamp: Date | null = null; + private isUpdatingMemory: boolean = false; + + constructor(app: App) { + this.app = app; + } + + /** + * Load memory data from files into class fields + */ + async loadMemory(): Promise { + try { + const recentConversationsFile = this.app.vault.getAbstractFileByPath( + this.getRecentConversationFilePath() + ); + if (recentConversationsFile instanceof TFile) { + this.recentConversationsContent = await this.app.vault.read(recentConversationsFile); + } + } catch (error) { + logError("[UserMemoryManager] Error reading recent conversations file:", error); + } + + try { + const userInsightsFile = this.app.vault.getAbstractFileByPath(this.getUserInsightsFilePath()); + if (userInsightsFile instanceof TFile) { + this.userInsightsContent = await this.app.vault.read(userInsightsFile); + const userInsightsLines = this.userInsightsContent + .split("\n") + .filter((line) => line.trim().startsWith("- ")); + this.newestUserInsightTimestamp = this.extractTimestampFromLine( + userInsightsLines[userInsightsLines.length - 1] + ); + } + } catch (error) { + logError("[UserMemoryManager] Error reading user insights file:", error); + } + } + /** * Runs the user memory operation in the background without blocking execution */ - static updateUserMemory(app: App, messages: ChatMessage[], chatModel?: BaseChatModel): void { + updateUserMemory(messages: ChatMessage[], chatModel?: BaseChatModel): void { const settings = getSettings(); // Only proceed if memory is enabled @@ -33,7 +76,7 @@ export class UserMemoryManager { } // Fire and forget - run in background - updateMemory(app, messages, chatModel).catch((error) => { + this.updateMemory(messages, chatModel).catch((error) => { logError("[UserMemoryManager] Background user memory operation failed:", error); }); } @@ -41,24 +84,18 @@ export class UserMemoryManager { /** * Get user memory prompt */ - static async getUserMemoryPrompt(app: App): Promise { - const memoryFileMap = { - "Recent Conversation Content": getRecentConversationFilePath(), - "User Insights": getUserInsightsFilePath(), - }; + async getUserMemoryPrompt(): Promise { + await this.loadMemory(); try { let memoryPrompt = ""; - // Read all memory files using the map - for (const [sectionName, filePath] of Object.entries(memoryFileMap)) { - const file = app.vault.getAbstractFileByPath(filePath); - if (file instanceof TFile) { - const content = await app.vault.read(file); - if (content) { - memoryPrompt += `\n# ${sectionName}\n${content}\n`; - } - } + if (this.recentConversationsContent) { + memoryPrompt += `\n# Recent Conversation Content\n${this.recentConversationsContent}\n`; + } + + if (this.userInsightsContent) { + memoryPrompt += `\n# User Insights\n${this.userInsightsContent}\n`; } return memoryPrompt.length > 0 ? 
memoryPrompt : null; @@ -67,147 +104,231 @@ export class UserMemoryManager { return null; } } -} - -/** - * Analyze chat messages and store useful information in user memory files - */ -async function updateMemory( - app: App, - messages: ChatMessage[], - chatModel?: BaseChatModel -): Promise { - try { - // Ensure user memory folder exists - await ensureMemoryFolderExists(app); - - if (!chatModel) { - logError("[UserMemoryManager] No chat model available, skipping memory update"); - return; - } - - if (messages.length === 0) { - logInfo("[UserMemoryManager] No messages available, skipping memory update"); - return; - } - // Extract all information in a single LLM call for better performance - const extractedInfo = await extractConversationInfo(app, messages, chatModel); - - // 1. Save conversation summary to recent conversations - const timestamp = new Date().toISOString().split(".")[0]; // Remove milliseconds and Z + /** + * Create a conversation line from messages and return it + */ + private async createConversationLine( + messages: ChatMessage[], + chatModel: BaseChatModel + ): Promise { + const conversationSummary = await this.extractConversationSummary(messages, chatModel); + const timestamp = new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC const userMessageTexts = messages .filter((message) => message.sender === USER_SENDER) .map((message) => message.message); const content = userMessageTexts.join("||||"); - const conversationLine = `${timestamp} ${extractedInfo.summary}||||${content}`; + return `${timestamp} ${conversationSummary}||||${content}`; + } - if (conversationLine) { - await addToMemoryFile(app, getRecentConversationFilePath(), conversationLine); + /** + * Analyze chat messages and store useful information in user memory files + */ + private async updateMemory(messages: ChatMessage[], chatModel?: BaseChatModel): Promise { + // Prevent race conditions by ensuring only one memory update operation runs at a time + if (this.isUpdatingMemory) { + logInfo("[UserMemoryManager] Memory update already in progress, skipping."); + return; } - // 2. Save user insights (if extracted) - if (extractedInfo.userInsights) { - try { - console.log("[UserMemoryManager] Saving user insights:", extractedInfo.userInsights); - await addToMemoryFile(app, getUserInsightsFilePath(), extractedInfo.userInsights); - } catch (error) { - logError("[UserMemoryManager] Error saving user insights:", error); + this.isUpdatingMemory = true; + try { + // Ensure user memory folder exists + await this.ensureMemoryFolderExists(); + + if (!chatModel) { + logError("[UserMemoryManager] No chat model available, skipping memory update"); + return; + } + + if (messages.length === 0) { + logInfo("[UserMemoryManager] No messages available, skipping memory update"); + return; + } + + // 1. Always extract and save conversation summary to recent conversations + const conversationLine = await this.createConversationLine(messages, chatModel); + await this.addToMemoryFile(this.getRecentConversationFilePath(), conversationLine); + + // 2. 
Check if user insights should be updated + // We update insights every INSIGHT_UPDATE_THRESHOLD conversations to ensure + // important user information is preserved in long-term memory before being + // rotated out of the short-term conversation buffer + await this.loadMemory(); + if (this.shouldUpdateUserInsights()) { + logInfo("[UserMemoryManager] Updating user insights based on recent conversation activity"); + const userInsights = await this.extractUserInsights(chatModel); + if (userInsights) { + try { + logInfo("[UserMemoryManager] Saving user insights:", userInsights); + const timestamp = new Date().toISOString().split(".")[0] + "Z"; + const timestampedInsight = `${timestamp} ${userInsights}`; + await this.addToMemoryFile(this.getUserInsightsFilePath(), timestampedInsight); + } catch (error) { + logError("[UserMemoryManager] Error saving user insights:", error); + } + } + } else { + logInfo( + "[UserMemoryManager] Skipping user insights update - not enough new conversations since last insight" + ); } } catch (error) { logError("[UserMemoryManager] Error analyzing chat messages for user memory:", error); } finally { this.isUpdatingMemory = false; } } -/** - * Ensure the user memory folder exists - */ -async function ensureMemoryFolderExists(app: App): Promise<void> { - const settings = getSettings(); - const memoryFolderPath = settings.memoryFolderName; - - const folder = app.vault.getAbstractFileByPath(memoryFolderPath); - if (!folder) { - await app.vault.createFolder(memoryFolderPath); - logInfo(`[UserMemoryManager] Created user memory folder: ${memoryFolderPath}`); + /** + * Ensure the user memory folder exists + */ + private async ensureMemoryFolderExists(): Promise<void> { + const settings = getSettings(); + const memoryFolderPath = settings.memoryFolderName; + + const folder = this.app.vault.getAbstractFileByPath(memoryFolderPath); + if (!folder) { + await this.app.vault.createFolder(memoryFolderPath); + logInfo(`[UserMemoryManager] Created user memory folder: ${memoryFolderPath}`); + } } -} -function getRecentConversationFilePath(): string { - const settings = getSettings(); - return `${settings.memoryFolderName}/recent_conversation_content.md`; -} + private getRecentConversationFilePath(): string { + const settings = getSettings(); + return `${settings.memoryFolderName}/recent_conversation_content.md`; + } -function getUserInsightsFilePath(): string { - const settings = getSettings(); - return `${settings.memoryFolderName}/user_insights.md`; -} + private getUserInsightsFilePath(): string { + const settings = getSettings(); + return `${settings.memoryFolderName}/user_insights.md`; + } -/** - * Save content to the user memory file by appending new conversation - */ -async function addToMemoryFile(app: App, filePath: string, newContent: string): Promise<void> { - const newConversationLine = `- ${newContent}`; + /** + * Save content to the user memory file by appending new conversation + * Maintains a rolling buffer of conversations by removing the oldest when limit is exceeded + */ + private async addToMemoryFile(filePath: string, newContent: string): Promise<void> { + const newConversationLine = `- ${newContent}`; - try { - const existingFile = app.vault.getAbstractFileByPath(filePath); + const existingFile = this.app.vault.getAbstractFileByPath(filePath); if (existingFile instanceof TFile) { // Read existing conversation lines, append the new line.
// Make sure the content lines do not exceed 40 lines. If they do, remove the first line. - const fileContent = await app.vault.read(existingFile); + const fileContent = await this.app.vault.read(existingFile); const lines = fileContent.split("\n"); lines.push(newConversationLine); if (lines.length > MAX_MEMORY_LINES) { - // Remove the first line to keep within 40 lines limit + // Remove the first line to keep within the limit lines.shift(); } const updatedContent = lines.join("\n"); - await app.vault.modify(existingFile, updatedContent); + await this.app.vault.modify(existingFile, updatedContent); } else { - await app.vault.create(filePath, newConversationLine); + await this.app.vault.create(filePath, newConversationLine); } - } catch (error) { - logError(`[UserMemoryManager] Error saving to user memory file ${filePath}:`, error); - throw error; } -} /** - * Extract all conversation information using a single LLM call for better performance - */ -async function extractConversationInfo( - app: App, - messages: ChatMessage[], - chatModel: BaseChatModel -): Promise<{ - summary: string; - userInsights: string | null; -}> { - const conversationText = messages.map((msg) => `${msg.sender}: ${msg.message}`).join("\n\n"); - - // Read existing memory to avoid duplication - let existingInsights = ""; - - try { - const userInsightsFile = app.vault.getAbstractFileByPath(getUserInsightsFilePath()); - if (userInsightsFile instanceof TFile) { - existingInsights = await app.vault.read(userInsightsFile); + /** + * Check if user insights should be updated based on conversation count + * Updates when there are at least INSIGHT_UPDATE_THRESHOLD new conversations since the last insight + */ + private shouldUpdateUserInsights(): boolean { + try { + // Always update if no insights exist yet + if (this.newestUserInsightTimestamp === null) { + return true; + } + + // No insights update possible without recent conversations + if (!this.recentConversationsContent.trim()) { + return false; + } + + // Count conversations newer than the latest insight + const recentLines = this.recentConversationsContent + .split("\n") + .filter((line) => line.trim().startsWith("- ")); + const newConversationsCount = recentLines.filter((line) => { + const timestamp = this.extractTimestampFromLine(line); + return timestamp && timestamp > this.newestUserInsightTimestamp!; + }).length; + + // Update if we have enough new conversations + return newConversationsCount >= INSIGHT_UPDATE_THRESHOLD; + } catch (error) { + logError("[UserMemoryManager] Error checking if user insights should be updated:", error); + // If there's an error, err on the side of updating + return true; + } + } + + /** + * Extract timestamp from a memory line (format: "- YYYY-MM-DDTHH:mm:ss ...") + */ + private extractTimestampFromLine(line: string): Date | null { + if (!line) return null; + + // Remove "- " prefix and extract timestamp + const trimmed = line.replace(/^-\s*/, ""); + const timestampMatch = trimmed.match(/^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})/); + + if (!timestampMatch) { + logError("[UserMemoryManager] Error extracting timestamp from line:", line); + return null; } - } catch (error) { - logError("[UserMemoryManager] Error reading existing memory files:", error); + return new Date(timestampMatch[1] + "Z"); // Add Z for UTC } - const systemPrompt = `You are an AI assistant that analyzes conversations and extracts two types of information: + /** + * Extract conversation summary using LLM + */ + private async extractConversationSummary( + messages: ChatMessage[],
chatModel: BaseChatModel + ): Promise { + const conversationText = messages.map((msg) => `${msg.sender}: ${msg.message}`).join("\n\n"); -1. CONVERSATION SUMMARY: A very brief summary in 2-5 words maximum + const systemPrompt = `You are an AI assistant that analyzes conversations and extracts a brief summary. + +CONVERSATION SUMMARY: A very brief summary in 2-5 words maximum Examples: "Travel Plan", "Tokyo Weather" -2. USER INSIGHTS: NEW factual information or preferences written in a short sentence. +# OUTPUT FORMAT +Return only the brief 2-5 word summary as plain text, no JSON format needed.`; + + const humanPrompt = `Analyze this conversation and extract a brief summary: + +${conversationText}`; + + const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; + + try { + const response = await chatModel.invoke(messages_llm); + const summary = response.content.toString().trim(); + return summary || "No summary"; + } catch (error) { + logError("[UserMemoryManager] Failed to extract conversation summary:", error); + return "No summary"; + } + } + + /** + * Extract user insights using LLM with deduplication from class fields + */ + private async extractUserInsights(chatModel: BaseChatModel): Promise { + if (!this.recentConversationsContent.trim()) { + logInfo("[UserMemoryManager] No recent conversations found for insight extraction"); + return null; + } + + const systemPrompt = `You are an AI assistant that analyzes past conversations and extracts user insights. + +USER INSIGHTS: NEW factual information or preferences written in a short sentence. The insights should have long-term impact on the user's behavior or preferences. Like their name, profession, learning goals, etc. @@ -228,45 +349,36 @@ Examples: "User's name is John", "User is studying software engineering" IMPORTANT: Only extract NEW information that is NOT already captured in the existing memory below. -${existingInsights || "None"} +${this.userInsightsContent || "None"} # OUTPUT FORMAT -Return your analysis in this exact JSON format with below keys: -* summary: String. brief 2-5 word summary. -* userInsights (optional): String. Only return if there are new insights found.`; +Return only the new user insight as plain text, or return "NONE" if no new insights are found.`; - const humanPrompt = `Analyze this conversation and extract the summary and any NEW user insights not already captured: + const humanPrompt = `Analyze these recent conversations and extract any NEW user insights not already captured. -${conversationText}`; + Each line is a separate conversation in the format: " ||||". 
- const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; - - try { - const response = await chatModel.invoke(messages_llm); - const responseText = response.content.toString().trim(); - // Unwrap the response if it's wrapped in code blocks - let unwrappedResponse = responseText; - if (responseText.startsWith("```json") && responseText.endsWith("```")) { - unwrappedResponse = responseText.slice(7, -3).trim(); - } else if (responseText.startsWith("```") && responseText.endsWith("```")) { - unwrappedResponse = responseText.slice(3, -3).trim(); - } +${this.recentConversationsContent}`; + + const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; - // Parse JSON response - let parsedResponse; try { - parsedResponse = JSON.parse(unwrappedResponse); - } catch (parseError) { - logError("[UserMemoryManager] Failed to parse LLM response as JSON:", parseError); - } + const response = await chatModel.invoke(messages_llm); + const insight = response.content.toString().trim(); + + if ( + !insight || + insight.toLowerCase() === "none" || + insight.toLowerCase() === "no new insights" + ) { + return null; + } - return parsedResponse; - } catch (error) { - logError("[UserMemoryManager] Failed to extract conversation info:", error); - return { - summary: "No summary", - userInsights: null, - }; + return insight; + } catch (error) { + logError("[UserMemoryManager] Failed to extract user insights:", error); + return null; + } } } diff --git a/src/settings/SettingsPage.tsx b/src/settings/SettingsPage.tsx index a54d78afd..743c2202d 100644 --- a/src/settings/SettingsPage.tsx +++ b/src/settings/SettingsPage.tsx @@ -2,7 +2,6 @@ import CopilotView from "@/components/CopilotView"; import { CHAT_VIEWTYPE } from "@/constants"; import CopilotPlugin from "@/main"; import { getSettings } from "@/settings/model"; -import { UserMemoryManager } from "@/memory/UserMemoryManager"; import { logInfo } from "@/logger"; import { App, Notice, PluginSettingTab } from "obsidian"; import React from "react"; @@ -28,8 +27,7 @@ export class CopilotSettingTab extends PluginSettingTab { // Get the current chat model from the chain manager const chainManager = this.plugin.projectManager.getCurrentChainManager(); const chatModel = chainManager.chatModelManager.getChatModel(); - UserMemoryManager.updateUserMemory( - this.app, + this.plugin.userMemoryManager.updateUserMemory( this.plugin.chatUIState.getMessages(), chatModel ); diff --git a/src/settings/model.ts b/src/settings/model.ts index b4b0b6400..c778fa53d 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -1,6 +1,7 @@ import { CustomModel, ProjectConfig } from "@/aiParams"; import { atom, createStore, useAtomValue } from "jotai"; import { v4 as uuidv4 } from "uuid"; +import { UserMemoryManager } from "@/memory/UserMemoryManager"; import { AcceptKeyOption } from "@/autocomplete/codemirrorIntegration"; import { type ChainType } from "@/chainFactory"; @@ -12,8 +13,6 @@ import { DEFAULT_SYSTEM_PROMPT, EmbeddingModelProviders, } from "@/constants"; -import { UserMemoryManager } from "@/memory/UserMemoryManager"; -import { App } from "obsidian"; import { logError } from "@/logger"; /** @@ -334,13 +333,15 @@ ${userPrompt} return basePrompt; } -export async function getSystemPromptWithMemory(app: App | undefined): Promise { +export async function getSystemPromptWithMemory( + userMemoryManager: UserMemoryManager | undefined +): Promise { const systemPrompt = getSystemPrompt(); - if (!app) { - logError("No app provided to 
getSystemPromptWithMemory"); + if (!userMemoryManager) { + logError("No UserMemoryManager provided to getSystemPromptWithMemory"); return getSystemPrompt(); } - const memoryPrompt = await UserMemoryManager.getUserMemoryPrompt(app); + const memoryPrompt = await userMemoryManager.getUserMemoryPrompt(); return `${systemPrompt} ${memoryPrompt} From 52f7ad752bb9f96c71f97ee9c9cd654f0d788100 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 8 Sep 2025 19:10:12 +0900 Subject: [PATCH 08/32] Support returning multiple insights --- src/memory/UserMemoryManager.ts | 81 ++++++++++++++++++++++--------- src/memory/memory-flow-diagram.md | 68 ++++++++++++++++++++++++++ 2 files changed, 126 insertions(+), 23 deletions(-) create mode 100644 src/memory/memory-flow-diagram.md diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index b4e56cff6..50d75ca60 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -7,7 +7,7 @@ import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { HumanMessage, SystemMessage } from "@langchain/core/messages"; const MAX_MEMORY_LINES = 40; -const INSIGHT_UPDATE_THRESHOLD = 10; // Update insights every 10 new conversations +const INSIGHT_UPDATE_THRESHOLD = 1; // Update insights every 10 new conversations /** * User Memory Management Class @@ -162,8 +162,13 @@ export class UserMemoryManager { try { console.log("[UserMemoryManager] Saving user insights:", userInsights); const timestamp = new Date().toISOString().split(".")[0] + "Z"; - const timestampedInsight = `${timestamp} ${userInsights}`; - await this.addToMemoryFile(this.getUserInsightsFilePath(), timestampedInsight); + + // Split insights by line and save each separately + const insights = userInsights.split("\n").filter((line) => line.trim().length > 0); + for (const insight of insights) { + const timestampedInsight = `${timestamp} ${insight.trim()}`; + await this.addToMemoryFile(this.getUserInsightsFilePath(), timestampedInsight); + } } catch (error) { logError("[UserMemoryManager] Error saving user insights:", error); } @@ -328,23 +333,42 @@ ${conversationText}`; const systemPrompt = `You are an AI assistant that analyzes past conversations and extracts user insights. -USER INSIGHTS: NEW factual information or preferences written in a short sentence. - -The insights should have long-term impact on the user's behavior or preferences. Like their name, profession, learning goals, etc. - -Examples: "User's name is John", "User is studying software engineering" - - The insights can be about the user such as: - - Their role/profession - - Technologies they work with - - Projects they're working on - - Skills and expertise areas - - Learning goals or interests - - Preferred level of detail (brief vs detailed explanations) - - Communication style (formal vs casual) - - Explanation depth (beginner vs advanced) - - Format preferences (step-by-step vs narrative) - - Specific requests about how to present information +USER INSIGHTS: PERSISTENT factual information or preferences about the user written in a short sentence. + +CRITICAL RULES: +1. Only extract PERMANENT, LONG-TERM characteristics about the user +2. DO NOT extract temporary activities, events, or situational information +3. 
DO NOT make inferences or assumptions - only extract what the user EXPLICITLY states about themselves + +✅ EXTRACT (Persistent traits - ONLY when explicitly stated by user): +- Their name, role, profession, or company +- Technologies they regularly work with +- Long-term projects or areas of focus +- Skills, expertise areas, or certifications +- Learning goals or career interests +- Communication preferences +- Preferred explanation depth +- Format preferences + +❌ DO NOT EXTRACT (Temporary information OR inferences): +- Travel plans, vacation destinations, or weekend activities +- Current weather, temporary locations, or daily schedules +- One-time events, meetings, or appointments +- Temporary projects with specific deadlines +- Current mood, health status, or temporary circumstances +- Time-sensitive information or situational context +- LOCATION INFERENCES: Do not assume where user lives based on questions about places +- ASSUMPTIONS: Do not infer user characteristics from indirect context +- INTERESTS: Do not assume interests just because user asks about a topic + +Examples of GOOD insights: "User's name is John" (when user says "My name is John"), "User is a senior software engineer" (when user says "I'm a senior engineer"), "User prefers TypeScript over JavaScript" (when user explicitly states preference), "User works in fintech industry" (when user mentions their industry) + +Examples of BAD insights: +- "User is traveling to Hakone this weekend" (temporary activity) +- "User has a meeting tomorrow" (temporary event) +- "User lives in Tokyo" (inferred from asking about Chiba - WRONG!) +- "User is interested in Japanese culture" (inferred from location questions - WRONG!) +- "User is feeling stressed about deadlines" (temporary mood) IMPORTANT: Only extract NEW information that is NOT already captured in the existing memory below. @@ -353,11 +377,22 @@ ${this.userInsightsContent || "None"} # OUTPUT FORMAT -Return only the new user insight as plain text, or return "NONE" if no new insights are found.`; +Return new user insights, one per line. If multiple insights are found, list them like this: +User's name is John +User is a senior software engineer +User prefers detailed explanations + +If no new insights are found, return "NONE".`; + + const humanPrompt = `Analyze these recent conversations and extract any NEW PERSISTENT user insights not already captured. - const humanPrompt = `Analyze these recent conversations and extract any NEW user insights not already captured. +STRICT REQUIREMENTS: +- Focus only on LONG-TERM characteristics that the user EXPLICITLY states about themselves +- DO NOT make inferences from questions they ask or topics they discuss +- DO NOT assume location, interests, or characteristics from conversation context +- Look for multiple insights if present - return each on a separate line - Each line is a separate conversation in the format: " ||||". +Each line is a separate conversation in the format: " ||||". ${this.recentConversationsContent}`; diff --git a/src/memory/memory-flow-diagram.md b/src/memory/memory-flow-diagram.md new file mode 100644 index 000000000..5b0b050e0 --- /dev/null +++ b/src/memory/memory-flow-diagram.md @@ -0,0 +1,68 @@ +# User Memory Management Flow (Planned) + +## Overview + +Planned design for how the user memory system should work in Obsidian Copilot, including triggers, short-term retention policy, and long-term memory types. 
+ +## Flow Diagram + +```mermaid +graph TD + %% Triggers for Memory Updates + A[Chat Conversation Ends] --> B[handleNewChat called] + B --> C{Memory Enabled?} + C -->|Yes| D[Append last conversation to Recent
Format: timestamp topic user_msg1,user_msg2,...] + C -->|No| Z[Skip Memory Update] + + %% Short-term (Recent) Memory + D --> E{Configurable Rolling Window Policy} + E -->|Count based: N items| F[Trim to last N] + E -->|Time based: T days| G[Drop entries older than T] + F --> H[Save to recent_conversations.md] + G --> H[Save to recent_conversations.md] + + %% LTM Update Decision happens AFTER recent update + H --> I{Check Long-Term-Memory Update Needed?} + I -->|Yes| J[Extract & Classify Long-term Signals] + I -->|No| Y[Skip LTM Update] + + %% Long-term (Persistent) Memory Types + J --> K[User Insights
LONG-TERM characteristics about the user] + J --> L["Response Preferences (V2?)"] + J --> M["Topic Highlights (V2?)"] + + K --> K2[Upsert user_insights.md] + L --> L2[Upsert response_preferences.md] + M --> M2[Upsert topic_highlights.md] +``` + +## Key Points + +### Memory Update Triggers: + +- **Trigger**: When a chat conversation ends and `handleNewChat()` is called +- **Guard**: Only if `enableMemory` setting is on + +### Recent Conversations (Short-term): + +- **When**: Updated after every conversation +- **Retention policy**: Rolling window is user-configurable by either count (keep last N items) or time (keep items within T days, e.g., 7 days) +- **Content**: Timestamp + brief summary + user message excerpts +- **Storage**: Example file `recent_conversations.md` + +### Long-term Memory (Persistent): + +- **Update check**: After recent memory is updated, evaluate whether LTM should be updated (e.g., new persistent info detected, thresholds reached) +- **Types**: `user_insights` (facts about the user), `response_preferences` (format/tone/style), `topic_highlights` (recurring themes) +- **Storage**: Separate files, e.g., `user_insights.md`, `response_preferences.md`, `topic_highlights.md` +- **Behavior**: Upsert with deduplication and timestamps; per-type enablement is possible + +### Configuration (proposed): + +- **`enableMemory`**: Master switch +- **`recentWindowType`**: `count` | `time` +- **`recentMaxItems` / `recentMaxAgeDays`**: Applies based on window type +- **`enableLTMemoryTypes`**: Toggle per type `{ user_insights, response_preferences, topic_highlights }` +- **`ltmUpdatePolicy`**: Heuristic or schedule to decide when LTM updates run + +This planned design ensures recent context stays concise and fresh while selectively promoting durable knowledge into well-structured long-term categories. 
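### Retention Policy Sketch (illustrative):

A minimal TypeScript sketch of the configurable rolling window described above. This is an illustration of the planned policy, not code from the plugin: the `RetentionPolicy` shape, its field names, and the `trimMemoryLines` helper are assumptions; only the `- <ISO timestamp> ...` line prefix comes from the memory file format.

```ts
// Illustrative only: sketches the planned count/time rolling window (not plugin API).
type RetentionPolicy =
  | { kind: "count"; maxItems: number } // keep the last N entries
  | { kind: "time"; maxAgeDays: number }; // keep entries newer than T days

function trimMemoryLines(lines: string[], policy: RetentionPolicy, now = new Date()): string[] {
  if (policy.kind === "count") {
    return lines.slice(-policy.maxItems);
  }
  const cutoffMs = now.getTime() - policy.maxAgeDays * 24 * 60 * 60 * 1000;
  return lines.filter((line) => {
    // Memory lines start with "- <ISO timestamp> ..."; keep lines we cannot parse.
    const match = line.match(/^-\s*(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})/);
    if (!match) return true;
    return new Date(match[1] + "Z").getTime() >= cutoffMs;
  });
}

// Example: keep only the last 7 days of conversations.
// const kept = trimMemoryLines(allLines, { kind: "time", maxAgeDays: 7 });
```

The count-based branch matches the current fixed 40-line buffer; the time-based branch is what the configurable window above would add.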
From db083315f6fe26275e51d695596ba7a7e0bd8a5e Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 8 Sep 2025 20:41:48 +0900 Subject: [PATCH 09/32] Use condensedMessage for recent conversation content --- src/core/ChatManager.ts | 31 +++++++++++ src/core/MessageRepository.ts | 21 ++++++++ src/memory/UserMemoryManager.ts | 95 ++++++++++++++++++++++++++++++++- src/types/message.ts | 4 ++ 4 files changed, 150 insertions(+), 1 deletion(-) diff --git a/src/core/ChatManager.ts b/src/core/ChatManager.ts index c63ae573c..e612ad9fa 100644 --- a/src/core/ChatManager.ts +++ b/src/core/ChatManager.ts @@ -140,6 +140,37 @@ export class ChatManager { // Update the processed content currentRepo.updateProcessedText(messageId, processedContent); + // Create condensed message for user messages + if (message.sender === USER_SENDER && this.plugin.userMemoryManager) { + try { + const settings = getSettings(); + if (settings.enableMemory) { + const chainManager = this.plugin.projectManager.getCurrentChainManager(); + const chatModel = chainManager.chatModelManager.getChatModel(); + + // Create condensed message asynchronously (fire and forget) + this.plugin.userMemoryManager + .createCondensedMessage(displayText, chatModel) + .then((condensedMessage) => { + if (condensedMessage) { + currentRepo.updateCondensedMessage(messageId, condensedMessage); + logInfo( + `[ChatManager] Created condensed message for ${messageId}: "${condensedMessage}"` + ); + } + }) + .catch((error) => { + logInfo( + `[ChatManager] Failed to create condensed message for ${messageId}:`, + error + ); + }); + } + } catch (error) { + logInfo(`[ChatManager] Error setting up condensed message creation:`, error); + } + } + logInfo(`[ChatManager] Successfully sent message ${messageId}`); return messageId; } catch (error) { diff --git a/src/core/MessageRepository.ts b/src/core/MessageRepository.ts index d51cbcc51..0c13b4767 100644 --- a/src/core/MessageRepository.ts +++ b/src/core/MessageRepository.ts @@ -88,6 +88,7 @@ export class MessageRepository { id, displayText: message.message, processedText: message.originalMessage || message.message, + condensedMessage: message.condensedMessage, sender: message.sender, timestamp, context: message.context, @@ -148,6 +149,21 @@ export class MessageRepository { return true; } + /** + * Update condensed message for a message + */ + updateCondensedMessage(id: string, condensedMessage: string): boolean { + const message = this.messages.find((msg) => msg.id === id); + if (!message) { + logInfo(`[MessageRepository] Message not found for condensed message update: ${id}`); + return false; + } + + message.condensedMessage = condensedMessage; + logInfo(`[MessageRepository] Updated condensed message for message: ${id}`); + return true; + } + /** * Delete a message */ @@ -201,6 +217,7 @@ export class MessageRepository { id: msg.id, message: msg.displayText, originalMessage: msg.displayText, + condensedMessage: msg.condensedMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: true, @@ -223,6 +240,7 @@ export class MessageRepository { id: msg.id, message: msg.processedText, originalMessage: msg.displayText, + condensedMessage: msg.condensedMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: false, // LLM messages are not for display @@ -242,6 +260,7 @@ export class MessageRepository { id: msg.id, message: msg.processedText, originalMessage: msg.displayText, + condensedMessage: msg.condensedMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: false, @@ -263,6 +282,7 @@ export class 
MessageRepository { id: msg.id, message: msg.displayText, originalMessage: msg.displayText, + condensedMessage: msg.condensedMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: msg.isVisible, @@ -283,6 +303,7 @@ export class MessageRepository { id: msg.id || this.generateId(), displayText: msg.message, processedText: msg.originalMessage || msg.message, + condensedMessage: msg.condensedMessage, sender: msg.sender, timestamp: msg.timestamp || formatDateTime(new Date()), context: msg.context, diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 50d75ca60..1a8198d02 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -81,6 +81,96 @@ export class UserMemoryManager { }); } + /** + * Create a condensed version of a user message for memory purposes. + * Optimized for Obsidian note-taking context and knowledge management workflows. + * + * @param userMessage - The original user message to condense + * @param chatModel - The chat model to use for condensing (optional) + * @returns Promise<string | null> - The condensed message or null if failed/unnecessary + * + * Features: + * - Returns null for empty messages + * - Validates that condensed message is actually shorter than original + * - Provides fallback truncation if AI condensing fails + * - Optimized prompts for Obsidian-specific use cases + */ + async createCondensedMessage( + userMessage: string, + chatModel?: BaseChatModel + ): Promise<string | null> { + if (!chatModel) { + logError("[UserMemoryManager] No chat model available for condensed message creation"); + return null; + } + + // Remove newlines and other formatting, then trim + const formattedMessage = userMessage.replace(/\n/g, " ").replace(/\\n/g, " "); + const trimmedMessage = formattedMessage.trim(); + if (!trimmedMessage) { + return null; + } + + const systemPrompt = `Your task is to condense user messages into concise one-line summaries while preserving user intent and important details. + +The condensed message will be used as part of the recent conversation content for memory purposes. + +CRITICAL RULES: +1. Keep it to ONE sentence maximum +2. Preserve the user's core intent and request +3. Include important details like note names, tags, search queries, or Obsidian features mentioned +4. Maintain the meaning and specificity of the original message +5. Use clear, direct language +6. Prioritize Obsidian-specific features (links, tags, graphs, plugins, etc.)
+ +# OUTPUT FORMAT +Return only the condensed message as plain text, no quotes or additional formatting.`; + + const humanPrompt = ` +${trimmedMessage} + + +Condense the user message into a single concise sentence while preserving intent and important details`; + + const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; + + try { + const response = await chatModel.invoke(messages_llm); + if (!response || !response.content) { + logError("[UserMemoryManager] Empty response from chat model for condensed message"); + return null; + } + + const condensed = response.content.toString().trim(); + + // Validate the condensed message + if (!condensed) { + logError("[UserMemoryManager] Chat model returned empty condensed message"); + return null; + } + + // Ensure the condensed message is actually shorter than the original + if (condensed.length >= trimmedMessage.length) { + logInfo("[UserMemoryManager] Condensed message not shorter than original, using original"); + return trimmedMessage; + } + + // Remove any quotes or formatting that might have been added + const cleanedCondensed = condensed.replace(/^["']|["']$/g, "").trim(); + + return cleanedCondensed || null; + } catch (error) { + logError("[UserMemoryManager] Failed to create condensed message:", error); + // Fallback: return a truncated version of the original message if it's too long + if (trimmedMessage.length > 100) { + const fallback = trimmedMessage.substring(0, 97) + "..."; + logInfo("[UserMemoryManager] Using fallback truncated message"); + return fallback; + } + return null; + } + } + /** * Get user memory prompt */ @@ -116,7 +206,10 @@ export class UserMemoryManager { const timestamp = new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC const userMessageTexts = messages .filter((message) => message.sender === USER_SENDER) - .map((message) => message.message); + .map((message) => { + // Use condensed message if available + return message.condensedMessage; + }); const content = userMessageTexts.join("||||"); return `${timestamp} ${conversationSummary}||||${content}`; } diff --git a/src/types/message.ts b/src/types/message.ts index 6b55aaa51..0ea979ad3 100644 --- a/src/types/message.ts +++ b/src/types/message.ts @@ -44,6 +44,9 @@ export interface ChatMessage { /** Original user input before processing (for LLM messages) */ originalMessage?: string; + /** The condensed message content (for memory and only for user messages) */ + condensedMessage?: string; + /** Message sender ("user", "AI", etc.) */ sender: string; @@ -82,6 +85,7 @@ export interface StoredMessage { id: string; displayText: string; // What user typed/what AI responded processedText: string; // For user messages: with context added. 
For AI: same as display + condensedMessage?: string; // Condensed version for memory (user messages only) sender: string; timestamp: FormattedDateTime; context?: MessageContext; From e0127270f3da0c91bd51452f2e660a30e4cf0b74 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Fri, 12 Sep 2025 16:27:53 +0900 Subject: [PATCH 10/32] Remove user insights/long-term memory functionality - Removed user insights extraction and storage logic - Simplified UserMemoryManager to focus only on recent conversations - Updated memory design documentation to reflect current implementation - Maintains recent conversation tracking with 40-line rolling buffer - No breaking changes to public API --- src/memory/UserMemoryManager.ts | 206 ++---------------------------- src/memory/memory-design.md | 50 ++++++++ src/memory/memory-flow-diagram.md | 68 ---------- 3 files changed, 58 insertions(+), 266 deletions(-) create mode 100644 src/memory/memory-design.md delete mode 100644 src/memory/memory-flow-diagram.md diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 1a8198d02..00157b207 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -7,20 +7,17 @@ import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { HumanMessage, SystemMessage } from "@langchain/core/messages"; const MAX_MEMORY_LINES = 40; -const INSIGHT_UPDATE_THRESHOLD = 1; // Update insights every new conversation /** * User Memory Management Class * * Instance-based methods for building and managing user memory based on conversations. - * The UserMemoryManager has methods to add recent conversations and user facts to the user memory, - * which can then be used to personalize LLM responses. + * The UserMemoryManager has methods to add recent conversations to memory + * which can then be used to provide recent conversation context for LLM responses. */ export class UserMemoryManager { private app: App; private recentConversationsContent: string = ""; - private userInsightsContent: string = ""; - private newestUserInsightTimestamp: Date | null = null; private isUpdatingMemory: boolean = false; constructor(app: App) { @@ -42,20 +39,7 @@ export class UserMemoryManager { logError("[UserMemoryManager] Error reading recent conversations file:", error); } - try { - const userInsightsFile = this.app.vault.getAbstractFileByPath(this.getUserInsightsFilePath()); - if (userInsightsFile instanceof TFile) { - this.userInsightsContent = await this.app.vault.read(userInsightsFile); - const userInsightsLines = this.userInsightsContent - .split("\n") - .filter((line) => line.trim().startsWith("- ")); - this.newestUserInsightTimestamp = this.extractTimestampFromLine( - userInsightsLines[userInsightsLines.length - 1] - ); - } - } catch (error) { - logError("[UserMemoryManager] Error reading user insights file:", error); - } + // User insights functionality removed - focusing only on recent memory } /** @@ -184,10 +168,6 @@ Condense the user message into a single concise sentence while preserving intent memoryPrompt += `\n# Recent Conversation Content\n${this.recentConversationsContent}\n`; } - if (this.userInsightsContent) { - memoryPrompt += `\n# User Insights\n${this.userInsightsContent}\n`; - } - return memoryPrompt.length > 0 ?
memoryPrompt : null; } catch (error) { logError("[UserMemoryManager] Error reading user memory content:", error); return null; } } @@ -243,34 +223,7 @@ Condense the user message into a single concise sentence while preserving intent const conversationLine = await this.createConversationLine(messages, chatModel); await this.addToMemoryFile(this.getRecentConversationFilePath(), conversationLine); - // 2. Check if user insights should be updated - // We update insights every INSIGHT_UPDATE_THRESHOLD conversations to ensure - important user information is preserved in long-term memory before being - rotated out of the short-term conversation buffer - await this.loadMemory(); - if (this.shouldUpdateUserInsights()) { - logInfo("[UserMemoryManager] Updating user insights based on recent conversation activity"); - const userInsights = await this.extractUserInsights(chatModel); - if (userInsights) { - try { - logInfo("[UserMemoryManager] Saving user insights:", userInsights); - const timestamp = new Date().toISOString().split(".")[0] + "Z"; - - // Split insights by line and save each separately - const insights = userInsights.split("\n").filter((line) => line.trim().length > 0); - for (const insight of insights) { - const timestampedInsight = `${timestamp} ${insight.trim()}`; - await this.addToMemoryFile(this.getUserInsightsFilePath(), timestampedInsight); - } - } catch (error) { - logError("[UserMemoryManager] Error saving user insights:", error); - } - } - } else { - logInfo( - "[UserMemoryManager] Skipping user insights update - not enough new conversations since last insight" - ); - } + // User insights functionality removed - only maintain recent conversations } catch (error) { logError("[UserMemoryManager] Error analyzing chat messages for user memory:", error); } finally { this.isUpdatingMemory = false; } } @@ -297,10 +250,7 @@ Condense the user message into a single concise sentence while preserving intent return `${settings.memoryFolderName}/recent_conversation_content.md`; } - private getUserInsightsFilePath(): string { - const settings = getSettings(); - return `${settings.memoryFolderName}/user_insights.md`; - } + // getUserInsightsFilePath removed - user insights functionality removed /** * Save content to the user memory file by appending new conversation * Maintains a rolling buffer of conversations by removing the oldest when limit is exceeded */ @@ -330,56 +280,9 @@ Condense the user message into a single concise sentence while preserving intent } } - /** - * Check if user insights should be updated based on conversation count - * Updates when there are at least INSIGHT_UPDATE_THRESHOLD new conversations since the last insight - */ - private shouldUpdateUserInsights(): boolean { - try { - // Always update if no insights exist yet - if (this.newestUserInsightTimestamp === null) { - return true; - } + // shouldUpdateUserInsights removed - user insights functionality removed - // No insights update possible without recent conversations - if (!this.recentConversationsContent.trim()) { - return false; - } - - // Count conversations newer than the latest insight - const recentLines = this.recentConversationsContent - .split("\n") - .filter((line) => line.trim().startsWith("- ")); - const newConversationsCount = recentLines.filter((line) => { - const timestamp = this.extractTimestampFromLine(line); - return timestamp && timestamp > this.newestUserInsightTimestamp!; - }).length; - - // Update if we have enough new conversations - return newConversationsCount >= INSIGHT_UPDATE_THRESHOLD; - } catch (error) { - logError("[UserMemoryManager] Error checking if user insights should be updated:", error); - // If there's an error, err on the
side of updating - return true; - } - } - - /** - * Extract timestamp from a memory line (format: "- YYYY-MM-DDTHH:mm:ss ...") - */ - private extractTimestampFromLine(line: string): Date | null { - if (!line) return null; - - // Remove "- " prefix and extract timestamp - const trimmed = line.replace(/^-\s*/, ""); - const timestampMatch = trimmed.match(/^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})/); - - if (!timestampMatch) { - logError("[UserMemoryManager] Error extracting timestamp from line:", line); - return null; - } - return new Date(timestampMatch[1] + "Z"); // Add Z for UTC - } + // extractTimestampFromLine removed - no longer needed without user insights functionality /** * Extract conversation summary using LLM @@ -415,98 +318,5 @@ ${conversationText}`; } } - /** - * Extract user insights using LLM with deduplication from class fields - */ - private async extractUserInsights(chatModel: BaseChatModel): Promise { - if (!this.recentConversationsContent.trim()) { - logInfo("[UserMemoryManager] No recent conversations found for insight extraction"); - return null; - } - - const systemPrompt = `You are an AI assistant that analyzes past conversations and extracts user insights. - -USER INSIGHTS: PERSISTENT factual information or preferences about the user written in a short sentence. - -CRITICAL RULES: -1. Only extract PERMANENT, LONG-TERM characteristics about the user -2. DO NOT extract temporary activities, events, or situational information -3. DO NOT make inferences or assumptions - only extract what the user EXPLICITLY states about themselves - -✅ EXTRACT (Persistent traits - ONLY when explicitly stated by user): -- Their name, role, profession, or company -- Technologies they regularly work with -- Long-term projects or areas of focus -- Skills, expertise areas, or certifications -- Learning goals or career interests -- Communication preferences -- Preferred explanation depth -- Format preferences - -❌ DO NOT EXTRACT (Temporary information OR inferences): -- Travel plans, vacation destinations, or weekend activities -- Current weather, temporary locations, or daily schedules -- One-time events, meetings, or appointments -- Temporary projects with specific deadlines -- Current mood, health status, or temporary circumstances -- Time-sensitive information or situational context -- LOCATION INFERENCES: Do not assume where user lives based on questions about places -- ASSUMPTIONS: Do not infer user characteristics from indirect context -- INTERESTS: Do not assume interests just because user asks about a topic - -Examples of GOOD insights: "User's name is John" (when user says "My name is John"), "User is a senior software engineer" (when user says "I'm a senior engineer"), "User prefers TypeScript over JavaScript" (when user explicitly states preference), "User works in fintech industry" (when user mentions their industry) - -Examples of BAD insights: -- "User is traveling to Hakone this weekend" (temporary activity) -- "User has a meeting tomorrow" (temporary event) -- "User lives in Tokyo" (inferred from asking about Chiba - WRONG!) -- "User is interested in Japanese culture" (inferred from location questions - WRONG!) -- "User is feeling stressed about deadlines" (temporary mood) - -IMPORTANT: Only extract NEW information that is NOT already captured in the existing memory below. - - -${this.userInsightsContent || "None"} - - -# OUTPUT FORMAT -Return new user insights, one per line. 
If multiple insights are found, list them like this: -User's name is John -User is a senior software engineer -User prefers detailed explanations - -If no new insights are found, return "NONE".`; - - const humanPrompt = `Analyze these recent conversations and extract any NEW PERSISTENT user insights not already captured. - -STRICT REQUIREMENTS: -- Focus only on LONG-TERM characteristics that the user EXPLICITLY states about themselves -- DO NOT make inferences from questions they ask or topics they discuss -- DO NOT assume location, interests, or characteristics from conversation context -- Look for multiple insights if present - return each on a separate line - -Each line is a separate conversation in the format: " ||||". - -${this.recentConversationsContent}`; - - const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; - - try { - const response = await chatModel.invoke(messages_llm); - const insight = response.content.toString().trim(); - - if ( - !insight || - insight.toLowerCase() === "none" || - insight.toLowerCase() === "no new insights" - ) { - return null; - } - - return insight; - } catch (error) { - logError("[UserMemoryManager] Failed to extract user insights:", error); - return null; - } - } + // extractUserInsights removed - user insights functionality removed } diff --git a/src/memory/memory-design.md b/src/memory/memory-design.md new file mode 100644 index 000000000..f20ef5545 --- /dev/null +++ b/src/memory/memory-design.md @@ -0,0 +1,50 @@ +# User Memory Management Flow (Current) + +## Overview + +Current design for how the user memory system works in Obsidian Copilot, focusing on recent conversation memory only. Long-term memory features like user insights have been removed to simplify the system. + +## Flow Diagram + +```mermaid +graph TD + %% Triggers for Memory Updates + A[Chat Conversation Ends] --> B[updateUserMemory called] + B --> C{Memory Enabled?} + C -->|Yes| D[Extract conversation summary and condensed user messages
Format: timestamp summary||||condensed_user_msg1||||condensed_user_msg2||||...] + C -->|No| Z[Skip Memory Update] + + %% Recent Memory Only (Simplified) + D --> E[Rolling Buffer Policy - Count Based] + E --> F[Keep last 40 conversations max] + F --> G[Save to recent_conversation_content.md] + G --> H[Memory Update Complete] +``` + +## Key Points + +### Memory Update Triggers: + +- **Trigger**: When a chat conversation ends and `updateUserMemory()` is called +- **Guard**: Only if `enableMemory` setting is on + +### Recent Conversations (Current Implementation): + +- **When**: Updated after every conversation +- **Retention policy**: Fixed rolling buffer - keeps last 40 conversations maximum +- **Content**: Timestamp + brief conversation summary + condensed user message excerpts +- **Format**: `- {timestamp} {summary}||||{condensed_user_msg1}||||{condensed_user_msg2}||||...` +- **Storage**: `recent_conversation_content.md` in the configured memory folder + +### Configuration (Current): + +- **`enableMemory`**: Master switch for all memory functionality +- **`memoryFolderName`**: Folder where memory files are stored + +### Removed Features: + +- **Long-term Memory**: User insights, response preferences, and topic highlights have been removed +- **Complex Update Logic**: No more threshold-based updates or insight extraction +- **Multiple Memory Types**: Simplified to only recent conversations + +This simplified design focuses on providing recent conversation context without the complexity of long-term memory management. diff --git a/src/memory/memory-flow-diagram.md deleted file mode 100644 index 5b0b050e0..000000000 --- a/src/memory/memory-flow-diagram.md +++ /dev/null @@ -1,68 +0,0 @@ -# User Memory Management Flow (Planned) - -## Overview - -Planned design for how the user memory system should work in Obsidian Copilot, including triggers, short-term retention policy, and long-term memory types. - -## Flow Diagram - -```mermaid -graph TD - %% Triggers for Memory Updates - A[Chat Conversation Ends] --> B[handleNewChat called] - B --> C{Memory Enabled?} - C -->|Yes| D[Append last conversation to Recent
Format: timestamp topic user_msg1,user_msg2,...] - C -->|No| Z[Skip Memory Update] - - %% Short-term (Recent) Memory - D --> E{Configurable Rolling Window Policy} - E -->|Count based: N items| F[Trim to last N] - E -->|Time based: T days| G[Drop entries older than T] - F --> H[Save to recent_conversations.md] - G --> H[Save to recent_conversations.md] - - %% LTM Update Decision happens AFTER recent update - H --> I{Check Long-Term-Memory Update Needed?} - I -->|Yes| J[Extract & Classify Long-term Signals] - I -->|No| Y[Skip LTM Update] - - %% Long-term (Persistent) Memory Types - J --> K[User Insights
LONG-TERM characteristics about the user] - J --> L["Response Preferences (V2?)"] - J --> M["Topic Highlights (V2?)"] - - K --> K2[Upsert user_insights.md] - L --> L2[Upsert response_preferences.md] - M --> M2[Upsert topic_highlights.md] -``` - -## Key Points - -### Memory Update Triggers: - -- **Trigger**: When a chat conversation ends and `handleNewChat()` is called -- **Guard**: Only if `enableMemory` setting is on - -### Recent Conversations (Short-term): - -- **When**: Updated after every conversation -- **Retention policy**: Rolling window is user-configurable by either count (keep last N items) or time (keep items within T days, e.g., 7 days) -- **Content**: Timestamp + brief summary + user message excerpts -- **Storage**: Example file `recent_conversations.md` - -### Long-term Memory (Persistent): - -- **Update check**: After recent memory is updated, evaluate whether LTM should be updated (e.g., new persistent info detected, thresholds reached) -- **Types**: `user_insights` (facts about the user), `response_preferences` (format/tone/style), `topic_highlights` (recurring themes) -- **Storage**: Separate files, e.g., `user_insights.md`, `response_preferences.md`, `topic_highlights.md` -- **Behavior**: Upsert with deduplication and timestamps; per-type enablement is possible - -### Configuration (proposed): - -- **`enableMemory`**: Master switch -- **`recentWindowType`**: `count` | `time` -- **`recentMaxItems` / `recentMaxAgeDays`**: Applies based on window type -- **`enableLTMemoryTypes`**: Toggle per type `{ user_insights, response_preferences, topic_highlights }` -- **`ltmUpdatePolicy`**: Heuristic or schedule to decide when LTM updates run - -This planned design ensures recent context stays concise and fresh while selectively promoting durable knowledge into well-structured long-term categories. From 6dac3742d30981604f5a31db24e5481609d2bfa9 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Fri, 12 Sep 2025 18:27:56 +0900 Subject: [PATCH 11/32] Add maxRecentConversations setting and update memory formatting --- src/constants.ts | 1 + src/memory/UserMemoryManager.ts | 181 +++++++++++++++--- src/memory/memory-design.md | 14 +- src/settings/model.ts | 12 +- .../v2/components/CopilotPlusSettings.tsx | 35 ++-- 5 files changed, 196 insertions(+), 47 deletions(-) diff --git a/src/constants.ts b/src/constants.ts index a9a687ede..df45d3613 100644 --- a/src/constants.ts +++ b/src/constants.ts @@ -751,6 +751,7 @@ export const DEFAULT_SETTINGS: CopilotSettings = { verbosity: DEFAULT_MODEL_SETTING.VERBOSITY, memoryFolderName: "copilot-memory", enableMemory: false, + maxRecentConversations: 30, }; export const EVENT_NAMES = { diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 00157b207..3f76456a4 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -6,8 +6,6 @@ import { getSettings } from "@/settings/model"; import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { HumanMessage, SystemMessage } from "@langchain/core/messages"; -const MAX_MEMORY_LINES = 40; - /** * User Memory Management Class * @@ -50,7 +48,7 @@ export class UserMemoryManager { // Only proceed if memory is enabled if (!settings.enableMemory) { - logInfo("[UserMemoryManager] User memory tracking is disabled, skipping analysis"); + logInfo("[UserMemoryManager] Recent history referencing is disabled, skipping analysis"); return; } @@ -108,7 +106,8 @@ CRITICAL RULES: 6. 
Prioritize Obsidian-specific features (links, tags, graphs, plugins, etc.) # OUTPUT FORMAT -Return only the condensed message as plain text, no quotes or additional formatting.`; +* Return only the condensed message as plain text, no quotes or additional formatting. +* Use the same language as the original message.`; const humanPrompt = ` ${trimmedMessage} @@ -165,7 +164,7 @@ Condense the user message into a single concise sentence while preserving intent let memoryPrompt = ""; if (this.recentConversationsContent) { - memoryPrompt += `\n# Recent Conversation Content\n${this.recentConversationsContent}\n`; + memoryPrompt += `\n${this.recentConversationsContent}\n`; } return memoryPrompt.length > 0 ? memoryPrompt : null; @@ -176,9 +175,9 @@ Condense the user message into a single concise sentence while preserving intent } /** - * Create a conversation line from messages and return it + * Create a conversation section from messages and return it in Markdown format */ - private async createConversationLine( + private async createConversationSection( messages: ChatMessage[], chatModel: BaseChatModel ): Promise { @@ -188,10 +187,21 @@ Condense the user message into a single concise sentence while preserving intent .filter((message) => message.sender === USER_SENDER) .map((message) => { // Use condensed message if available - return message.condensedMessage; + return `- ${message.condensedMessage}`; }); - const content = userMessageTexts.join("||||"); - return `${timestamp} ${conversationSummary}||||${content}`; + + // Generate key conclusion if conversation is substantial enough + const keyConclusion = await this.extractKeyConclusion(messages, chatModel); + + let section = `## ${conversationSummary}\n`; + section += `**Time:** ${timestamp}\n`; + section += `**User Messages:**\n${userMessageTexts.join("\n")}\n`; + + if (keyConclusion) { + section += `**Key Conclusion:** ${keyConclusion}\n`; + } + + return section; } /** @@ -220,8 +230,8 @@ Condense the user message into a single concise sentence while preserving intent } // 1. Always extract and save conversation summary to recent conversations - const conversationLine = await this.createConversationLine(messages, chatModel); - await this.addToMemoryFile(this.getRecentConversationFilePath(), conversationLine); + const conversationSection = await this.createConversationSection(messages, chatModel); + await this.addToMemoryFile(this.getRecentConversationFilePath(), conversationSection); // User insights functionality removed - only maintain recent conversations } catch (error) { @@ -240,50 +250,164 @@ Condense the user message into a single concise sentence while preserving intent const folder = this.app.vault.getAbstractFileByPath(memoryFolderPath); if (!folder) { - await this.app.vault.createFolder(memoryFolderPath); + await this.createFolderRecursively(memoryFolderPath); logInfo(`[UserMemoryManager] Created user memory folder: ${memoryFolderPath}`); } } + /** + * Recursively create folders for the given path + */ + private async createFolderRecursively(folderPath: string): Promise { + const pathParts = folderPath.split("/").filter((part) => part.length > 0); + let currentPath = ""; + + for (const part of pathParts) { + currentPath = currentPath ? 
`${currentPath}/${part}` : part; + + const exists = this.app.vault.getAbstractFileByPath(currentPath); + if (!exists) { + await this.app.vault.createFolder(currentPath); + } + } + } + private getRecentConversationFilePath(): string { const settings = getSettings(); - return `${settings.memoryFolderName}/recent_conversation_content.md`; + return `${settings.memoryFolderName}/Recent Conversations Content.md`; } // getUserInsightsFilePath removed - user insights functionality removed /** - * Save content to the user memory file by appending new conversation + * Save content to the user memory file by appending new conversation section * Maintains a rolling buffer of conversations by removing the oldest when limit is exceeded */ - private async addToMemoryFile(filePath: string, newContent: string): Promise { - const newConversationLine = `- ${newContent}`; - + private async addToMemoryFile(filePath: string, newConversationSection: string): Promise { const existingFile = this.app.vault.getAbstractFileByPath(filePath); if (existingFile instanceof TFile) { - // Read existing conversation lines, append the new line. - // Make sure the content lines do not exceed 40 lines. If it does, remove the first line. + // Read existing content and parse conversations const fileContent = await this.app.vault.read(existingFile); - const lines = fileContent.split("\n"); - lines.push(newConversationLine); - if (lines.length > MAX_MEMORY_LINES) { - // Remove the first line to keep within the limit - lines.shift(); + let updatedContent: string; + + if (fileContent.trim() === "") { + // Create new file without header + updatedContent = `${newConversationSection}\n`; + } else { + // Parse existing conversations and add new one + const conversations = this.parseExistingConversations(fileContent); + conversations.push(newConversationSection); + + // Keep only the most recent conversations + const settings = getSettings(); + const maxConversations = settings.maxRecentConversations; + if (conversations.length > maxConversations) { + conversations.splice(0, conversations.length - maxConversations); + } + + updatedContent = `${conversations.join("\n")}\n`; } - const updatedContent = lines.join("\n"); await this.app.vault.modify(existingFile, updatedContent); } else { - await this.app.vault.create(filePath, newConversationLine); + // Create new file + const initialContent = `${newConversationSection}\n`; + await this.app.vault.create(filePath, initialContent); + } + } + + /** + * Parse existing conversations from file content + */ + private parseExistingConversations(content: string): string[] { + const conversations: string[] = []; + + // Remove any old header if it exists + const cleanContent = content.replace(/^# Recent Conversations\s*\n\n?/m, "").trim(); + + // Split by ## headings to get individual conversations + const sections = cleanContent.split(/^## /m); + + if (sections.length === 1 && sections[0].trim()) { + // Content doesn't start with ##, but has content + if (sections[0].trim().startsWith("##")) { + conversations.push(sections[0].trim()); + } else { + // Find any ## sections in the content + const matches = cleanContent.match(/^## [\s\S]+?(?=^## |$)/gm); + if (matches) { + conversations.push(...matches.map((match) => match.trim())); + } + } + } else { + for (let i = 1; i < sections.length; i++) { + // Skip the first section (before first ##) + const section = `## ${sections[i]}`.trim(); + if (section.length > 0) { + conversations.push(section); + } + } } + + return conversations; } // 
shouldUpdateUserInsights removed - user insights functionality removed // extractTimestampFromLine removed - no longer needed without user insights functionality + /** + * Extract key conclusion from conversation if it contains important insights + */ + private async extractKeyConclusion( + messages: ChatMessage[], + chatModel: BaseChatModel + ): Promise { + // Only generate key conclusions for conversations with substantial content + const conversationText = messages.map((msg) => `${msg.sender}: ${msg.message}`).join("\n\n"); + + // Skip if conversation is too short or simple + if (conversationText.length < 300) { + return null; + } + + const systemPrompt = `You are an AI assistant that analyzes conversations and determines if they contain important conclusions worth remembering. + +TASK: Analyze the conversation and extract a key conclusion ONLY if the conversation contains: +- Important insights, decisions, or learnings +- Technical solutions or discoveries +- Significant planning or strategy discussions +- Important facts or knowledge gained + +If the conversation is just casual chat, simple questions, or routine tasks, return "NONE". + +# OUTPUT FORMAT +If there's a key conclusion: Return a concise 1-2 sentence summary of the key insight/conclusion. Use the same language as the conversation. +If no important conclusion: Return exactly "NONE"`; + + const humanPrompt = `Analyze this conversation and determine if there's a key conclusion worth remembering: + +${conversationText}`; + + const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; + + try { + const response = await chatModel.invoke(messages_llm); + const conclusion = response.content.toString().trim(); + + if (conclusion === "NONE" || !conclusion) { + return null; + } + + return conclusion; + } catch (error) { + logError("[UserMemoryManager] Failed to extract key conclusion:", error); + return null; + } + } + /** * Extract conversation summary using LLM */ @@ -300,7 +424,8 @@ CONVERSATION SUMMARY: A very brief summary in 2-5 words maximum Examples: "Travel Plan", "Tokyo Weather" # OUTPUT FORMAT -Return only the brief 2-5 word summary as plain text, no JSON format needed.`; +* Return only the brief 2-5 word summary as plain text, no JSON format needed. +* Use the same language as the conversation.`; const humanPrompt = `Analyze this conversation and extract a brief summary: diff --git a/src/memory/memory-design.md b/src/memory/memory-design.md index f20ef5545..a56715b86 100644 --- a/src/memory/memory-design.md +++ b/src/memory/memory-design.md @@ -11,12 +11,12 @@ graph TD %% Triggers for Memory Updates A[Chat Conversation Ends] --> B[updateUserMemory called] B --> C{Memory Enabled?} - C -->|Yes| D[Extract conversation summary and condensed user messages
Format: timestamp summary||||condensed_user_msg1,condensed_user_msg2,...] + C -->|Yes| D[Extract conversation summary, condensed user messages,
and optional key conclusion for Markdown format] C -->|No| Z[Skip Memory Update] %% Recent Memory Only (Simplified) D --> E[Rolling Buffer Policy - Count Based] - E --> F[Keep last 40 conversations max] + E --> F[Keep last 20 conversations max] F --> G[Save to recent_conversation_content.md] G --> H[Memory Update Complete] ``` @@ -26,19 +26,19 @@ graph TD ### Memory Update Triggers: - **Trigger**: When a chat conversation ends and `updateUserMemory()` is called -- **Guard**: Only if `enableMemory` setting is on +- **Guard**: Only if `enableMemory` setting (Reference Recent History) is on ### Recent Conversations (Current Implementation): - **When**: Updated after every conversation -- **Retention policy**: Fixed rolling buffer - keeps last 40 conversations maximum -- **Content**: Timestamp + brief conversation summary + condensed user message excerpts -- **Format**: `- {timestamp} {summary}||||{condensed_user_msg1},{condensed_user_msg2},...` +- **Retention policy**: Fixed rolling buffer - keeps last 20 conversations maximum +- **Content**: Timestamp + brief conversation summary + condensed user message excerpts + optional key conclusion +- **Format**: Markdown format with `# Recent Conversations` header and `## conversation title` sections containing time, user messages, and optional key conclusions - **Storage**: `recent_conversation_content.md` in the configured memory folder ### Configuration (Current): -- **`enableMemory`**: Master switch for all memory functionality +- **`enableMemory`**: Master switch for all recent history referencing functionality - **`memoryFolderName`**: Folder where memory files are stored ### Removed Features: diff --git a/src/settings/model.ts b/src/settings/model.ts index c778fa53d..ff57668a4 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -133,8 +133,10 @@ export interface CopilotSettings { verbosity: "low" | "medium" | "high"; /** Folder where memory data is stored */ memoryFolderName: string; - /** Enable memory feature to build user memory from conversation history */ + /** Reference recent conversation history to provide more contextually relevant responses */ enableMemory: boolean; + /** Maximum number of recent conversations to remember (10-50) */ + maxRecentConversations: number; } export const settingsStore = createStore(); @@ -317,6 +319,14 @@ export function sanitizeSettings(settings: CopilotSettings): CopilotSettings { sanitizedSettings.enableMemory = DEFAULT_SETTINGS.enableMemory; } + // Ensure maxRecentConversations has a valid value (10-50 range) + const maxRecentConversations = Number(settingsToSanitize.maxRecentConversations); + if (isNaN(maxRecentConversations) || maxRecentConversations < 10 || maxRecentConversations > 50) { + sanitizedSettings.maxRecentConversations = DEFAULT_SETTINGS.maxRecentConversations; + } else { + sanitizedSettings.maxRecentConversations = maxRecentConversations; + } + return sanitizedSettings; } diff --git a/src/settings/v2/components/CopilotPlusSettings.tsx b/src/settings/v2/components/CopilotPlusSettings.tsx index b230cd228..40b28e56c 100644 --- a/src/settings/v2/components/CopilotPlusSettings.tsx +++ b/src/settings/v2/components/CopilotPlusSettings.tsx @@ -118,16 +118,6 @@ export const CopilotPlusSettings: React.FC = () => {
Memory
- { - updateSetting("enableMemory", checked); - }} - /> - { onChange={(value) => { updateSetting("memoryFolderName", value); }} - placeholder="copilot-memory" + placeholder="copilot/memory" + /> + + { + updateSetting("enableMemory", checked); + }} /> + {settings.enableMemory && ( + updateSetting("maxRecentConversations", value)} + /> + )} +
Autocomplete
Date: Sat, 13 Sep 2025 09:09:45 +0900 Subject: [PATCH 12/32] Support multiple conclusions and add unit tests --- src/constants.ts | 2 +- src/memory/UserMemoryManager.test.ts | 294 +++++++++++++++++++++++++++ src/memory/UserMemoryManager.ts | 61 +++--- src/memory/memory-design.md | 63 ++++-- 4 files changed, 373 insertions(+), 47 deletions(-) create mode 100644 src/memory/UserMemoryManager.test.ts diff --git a/src/constants.ts b/src/constants.ts index df45d3613..e3d4ccbd7 100644 --- a/src/constants.ts +++ b/src/constants.ts @@ -749,7 +749,7 @@ export const DEFAULT_SETTINGS: CopilotSettings = { ], reasoningEffort: DEFAULT_MODEL_SETTING.REASONING_EFFORT, verbosity: DEFAULT_MODEL_SETTING.VERBOSITY, - memoryFolderName: "copilot-memory", + memoryFolderName: "copilot/memory", enableMemory: false, maxRecentConversations: 30, }; diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts new file mode 100644 index 000000000..81ddd9bda --- /dev/null +++ b/src/memory/UserMemoryManager.test.ts @@ -0,0 +1,294 @@ +// Mock dependencies first to avoid circular dependencies +jest.mock("@/logger", () => ({ + logInfo: jest.fn(), + logError: jest.fn(), +})); + +jest.mock("@/settings/model", () => ({ + getSettings: jest.fn(), +})); + +jest.mock("@/constants", () => ({ + USER_SENDER: "user", +})); + +import { UserMemoryManager } from "./UserMemoryManager"; +import { App, TFile, Vault } from "obsidian"; +import { ChatMessage } from "@/types/message"; +import { logInfo, logError } from "@/logger"; +import { getSettings } from "@/settings/model"; +import { USER_SENDER } from "@/constants"; +import { BaseChatModel } from "@langchain/core/language_models/chat_models"; +import { AIMessageChunk } from "@langchain/core/messages"; + +// Helper to create TFile mock instances +const createMockTFile = (path: string): TFile => { + const file = Object.create(TFile.prototype); + file.path = path; + file.name = path.split("/").pop() || ""; + file.basename = file.name.replace(/\.[^/.]+$/, ""); + file.extension = path.split(".").pop() || ""; + return file; +}; + +describe("UserMemoryManager", () => { + let userMemoryManager: UserMemoryManager; + let mockApp: jest.Mocked; + let mockVault: jest.Mocked; + let mockChatModel: jest.Mocked; + let mockSettings: any; + + beforeEach(() => { + jest.clearAllMocks(); + + // Mock settings + mockSettings = { + enableMemory: true, + memoryFolderName: "copilot/memory", + maxRecentConversations: 30, + }; + (getSettings as jest.Mock).mockReturnValue(mockSettings); + + // Mock vault + mockVault = { + getAbstractFileByPath: jest.fn(), + read: jest.fn(), + modify: jest.fn(), + create: jest.fn(), + createFolder: jest.fn(), + } as any; + + // Mock app + mockApp = { + vault: mockVault, + } as any; + + // Mock chat model + mockChatModel = { + invoke: jest.fn(), + } as any; + + userMemoryManager = new UserMemoryManager(mockApp); + }); + + describe("updateUserMemory", () => { + const createMockMessage = ( + id: string, + message: string, + sender: string = USER_SENDER + ): ChatMessage => ({ + id, + message, + sender, + timestamp: null, + isVisible: true, + condensedMessage: `Condensed: ${message}`, + }); + + it("should skip memory update when memory is disabled", () => { + mockSettings.enableMemory = false; + const messages = [createMockMessage("1", "test message")]; + + userMemoryManager.updateUserMemory(messages, mockChatModel); + + expect(logInfo).toHaveBeenCalledWith( + "[UserMemoryManager] Recent history referencing is disabled, skipping analysis" + ); + }); + + it("should 
skip memory update when no messages provided", () => { + userMemoryManager.updateUserMemory([], mockChatModel); + + expect(logInfo).toHaveBeenCalledWith( + "[UserMemoryManager] No messages to analyze for user memory" + ); + }); + + it("should create nested memory folders recursively", async () => { + const messages = [createMockMessage("1", "test user message")]; + + // Set nested folder path in settings + mockSettings.memoryFolderName = "deep/nested/memory/folder"; + + // Mock folders don't exist + mockVault.getAbstractFileByPath.mockReturnValue(null); + + // Mock LLM responses + const mockResponse1 = new AIMessageChunk({ content: "Test Conversation Title" }); + const mockResponse2 = new AIMessageChunk({ content: "NONE" }); + mockChatModel.invoke.mockResolvedValueOnce(mockResponse1); + mockChatModel.invoke.mockResolvedValueOnce(mockResponse2); + + userMemoryManager.updateUserMemory(messages, mockChatModel); + + // Wait for async operation to complete + await new Promise((resolve) => setTimeout(resolve, 50)); + + // Verify all nested folders were created + expect(mockVault.createFolder).toHaveBeenCalledWith("deep"); + expect(mockVault.createFolder).toHaveBeenCalledWith("deep/nested"); + expect(mockVault.createFolder).toHaveBeenCalledWith("deep/nested/memory"); + expect(mockVault.createFolder).toHaveBeenCalledWith("deep/nested/memory/folder"); + + // Verify file creation in nested path + expect(mockVault.create).toHaveBeenCalledWith( + "deep/nested/memory/folder/Recent Conversations.md", + expect.stringContaining("## Test Conversation Title") + ); + }); + + it("should complete end-to-end memory update with existing file", async () => { + // Setup: Create test messages simulating a real conversation with enough content for key conclusions + const messages = [ + createMockMessage( + "1", + "How do I create a daily note template in Obsidian with automatic date formatting? I want to have a template that automatically inserts today's date and creates sections for tasks, notes, and reflections." + ), + createMockMessage( + "2", + "I can help you create a daily note template with automatic date formatting. Here's how you can set this up: First, create a template file in your templates folder with variables like {{date}} for automatic date insertion. You can use format strings to customize the date display. For the sections, you can create headers for Tasks, Notes, and Reflections that will be included every time you create a new daily note.", + "ai" + ), + createMockMessage( + "3", + "That's perfect! Can you also show me how to add tags automatically to these daily notes? I'd like them to be tagged with #daily-note and maybe the current month." + ), + createMockMessage( + "4", + "Certainly! You can add automatic tags to your template by including tag syntax directly in the template file. Add #daily-note and #{{date:MMMM}} to automatically tag with the current month. 
This way every daily note will be consistently tagged and easy to find later.", + "ai" + ), + ]; + + // Mock existing memory file with previous conversations + const existingMemoryContent = `## Previous Conversation +**Time:** 2024-01-01T09:00:00Z +**User Messages:** +- Asked about plugin installation +**Key Conclusions:** +- Plugins enhance Obsidian functionality + +## Another Conversation +**Time:** 2024-01-01T10:00:00Z +**User Messages:** +- Inquired about linking notes +**Key Conclusions:** +- Backlinks create knowledge connections +`; + + const mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); + + // Mock vault responses for folder and file existence + const mockFolder = { path: "copilot/memory", name: "memory" } as any; + mockVault.getAbstractFileByPath + .mockReturnValueOnce(mockFolder) // Folder exists + .mockReturnValueOnce(mockMemoryFile); // File exists + + // Mock reading existing file content + mockVault.read.mockResolvedValue(existingMemoryContent); + + // Mock LLM responses for conversation processing + const mockTitleResponse = new AIMessageChunk({ content: "Daily Note Template Setup" }); + const mockConclusionResponse = new AIMessageChunk({ + content: + "- Templates can automatically insert dates and metadata\n- Tags can be added through template variables", + }); + mockChatModel.invoke + .mockResolvedValueOnce(mockTitleResponse) + .mockResolvedValueOnce(mockConclusionResponse); + + // Execute the updateUserMemory function + userMemoryManager.updateUserMemory(messages, mockChatModel); + + // Wait for async operation to complete + await new Promise((resolve) => setTimeout(resolve, 100)); + + // Verify the end result: file was modified with new conversation + const modifyCall = mockVault.modify.mock.calls[0]; + const actualContent = modifyCall[1]; + + // Verify that the content includes all previous conversations plus the new one + expect(actualContent).toContain("## Previous Conversation"); + expect(actualContent).toContain("## Another Conversation"); + expect(actualContent).toContain("## Daily Note Template Setup"); + + // Verify the new conversation structure + expect(actualContent).toMatch(/\*\*Time:\*\* \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/); + expect(actualContent).toContain("**User Messages:**"); + expect(actualContent).toContain("- Condensed: How do I create a daily note template"); + expect(actualContent).toContain( + "- Condensed: That's perfect! 
Can you also show me how to add tags" + ); + + // Since we provided a detailed conversation, key conclusions should be included + expect(actualContent).toContain("**Key Conclusions:**"); + expect(actualContent).toContain("- Templates can automatically insert dates and metadata"); + expect(actualContent).toContain("- Tags can be added through template variables"); + + // Verify that the conversation title and key conclusions were extracted via LLM + expect(mockChatModel.invoke).toHaveBeenCalledTimes(2); + + // Verify title extraction call + expect(mockChatModel.invoke).toHaveBeenNthCalledWith( + 1, + expect.arrayContaining([ + expect.objectContaining({ + content: expect.stringContaining("Generate a title for the conversation"), + }), + ]) + ); + + // Verify key conclusions extraction call + expect(mockChatModel.invoke).toHaveBeenNthCalledWith( + 2, + expect.arrayContaining([ + expect.objectContaining({ + content: expect.stringContaining("extract key conclusions"), + }), + ]) + ); + + // Verify no folder creation was needed since folder already exists + expect(mockVault.createFolder).not.toHaveBeenCalled(); + + // Verify no new file creation was needed since file already exists + expect(mockVault.create).not.toHaveBeenCalled(); + }); + }); + + describe("getUserMemoryPrompt", () => { + it("should return memory prompt when recent conversations exist", async () => { + const mockFile = createMockTFile("copilot/memory/Recent Conversations.md"); + const mockContent = "## Test Conversation\n**Time:** 2024-01-01T10:00:00Z\n"; + + mockVault.getAbstractFileByPath.mockReturnValue(mockFile); + mockVault.read.mockResolvedValue(mockContent); + + const result = await userMemoryManager.getUserMemoryPrompt(); + + expect(result).toBe(`\n${mockContent}\n`); + }); + + it("should return null when no memory content exists", async () => { + mockVault.getAbstractFileByPath.mockReturnValue(null); + + const result = await userMemoryManager.getUserMemoryPrompt(); + + expect(result).toBeNull(); + }); + + it("should handle errors and return null", async () => { + const mockFile = createMockTFile("copilot/memory/Recent Conversations.md"); + mockVault.getAbstractFileByPath.mockReturnValue(mockFile); + mockVault.read.mockRejectedValue(new Error("Read error")); + + const result = await userMemoryManager.getUserMemoryPrompt(); + + expect(result).toBeNull(); + expect(logError).toHaveBeenCalledWith( + "[UserMemoryManager] Error reading recent conversations file:", + expect.any(Error) + ); + }); + }); +}); diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 3f76456a4..d014960fc 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -25,19 +25,19 @@ export class UserMemoryManager { /** * Load memory data from files into class fields */ - async loadMemory(): Promise { + private async loadMemory(): Promise { try { const recentConversationsFile = this.app.vault.getAbstractFileByPath( this.getRecentConversationFilePath() ); if (recentConversationsFile instanceof TFile) { this.recentConversationsContent = await this.app.vault.read(recentConversationsFile); + } else { + logInfo("[UserMemoryManager] Recent Conversations file not found, skipping memory load"); } } catch (error) { logError("[UserMemoryManager] Error reading recent conversations file:", error); } - - // User insights functionality removed - focusing only on recent memory } /** @@ -181,7 +181,7 @@ Condense the user message into a single concise sentence while preserving intent messages: ChatMessage[], 
chatModel: BaseChatModel ): Promise { - const conversationSummary = await this.extractConversationSummary(messages, chatModel); + const conversationTitle = await this.extractConversationTitle(messages, chatModel); const timestamp = new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC const userMessageTexts = messages .filter((message) => message.sender === USER_SENDER) @@ -190,15 +190,15 @@ Condense the user message into a single concise sentence while preserving intent return `- ${message.condensedMessage}`; }); - // Generate key conclusion if conversation is substantial enough - const keyConclusion = await this.extractKeyConclusion(messages, chatModel); + // Generate key conclusions if conversation is substantial enough + const keyConclusions = await this.extractKeyConclusion(messages, chatModel); - let section = `## ${conversationSummary}\n`; + let section = `## ${conversationTitle}\n`; section += `**Time:** ${timestamp}\n`; - section += `**User Messages:**\n${userMessageTexts.join("\n")}\n`; + section += `**User Messages:**\n${userMessageTexts.join("\n - ")}\n`; - if (keyConclusion) { - section += `**Key Conclusion:** ${keyConclusion}\n`; + if (keyConclusions) { + section += `**Key Conclusions:**\n${keyConclusions}\n`; } return section; @@ -274,7 +274,7 @@ Condense the user message into a single concise sentence while preserving intent private getRecentConversationFilePath(): string { const settings = getSettings(); - return `${settings.memoryFolderName}/Recent Conversations Content.md`; + return `${settings.memoryFolderName}/Recent Conversations.md`; } // getUserInsightsFilePath removed - user insights functionality removed @@ -354,12 +354,8 @@ Condense the user message into a single concise sentence while preserving intent return conversations; } - // shouldUpdateUserInsights removed - user insights functionality removed - - // extractTimestampFromLine removed - no longer needed without user insights functionality - /** - * Extract key conclusion from conversation if it contains important insights + * Extract key conclusions from conversation if it contains important insights */ private async extractKeyConclusion( messages: ChatMessage[], @@ -375,7 +371,7 @@ Condense the user message into a single concise sentence while preserving intent const systemPrompt = `You are an AI assistant that analyzes conversations and determines if they contain important conclusions worth remembering. -TASK: Analyze the conversation and extract a key conclusion ONLY if the conversation contains: +TASK: Analyze the conversation and extract key conclusions ONLY if the conversation contains: - Important insights, decisions, or learnings - Technical solutions or discoveries - Significant planning or strategy discussions @@ -384,10 +380,15 @@ TASK: Analyze the conversation and extract a key conclusion ONLY if the conversa If the conversation is just casual chat, simple questions, or routine tasks, return "NONE". # OUTPUT FORMAT -If there's a key conclusion: Return a concise 1-2 sentence summary of the key insight/conclusion. Use the same language as the conversation. -If no important conclusion: Return exactly "NONE"`; +If there are key conclusions: Return each conclusion as a bullet point (use - for each point). Each conclusion should be concise (1-2 sentences). Use the same language as the conversation. 
+Example: +- First important insight or decision +- Second key learning or solution +- Third significant conclusion - const humanPrompt = `Analyze this conversation and determine if there's a key conclusion worth remembering: +If no important conclusions: Return exactly "NONE"`; + + const humanPrompt = `Analyze this conversation and determine if there are key conclusions worth remembering: ${conversationText}`; @@ -409,27 +410,29 @@ ${conversationText}`; } /** - * Extract conversation summary using LLM + * Extract conversation title using LLM */ - private async extractConversationSummary( + private async extractConversationTitle( messages: ChatMessage[], chatModel: BaseChatModel ): Promise { const conversationText = messages.map((msg) => `${msg.sender}: ${msg.message}`).join("\n\n"); - const systemPrompt = `You are an AI assistant that analyzes conversations and extracts a brief summary. - -CONVERSATION SUMMARY: A very brief summary in 2-5 words maximum + const systemPrompt = `Your task is to generate a title for a conversation based on its content. Examples: "Travel Plan", "Tokyo Weather" -# OUTPUT FORMAT -* Return only the brief 2-5 word summary as plain text, no JSON format needed. +# OUTPUT RULES +* Look at the conversation content and generate a title that captures the main *user intent* of the conversation. +* Return only the brief 2-8 word title as plain text, no JSON format needed. * Use the same language as the conversation.`; - const humanPrompt = `Analyze this conversation and extract a brief summary: + const humanPrompt = ` + +${conversationText} + -${conversationText}`; +Generate a title for the conversation:`; const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; diff --git a/src/memory/memory-design.md b/src/memory/memory-design.md index a56715b86..811dc65a1 100644 --- a/src/memory/memory-design.md +++ b/src/memory/memory-design.md @@ -11,14 +11,21 @@ graph TD %% Triggers for Memory Updates A[Chat Conversation Ends] --> B[updateUserMemory called] B --> C{Memory Enabled?} - C -->|Yes| D[Extract conversation summary, condensed user messages,
and optional key conclusion for Markdown format] + C -->|Yes| D[Process Messages for Memory Storage] C -->|No| Z[Skip Memory Update] - %% Recent Memory Only (Simplified) - D --> E[Rolling Buffer Policy - Count Based] - E --> F[Keep last 20 conversations max] - F --> G[Save to recent_conversation_content.md] - G --> H[Memory Update Complete] + %% Message Processing + D --> E[Extract Conversation Title using LLM] + E --> F[Extract Key Conclusions if substantial content] + F --> G[Create Conversation Section in Markdown using existing condensed messages] + + %% Storage and Rolling Buffer + H --> I[Load Existing Conversations from File] + I --> J[Add New Conversation Section] + J --> K[Apply Rolling Buffer Policy] + K --> L[Keep last maxRecentConversations] + L --> M[Save to Recent Conversations.md] + M --> N[Memory Update Complete] ``` ## Key Points @@ -26,25 +33,47 @@ graph TD ### Memory Update Triggers: - **Trigger**: When a chat conversation ends and `updateUserMemory()` is called -- **Guard**: Only if `enableMemory` setting (Reference Recent History) is on +- **Guard**: Only if `enableMemory` setting (Reference Recent History) is enabled +- **Fire-and-forget**: Runs asynchronously in background without blocking execution +- **Race condition protection**: Prevents multiple simultaneous memory updates ### Recent Conversations (Current Implementation): - **When**: Updated after every conversation -- **Retention policy**: Fixed rolling buffer - keeps last 20 conversations maximum -- **Content**: Timestamp + brief conversation summary + condensed user message excerpts + optional key conclusion -- **Format**: Markdown format with `# Recent Conversations` header and `## conversation title` sections containing time, user messages, and optional key conclusions -- **Storage**: `recent_conversation_content.md` in the configured memory folder +- **Retention policy**: Configurable rolling buffer - keeps last `maxRecentConversations` (default: 30, range: 10-50) +- **Content**: + - Timestamp (ISO format with UTC) + - LLM-generated conversation title (2-8 words) + - Condensed user messages (AI-generated one-line summaries created during conversation) + - Optional key conclusions (only for substantial conversations >300 chars) +- **Format**: Markdown format with `## conversation title` sections containing structured data +- **Storage**: `Recent Conversations.md` in the configured memory folder +- **File handling**: Creates file if doesn't exist, parses existing conversations to maintain rolling buffer + +### Message Processing Features: + +- **Condensed Messages**: AI-generated one-line summaries of user messages created during conversation (not during memory update) that preserve intent and important details +- **Conversation Titles**: LLM-extracted titles that capture main user intent +- **Key Conclusions**: Only generated for conversations with substantial content (>300 chars) containing insights, decisions, or learnings +- **Obsidian-optimized**: Special handling for note names, tags, links, and Obsidian-specific features ### Configuration (Current): - **`enableMemory`**: Master switch for all recent history referencing functionality -- **`memoryFolderName`**: Folder where memory files are stored +- **`memoryFolderName`**: Folder where memory files are stored (creates recursively if needed) +- **`maxRecentConversations`**: Number of conversations to keep (10-50 range, default: 30) + +### Memory Retrieval: + +- **`getUserMemoryPrompt()`**: Loads and returns Recent Conversations for LLM context +- 
**`loadMemory()`**: Loads memory data from files into class fields +- **Automatic folder creation**: Ensures memory folder exists before operations -### Removed Features: +### Error Handling: -- **Long-term Memory**: User insights, response preferences, and topic highlights have been removed -- **Complex Update Logic**: No more threshold-based updates or insight extraction -- **Multiple Memory Types**: Simplified to only recent conversations +- Comprehensive error logging for all operations +- Fallback mechanisms for AI processing failures +- Graceful handling of missing files and folders +- Validation of AI-generated content (e.g., ensures condensed messages are actually shorter) -This simplified design focuses on providing recent conversation context without the complexity of long-term memory management. +This simplified design focuses on providing recent conversation context without the complexity of long-term memory management, while maintaining robust AI-powered content processing and configurable retention policies. From 0a6b6fa2a0f9afa062283468d1eae217b89a896c Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sat, 13 Sep 2025 10:27:28 +0900 Subject: [PATCH 13/32] Fix user message formatting --- src/memory/UserMemoryManager.test.ts | 69 +++++++++++++++++++++------- src/memory/UserMemoryManager.ts | 8 ++-- 2 files changed, 57 insertions(+), 20 deletions(-) diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index 81ddd9bda..8c604e54b 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -207,23 +207,60 @@ describe("UserMemoryManager", () => { const modifyCall = mockVault.modify.mock.calls[0]; const actualContent = modifyCall[1]; - // Verify that the content includes all previous conversations plus the new one - expect(actualContent).toContain("## Previous Conversation"); - expect(actualContent).toContain("## Another Conversation"); - expect(actualContent).toContain("## Daily Note Template Setup"); - - // Verify the new conversation structure - expect(actualContent).toMatch(/\*\*Time:\*\* \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/); - expect(actualContent).toContain("**User Messages:**"); - expect(actualContent).toContain("- Condensed: How do I create a daily note template"); - expect(actualContent).toContain( - "- Condensed: That's perfect! Can you also show me how to add tags" - ); + // Check the full memory content structure as a whole - exact line-by-line verification + const expectedContentStructure = [ + // Previous conversations should be preserved (no empty lines between conversations) + "## Previous Conversation", + "**Time:** 2024-01-01T09:00:00Z", + "**User Messages:**", + "- Asked about plugin installation", + "**Key Conclusions:**", + "- Plugins enhance Obsidian functionality", + "## Another Conversation", + "**Time:** 2024-01-01T10:00:00Z", + "**User Messages:**", + "- Inquired about linking notes", + "**Key Conclusions:**", + "- Backlinks create knowledge connections", + // New conversation should be added + "## Daily Note Template Setup", + // Dynamic timestamp pattern + /\*\*Time:\*\* \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/, + "**User Messages:**", + "- Condensed: How do I create a daily note template in Obsidian with automatic date formatting? I want to have a template that automatically inserts today's date and creates sections for tasks, notes, and reflections.", + "- Condensed: That's perfect! Can you also show me how to add tags automatically to these daily notes? 
I'd like them to be tagged with #daily-note and maybe the current month.", + "**Key Conclusions:**", + "- Templates can automatically insert dates and metadata", + "- Tags can be added through template variables", + "", // Empty line at end + "", // Second empty line at end + ]; - // Since we provided a detailed conversation, key conclusions should be included - expect(actualContent).toContain("**Key Conclusions:**"); - expect(actualContent).toContain("- Templates can automatically insert dates and metadata"); - expect(actualContent).toContain("- Tags can be added through template variables"); + // Verify the complete content structure line by line + const contentLines = actualContent.split("\n"); + + // Verify we have the expected number of lines + expect(contentLines).toHaveLength(expectedContentStructure.length); + + // Verify each line matches the expected structure + for (let i = 0; i < expectedContentStructure.length; i++) { + const expectedItem = expectedContentStructure[i]; + const actualLine = contentLines[i]; + + if (expectedItem instanceof RegExp) { + // Handle regex patterns for dynamic content like timestamps + expect(actualLine).toMatch(expectedItem); + } else { + // Handle exact string matches + expect(actualLine).toBe(expectedItem); + } + } + + // Verify all conversations have the required sections using pattern matching + expect(actualContent.match(/## [^#\n]+/g)).toHaveLength(3); // 3 conversations + expect(actualContent.match(/\*\*Time:\*\*/g)).toHaveLength(3); // Each has a timestamp + expect(actualContent.match(/\*\*User Messages:\*\*/g)).toHaveLength(3); // Each has user messages + expect(actualContent.match(/\*\*Key Conclusions:\*\*/g)).toHaveLength(3); // Each has key conclusions // Verify that the conversation title and key conclusions were extracted via LLM expect(mockChatModel.invoke).toHaveBeenCalledTimes(2); diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index d014960fc..c3a4536dc 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -191,14 +191,14 @@ Condense the user message into a single concise sentence while preserving intent }); // Generate key conclusions if conversation is substantial enough - const keyConclusions = await this.extractKeyConclusion(messages, chatModel); + const keyConclusionsText = await this.extractKeyConclusion(messages, chatModel); let section = `## ${conversationTitle}\n`; section += `**Time:** ${timestamp}\n`; - section += `**User Messages:**\n${userMessageTexts.join("\n - ")}\n`; + section += `**User Messages:**\n${userMessageTexts.join("\n")}\n`; - if (keyConclusions) { - section += `**Key Conclusions:**\n${keyConclusions}\n`; + if (keyConclusionsText) { + section += `**Key Conclusions:**\n${keyConclusionsText}\n`; } return section; From 24bf217b1c46d1814b85d6525b4e142da36a6904 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sat, 13 Sep 2025 16:59:50 +0900 Subject: [PATCH 14/32] Fix PR issues --- .../chainRunner/AutonomousAgentChainRunner.ts | 1 - src/core/ChatManager.ts | 67 +++++++++++-------- src/memory/UserMemoryManager.ts | 14 +++- src/settings/model.ts | 6 ++ 4 files changed, 57 insertions(+), 31 deletions(-) diff --git a/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts b/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts index 6e09b4916..b2462a292 100644 --- a/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts +++ b/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts @@ -151,7 +151,6 @@ ${params} // Build initial 
conversation messages const customSystemPrompt = await this.generateSystemPrompt(); - console.log("customSystemPrompt", customSystemPrompt); const chatModel = this.chainManager.chatModelManager.getChatModel(); const adapter = ModelAdapterFactory.createAdapter(chatModel); diff --git a/src/core/ChatManager.ts b/src/core/ChatManager.ts index e612ad9fa..733d5fef9 100644 --- a/src/core/ChatManager.ts +++ b/src/core/ChatManager.ts @@ -141,35 +141,7 @@ export class ChatManager { currentRepo.updateProcessedText(messageId, processedContent); // Create condensed message for user messages - if (message.sender === USER_SENDER && this.plugin.userMemoryManager) { - try { - const settings = getSettings(); - if (settings.enableMemory) { - const chainManager = this.plugin.projectManager.getCurrentChainManager(); - const chatModel = chainManager.chatModelManager.getChatModel(); - - // Create condensed message asynchronously (fire and forget) - this.plugin.userMemoryManager - .createCondensedMessage(displayText, chatModel) - .then((condensedMessage) => { - if (condensedMessage) { - currentRepo.updateCondensedMessage(messageId, condensedMessage); - logInfo( - `[ChatManager] Created condensed message for ${messageId}: "${condensedMessage}"` - ); - } - }) - .catch((error) => { - logInfo( - `[ChatManager] Failed to create condensed message for ${messageId}:`, - error - ); - }); - } - } catch (error) { - logInfo(`[ChatManager] Error setting up condensed message creation:`, error); - } - } + this.createCondensedMessageAsync(message, messageId, displayText, currentRepo); logInfo(`[ChatManager] Successfully sent message ${messageId}`); return messageId; @@ -395,6 +367,43 @@ export class ChatManager { return currentRepo.getLLMMessage(id); } + /** + * Create condensed message asynchronously for user messages (fire and forget) + */ + private createCondensedMessageAsync( + message: ChatMessage, + messageId: string, + displayText: string, + currentRepo: MessageRepository + ): void { + if (message.sender === USER_SENDER && this.plugin.userMemoryManager) { + try { + const settings = getSettings(); + if (settings.enableMemory) { + const chainManager = this.plugin.projectManager.getCurrentChainManager(); + const chatModel = chainManager.chatModelManager.getChatModel(); + + // Create condensed message asynchronously (fire and forget) + this.plugin.userMemoryManager + .createCondensedMessage(displayText, chatModel) + .then((condensedMessage) => { + if (condensedMessage) { + currentRepo.updateCondensedMessage(messageId, condensedMessage); + logInfo( + `[ChatManager] Created condensed message for ${messageId}: "${condensedMessage}"` + ); + } + }) + .catch((error) => { + logInfo(`[ChatManager] Failed to create condensed message for ${messageId}:`, error); + }); + } + } catch (error) { + logInfo(`[ChatManager] Error setting up condensed message creation:`, error); + } + } + } + /** * Update chain memory with current LLM messages */ diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index c3a4536dc..48d9782a2 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -22,6 +22,18 @@ export class UserMemoryManager { this.app = app; } + /** + * Check if a message is a user message with valid condensed content + */ + private hasValidCondensedUserMessage(message: ChatMessage): boolean { + return ( + message.sender === USER_SENDER && + !!message.condensedMessage && + typeof message.condensedMessage === "string" && + message.condensedMessage.trim().length > 0 + ); + } + /** * Load 
memory data from files into class fields */ @@ -184,7 +196,7 @@ Condense the user message into a single concise sentence while preserving intent const conversationTitle = await this.extractConversationTitle(messages, chatModel); const timestamp = new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC const userMessageTexts = messages - .filter((message) => message.sender === USER_SENDER) + .filter(this.hasValidCondensedUserMessage.bind(this)) .map((message) => { // Use condensed message if available return `- ${message.condensedMessage}`; diff --git a/src/settings/model.ts b/src/settings/model.ts index 065d97275..30a80d2af 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -371,6 +371,12 @@ export async function getSystemPromptWithMemory( return getSystemPrompt(); } const memoryPrompt = await userMemoryManager.getUserMemoryPrompt(); + + // Only include user_memory section if there's actual memory content + if (!memoryPrompt) { + return systemPrompt; + } + return `${systemPrompt} ${memoryPrompt} From 6c078808dbbcf8cc3b9e6a9bce8ab7f31e1dc8f6 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sat, 13 Sep 2025 22:31:47 +0900 Subject: [PATCH 15/32] Refactor UserMemoryManager to utilize utility function for folder creation --- src/memory/UserMemoryManager.ts | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 48d9782a2..b02c478f0 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -3,6 +3,7 @@ import { ChatMessage } from "@/types/message"; import { logInfo, logError } from "@/logger"; import { USER_SENDER } from "@/constants"; import { getSettings } from "@/settings/model"; +import { ensureFolderExists } from "@/utils"; import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { HumanMessage, SystemMessage } from "@langchain/core/messages"; @@ -260,28 +261,7 @@ Condense the user message into a single concise sentence while preserving intent const settings = getSettings(); const memoryFolderPath = settings.memoryFolderName; - const folder = this.app.vault.getAbstractFileByPath(memoryFolderPath); - if (!folder) { - await this.createFolderRecursively(memoryFolderPath); - logInfo(`[UserMemoryManager] Created user memory folder: ${memoryFolderPath}`); - } - } - - /** - * Recursively create folders for the given path - */ - private async createFolderRecursively(folderPath: string): Promise { - const pathParts = folderPath.split("/").filter((part) => part.length > 0); - let currentPath = ""; - - for (const part of pathParts) { - currentPath = currentPath ? 
`${currentPath}/${part}` : part; - - const exists = this.app.vault.getAbstractFileByPath(currentPath); - if (!exists) { - await this.app.vault.createFolder(currentPath); - } - } + await ensureFolderExists(memoryFolderPath); } private getRecentConversationFilePath(): string { @@ -289,8 +269,6 @@ Condense the user message into a single concise sentence while preserving intent return `${settings.memoryFolderName}/Recent Conversations.md`; } - // getUserInsightsFilePath removed - user insights functionality removed - /** * Save content to the user memory file by appending new conversation section * Maintains a rolling buffer of conversations by removing the oldest when limit is exceeded From addbefdaa000711a5b7c5dbc94b667b79a71b01e Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sat, 13 Sep 2025 22:33:56 +0900 Subject: [PATCH 16/32] Use logError for error logging --- src/core/ChatManager.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/ChatManager.ts b/src/core/ChatManager.ts index 733d5fef9..b98e2026f 100644 --- a/src/core/ChatManager.ts +++ b/src/core/ChatManager.ts @@ -1,6 +1,6 @@ import { getSettings } from "@/settings/model"; import { ChainType } from "@/chainFactory"; -import { logInfo } from "@/logger"; +import { logError, logInfo } from "@/logger"; import { ChatMessage, MessageContext } from "@/types/message"; import { FileParserManager } from "@/tools/FileParserManager"; import ChainManager from "@/LLMProviders/chainManager"; @@ -395,11 +395,11 @@ export class ChatManager { } }) .catch((error) => { - logInfo(`[ChatManager] Failed to create condensed message for ${messageId}:`, error); + logError(`[ChatManager] Failed to create condensed message for ${messageId}:`, error); }); } } catch (error) { - logInfo(`[ChatManager] Error setting up condensed message creation:`, error); + logError(`[ChatManager] Error setting up condensed message creation:`, error); } } } From 06b635cdb24ce55c47d62b50c8e62e8cc09df302 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 17:25:11 +0900 Subject: [PATCH 17/32] Improve documentation and remove unused code --- src/LLMProviders/chainManager.ts | 9 +++------ src/core/MessageRepository.ts | 14 +++++++------- src/memory/UserMemoryManager.test.ts | 2 +- src/memory/UserMemoryManager.ts | 8 ++++---- src/settings/model.ts | 6 +++--- src/types/message.ts | 12 +++++++++--- 6 files changed, 27 insertions(+), 24 deletions(-) diff --git a/src/LLMProviders/chainManager.ts b/src/LLMProviders/chainManager.ts index 257416e4c..003c682e2 100644 --- a/src/LLMProviders/chainManager.ts +++ b/src/LLMProviders/chainManager.ts @@ -29,7 +29,6 @@ import { App, Notice } from "obsidian"; import ChatModelManager from "./chatModelManager"; import MemoryManager from "./memoryManager"; import PromptManager from "./promptManager"; -import CopilotPlugin from "@/main"; export default class ChainManager { // TODO: These chains are deprecated since we now use direct chat model calls in chain runners @@ -46,12 +45,10 @@ export default class ChainManager { public chatModelManager: ChatModelManager; public memoryManager: MemoryManager; public promptManager: PromptManager; - public plugin?: CopilotPlugin; - constructor(app: App, plugin?: CopilotPlugin) { + constructor(app: App) { // Instantiate singletons this.app = app; - this.plugin = plugin; this.memoryManager = MemoryManager.getInstance(); this.chatModelManager = ChatModelManager.getInstance(); this.promptManager = PromptManager.getInstance(); @@ -324,9 +321,9 @@ export default class 
ChainManager { updateLoading?: (loading: boolean) => void; } = {} ) { - const { ignoreSystemMessage = false } = options; + const { debug = false, ignoreSystemMessage = false } = options; - logInfo("Step 0: Initial user message:\n", userMessage); + if (debug) console.log("==== Step 0: Initial user message ====\n", userMessage); this.validateChatModel(); this.validateChainInitialization(); diff --git a/src/core/MessageRepository.ts b/src/core/MessageRepository.ts index 0c13b4767..a66f48965 100644 --- a/src/core/MessageRepository.ts +++ b/src/core/MessageRepository.ts @@ -88,7 +88,7 @@ export class MessageRepository { id, displayText: message.message, processedText: message.originalMessage || message.message, - condensedMessage: message.condensedMessage, + condensedUserMessage: message.condensedUserMessage, sender: message.sender, timestamp, context: message.context, @@ -159,7 +159,7 @@ export class MessageRepository { return false; } - message.condensedMessage = condensedMessage; + message.condensedUserMessage = condensedMessage; logInfo(`[MessageRepository] Updated condensed message for message: ${id}`); return true; } @@ -217,7 +217,7 @@ export class MessageRepository { id: msg.id, message: msg.displayText, originalMessage: msg.displayText, - condensedMessage: msg.condensedMessage, + condensedMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: true, @@ -240,7 +240,7 @@ export class MessageRepository { id: msg.id, message: msg.processedText, originalMessage: msg.displayText, - condensedMessage: msg.condensedMessage, + condensedUserMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: false, // LLM messages are not for display @@ -260,7 +260,7 @@ export class MessageRepository { id: msg.id, message: msg.processedText, originalMessage: msg.displayText, - condensedMessage: msg.condensedMessage, + condensedMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: false, @@ -282,7 +282,7 @@ export class MessageRepository { id: msg.id, message: msg.displayText, originalMessage: msg.displayText, - condensedMessage: msg.condensedMessage, + condensedUserMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: msg.isVisible, @@ -303,7 +303,7 @@ export class MessageRepository { id: msg.id || this.generateId(), displayText: msg.message, processedText: msg.originalMessage || msg.message, - condensedMessage: msg.condensedMessage, + condensedUserMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp || formatDateTime(new Date()), context: msg.context, diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index 8c604e54b..13e1300b8 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -82,7 +82,7 @@ describe("UserMemoryManager", () => { sender, timestamp: null, isVisible: true, - condensedMessage: `Condensed: ${message}`, + condensedUserMessage: `Condensed: ${message}`, }); it("should skip memory update when memory is disabled", () => { diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index b02c478f0..66592b7ba 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -29,9 +29,9 @@ export class UserMemoryManager { private hasValidCondensedUserMessage(message: ChatMessage): boolean { return ( message.sender === USER_SENDER && - !!message.condensedMessage && - typeof message.condensedMessage === "string" 
&& - message.condensedMessage.trim().length > 0 + !!message.condensedUserMessage && + typeof message.condensedUserMessage === "string" && + message.condensedUserMessage.trim().length > 0 ); } @@ -200,7 +200,7 @@ Condense the user message into a single concise sentence while preserving intent .filter(this.hasValidCondensedUserMessage.bind(this)) .map((message) => { // Use condensed message if available - return `- ${message.condensedMessage}`; + return `- ${message.condensedUserMessage}`; }); // Generate key conclusions if conversation is substantial enough diff --git a/src/settings/model.ts b/src/settings/model.ts index 30a80d2af..b71939c2d 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -13,7 +13,7 @@ import { DEFAULT_SYSTEM_PROMPT, EmbeddingModelProviders, } from "@/constants"; -import { logError } from "@/logger"; +import { logInfo } from "@/logger"; /** * We used to store commands in the settings file with the following interface. @@ -367,8 +367,8 @@ export async function getSystemPromptWithMemory( ): Promise { const systemPrompt = getSystemPrompt(); if (!userMemoryManager) { - logError("No UserMemoryManager provided to getSystemPromptWithMemory"); - return getSystemPrompt(); + logInfo("No UserMemoryManager provided to getSystemPromptWithMemory"); + return systemPrompt; } const memoryPrompt = await userMemoryManager.getUserMemoryPrompt(); diff --git a/src/types/message.ts b/src/types/message.ts index 0ea979ad3..bf8d9b477 100644 --- a/src/types/message.ts +++ b/src/types/message.ts @@ -44,8 +44,14 @@ export interface ChatMessage { /** Original user input before processing (for LLM messages) */ originalMessage?: string; - /** The condensed message content (for memory and only for user messages) */ - condensedMessage?: string; + /** + * AI-generated one-sentence summary of user messages for memory storage. + * Created asynchronously after user messages to reduce memory footprint while preserving + * core intent and Obsidian-specific features (notes, tags, links). + * Used in constructing "Recent Conversations" for user memory. + * Only applies to user messages when memory is enabled. + */ + condensedUserMessage?: string; /** Message sender ("user", "AI", etc.) */ sender: string; @@ -85,7 +91,7 @@ export interface StoredMessage { id: string; displayText: string; // What user typed/what AI responded processedText: string; // For user messages: with context added. 
For AI: same as display - condensedMessage?: string; // Condensed version for memory (user messages only) + condensedUserMessage?: string; // AI-generated condensed version for memory storage (user messages only) - see condensedUserMessage documentation above sender: string; timestamp: FormattedDateTime; context?: MessageContext; From 3cd9eb5186f63a77bd6b506d9e7f2890308cb01b Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 17:52:58 +0900 Subject: [PATCH 18/32] Replace console.error with logError --- src/LLMProviders/chainManager.ts | 3 +++ .../chainRunner/AutonomousAgentChainRunner.ts | 2 +- .../chainRunner/CopilotPlusChainRunner.ts | 2 +- .../chainRunner/LLMChainRunner.ts | 4 ++-- .../chainRunner/ProjectChainRunner.ts | 1 + src/components/Chat.tsx | 20 +++++++++---------- src/settings/SettingsPage.tsx | 4 ++-- src/settings/model.ts | 7 +++++++ src/settings/providerModels.ts | 3 ++- src/settings/v2/components/ApiKeyDialog.tsx | 4 ++-- .../v2/components/CommandSettings.tsx | 5 +++-- .../v2/components/CopilotPlusSettings.tsx | 3 ++- src/settings/v2/components/ModelAddDialog.tsx | 3 ++- 13 files changed, 38 insertions(+), 23 deletions(-) diff --git a/src/LLMProviders/chainManager.ts b/src/LLMProviders/chainManager.ts index 003c682e2..e15b546e5 100644 --- a/src/LLMProviders/chainManager.ts +++ b/src/LLMProviders/chainManager.ts @@ -29,6 +29,7 @@ import { App, Notice } from "obsidian"; import ChatModelManager from "./chatModelManager"; import MemoryManager from "./memoryManager"; import PromptManager from "./promptManager"; +import { UserMemoryManager } from "@/memory/UserMemoryManager"; export default class ChainManager { // TODO: These chains are deprecated since we now use direct chat model calls in chain runners @@ -45,6 +46,7 @@ export default class ChainManager { public chatModelManager: ChatModelManager; public memoryManager: MemoryManager; public promptManager: PromptManager; + public userMemoryManager: UserMemoryManager; constructor(app: App) { // Instantiate singletons @@ -52,6 +54,7 @@ export default class ChainManager { this.memoryManager = MemoryManager.getInstance(); this.chatModelManager = ChatModelManager.getInstance(); this.promptManager = PromptManager.getInstance(); + this.userMemoryManager = new UserMemoryManager(app); // Initialize async operations this.initialize(); diff --git a/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts b/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts index b2462a292..4e70aa258 100644 --- a/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts +++ b/src/LLMProviders/chainRunner/AutonomousAgentChainRunner.ts @@ -108,7 +108,7 @@ ${params} return AutonomousAgentChainRunner.generateSystemPrompt( availableTools, adapter, - this.chainManager.plugin?.userMemoryManager + this.chainManager.userMemoryManager ); } diff --git a/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts b/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts index 5a1764d03..84debcfc1 100644 --- a/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts +++ b/src/LLMProviders/chainRunner/CopilotPlusChainRunner.ts @@ -783,6 +783,6 @@ export class CopilotPlusChainRunner extends BaseChainRunner { } protected async getSystemPrompt(): Promise { - return getSystemPromptWithMemory(this.chainManager.plugin?.userMemoryManager); + return getSystemPromptWithMemory(this.chainManager.userMemoryManager); } } diff --git a/src/LLMProviders/chainRunner/LLMChainRunner.ts b/src/LLMProviders/chainRunner/LLMChainRunner.ts index ea41e5c7a..22c0d1f71 100644 --- 
a/src/LLMProviders/chainRunner/LLMChainRunner.ts +++ b/src/LLMProviders/chainRunner/LLMChainRunner.ts @@ -1,6 +1,6 @@ import { ABORT_REASON } from "@/constants"; import { logInfo } from "@/logger"; -import { getSystemPrompt } from "@/settings/model"; +import { getSystemPromptWithMemory } from "@/settings/model"; import { ChatMessage } from "@/types/message"; import { extractChatHistory, getMessageRole, withSuppressedTokenWarnings } from "@/utils"; import { BaseChainRunner } from "./BaseChainRunner"; @@ -30,7 +30,7 @@ export class LLMChainRunner extends BaseChainRunner { const messages: any[] = []; // Add system message if available - const systemPrompt = getSystemPrompt(); + const systemPrompt = await getSystemPromptWithMemory(this.chainManager.userMemoryManager); const chatModel = this.chainManager.chatModelManager.getChatModel(); if (systemPrompt) { diff --git a/src/LLMProviders/chainRunner/ProjectChainRunner.ts b/src/LLMProviders/chainRunner/ProjectChainRunner.ts index 443cd4074..4bb178d7f 100644 --- a/src/LLMProviders/chainRunner/ProjectChainRunner.ts +++ b/src/LLMProviders/chainRunner/ProjectChainRunner.ts @@ -5,6 +5,7 @@ import { CopilotPlusChainRunner } from "./CopilotPlusChainRunner"; export class ProjectChainRunner extends CopilotPlusChainRunner { protected async getSystemPrompt(): Promise { + // NOTE: Currently memory is not enabled for project mode, so we don't need to use getSystemPromptWithMemory let finalPrompt = getSystemPrompt(); const projectConfig = getCurrentProject(); if (!projectConfig) { diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx index 642d59a41..2d57e46f7 100644 --- a/src/components/Chat.tsx +++ b/src/components/Chat.tsx @@ -10,7 +10,7 @@ import { } from "@/aiParams"; import { ChainType } from "@/chainFactory"; import { useProjectContextStatus } from "@/hooks/useProjectContextStatus"; -import { logInfo } from "@/logger"; +import { logInfo, logError } from "@/logger"; import { ChatControls, reloadCurrentProject } from "@/components/chat-components/ChatControls"; import ChatInput from "@/components/chat-components/ChatInput"; @@ -255,7 +255,7 @@ const Chat: React.FC = ({ handleSaveAsNote(); } } catch (error) { - console.error("Error sending message:", error); + logError("Error sending message:", error); new Notice("Failed to send message. Please try again."); } finally { safeSet.setLoading(false); @@ -265,7 +265,7 @@ const Chat: React.FC = ({ const handleSaveAsNote = useCallback(async () => { if (!app) { - console.error("App instance is not available."); + logError("App instance is not available."); return; } @@ -273,7 +273,7 @@ const Chat: React.FC = ({ // Use the new ChatManager persistence functionality await chatUIState.saveChat(currentModelKey); } catch (error) { - console.error("Error saving chat as note:", err2String(error)); + logError("Error saving chat as note:", err2String(error)); new Notice("Failed to save chat as note. Check console for details."); } }, [app, chatUIState, currentModelKey]); @@ -338,7 +338,7 @@ const Chat: React.FC = ({ handleSaveAsNote(); } } catch (error) { - console.error("Error regenerating message:", error); + logError("Error regenerating message:", error); new Notice("Failed to regenerate message. Please try again."); } finally { safeSet.setLoading(false); @@ -399,7 +399,7 @@ const Chat: React.FC = ({ ); } } catch (error) { - console.error("Error regenerating AI response:", error); + logError("Error regenerating AI response:", error); new Notice("Failed to regenerate AI response. 
Please try again."); } finally { safeSet.setLoading(false); @@ -412,7 +412,7 @@ const Chat: React.FC = ({ handleSaveAsNote(); } } catch (error) { - console.error("Error editing message:", error); + logError("Error editing message:", error); new Notice("Failed to edit message. Please try again."); } }, @@ -459,7 +459,7 @@ const Chat: React.FC = ({ new Notice(`${project.name} added and context loaded`); }) .catch((error: Error) => { - console.error("Error loading project context:", error); + logError("Error loading project context:", error); new Notice(`${project.name} added but context loading failed`); }); } else { @@ -494,7 +494,7 @@ const Chat: React.FC = ({ new Notice(`${originP.name} updated and context reloaded`); }) .catch((error: Error) => { - console.error("Error reloading project context:", error); + logError("Error reloading project context:", error); new Notice(`${originP.name} updated but context reload failed`); }); } else { @@ -528,7 +528,7 @@ const Chat: React.FC = ({ new Notice("Failed to delete message. Please try again."); } } catch (error) { - console.error("Error deleting message:", error); + logError("Error deleting message:", error); new Notice("Failed to delete message. Please try again."); } }, diff --git a/src/settings/SettingsPage.tsx b/src/settings/SettingsPage.tsx index 743c2202d..5b80d2334 100644 --- a/src/settings/SettingsPage.tsx +++ b/src/settings/SettingsPage.tsx @@ -2,7 +2,7 @@ import CopilotView from "@/components/CopilotView"; import { CHAT_VIEWTYPE } from "@/constants"; import CopilotPlugin from "@/main"; import { getSettings } from "@/settings/model"; -import { logInfo } from "@/logger"; +import { logInfo, logError } from "@/logger"; import { App, Notice, PluginSettingTab } from "obsidian"; import React from "react"; import { createRoot } from "react-dom/client"; @@ -51,7 +51,7 @@ export class CopilotSettingTab extends PluginSettingTab { new Notice("Plugin reloaded successfully."); } catch (error) { new Notice("Failed to reload the plugin. 
Please reload manually."); - console.error("Error reloading plugin:", error); + logError("Error reloading plugin:", error); } } diff --git a/src/settings/model.ts b/src/settings/model.ts index b71939c2d..bd1aff766 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -366,6 +366,13 @@ export async function getSystemPromptWithMemory( userMemoryManager: UserMemoryManager | undefined ): Promise { const systemPrompt = getSystemPrompt(); + + // Check if memory is enabled in settings + const settings = getSettings(); + if (!settings.enableMemory) { + return systemPrompt; + } + if (!userMemoryManager) { logInfo("No UserMemoryManager provided to getSystemPromptWithMemory"); return systemPrompt; diff --git a/src/settings/providerModels.ts b/src/settings/providerModels.ts index ee4f77b0a..8b3c4594f 100644 --- a/src/settings/providerModels.ts +++ b/src/settings/providerModels.ts @@ -1,4 +1,5 @@ import { ChatModelProviders, SettingKeyProviders } from "@/constants"; +import { logError } from "@/logger"; /** * Standard model interface definition - for frontend display @@ -466,7 +467,7 @@ export const parseModelsResponse = (provider: SettingKeyProviders, data: any): S try { return adapter(data); } catch (error) { - console.error(`Error parsing ${provider} model data:`, error); + logError(`Error parsing ${provider} model data:`, error); return []; } }; diff --git a/src/settings/v2/components/ApiKeyDialog.tsx b/src/settings/v2/components/ApiKeyDialog.tsx index f7d99dbae..305f6ba63 100644 --- a/src/settings/v2/components/ApiKeyDialog.tsx +++ b/src/settings/v2/components/ApiKeyDialog.tsx @@ -150,7 +150,7 @@ function ApiKeyModalContent({ onClose }: ApiKeyModalContentProps) { setModelsByProvider((prev) => ({ ...prev, [provider]: standardModels })); setLoadingProvider(null); } catch (error) { - console.error(`Error fetching models for ${provider}:`, error); + logError(`Error fetching models for ${provider}:`, error); setErrorProvider(provider); setLoadingProvider(null); new Notice( @@ -195,7 +195,7 @@ function ApiKeyModalContent({ onClose }: ApiKeyModalContentProps) { ); } } catch (error) { - console.error("Model verification failed:", error); + logError("Model verification failed:", error); new Notice("Model verification failed: " + err2String(error), 10000); } finally { setVerifyingModel(false); diff --git a/src/settings/v2/components/CommandSettings.tsx b/src/settings/v2/components/CommandSettings.tsx index 03b36167d..0374dfe21 100644 --- a/src/settings/v2/components/CommandSettings.tsx +++ b/src/settings/v2/components/CommandSettings.tsx @@ -13,6 +13,7 @@ import { TableRow, } from "@/components/ui/table"; import { cn } from "@/lib/utils"; +import { logError } from "@/logger"; import { updateSetting, useSettingsValue } from "@/settings/model"; import { PromptSortStrategy } from "@/types"; import { @@ -193,7 +194,7 @@ export const CommandSettings: React.FC = () => { new Notice(`Command "${command.title}" deleted successfully!`); } catch (error) { - console.error("Failed to delete command:", error); + logError("Failed to delete command:", error); new Notice("Failed to delete command. Please try again."); throw error; } @@ -213,7 +214,7 @@ export const CommandSettings: React.FC = () => { autoOrder: false, }); } catch (error) { - console.error("Failed to copy command:", error); + logError("Failed to copy command:", error); new Notice("Failed to copy command. 
Please try again."); } }; diff --git a/src/settings/v2/components/CopilotPlusSettings.tsx b/src/settings/v2/components/CopilotPlusSettings.tsx index d0ab1bd6a..6f3220162 100644 --- a/src/settings/v2/components/CopilotPlusSettings.tsx +++ b/src/settings/v2/components/CopilotPlusSettings.tsx @@ -13,6 +13,7 @@ import { SettingItem } from "@/components/ui/setting-item"; import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip"; import { AUTOCOMPLETE_CONFIG } from "@/constants"; import { cn } from "@/lib/utils"; +import { logError } from "@/logger"; import { updateSetting, useSettingsValue } from "@/settings/model"; import { HelpCircle, RefreshCw } from "lucide-react"; import { Notice } from "obsidian"; @@ -62,7 +63,7 @@ export const CopilotPlusSettings: React.FC = () => { new Notice(`Word index rebuilt successfully! ${result.wordCount} unique words indexed.`); } catch (error) { - console.error("Failed to refresh word index:", error); + logError("Failed to refresh word index:", error); new Notice("Failed to refresh word index. Check console for details."); } finally { setIsRefreshing(false); diff --git a/src/settings/v2/components/ModelAddDialog.tsx b/src/settings/v2/components/ModelAddDialog.tsx index 8e75ed809..8a987a41e 100644 --- a/src/settings/v2/components/ModelAddDialog.tsx +++ b/src/settings/v2/components/ModelAddDialog.tsx @@ -12,6 +12,7 @@ import { SettingKeyProviders, } from "@/constants"; import { CustomModel } from "@/aiParams"; +import { logError } from "@/logger"; import { err2String, getProviderInfo, getProviderLabel, omit } from "@/utils"; import { Notice } from "obsidian"; import { Label } from "@/components/ui/label"; @@ -234,7 +235,7 @@ export const ModelAddDialog: React.FC = ({ await ping(cleanedModel); new Notice("Model verification successful!"); } catch (err) { - console.error(err); + logError(err); const errStr = err2String(err); new Notice("Model verification failed: " + errStr); } finally { From 2ff8020002a4f7febceda79b0713f6f64adf29ec Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 18:15:39 +0900 Subject: [PATCH 19/32] Backfill condensedUserMessage --- src/memory/UserMemoryManager.test.ts | 132 ++++++++++++++++++--------- src/memory/UserMemoryManager.ts | 48 ++++++++-- 2 files changed, 128 insertions(+), 52 deletions(-) diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index 13e1300b8..e6acec7fb 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -12,12 +12,17 @@ jest.mock("@/constants", () => ({ USER_SENDER: "user", })); +jest.mock("@/utils", () => ({ + ensureFolderExists: jest.fn(), +})); + import { UserMemoryManager } from "./UserMemoryManager"; import { App, TFile, Vault } from "obsidian"; import { ChatMessage } from "@/types/message"; import { logInfo, logError } from "@/logger"; import { getSettings } from "@/settings/model"; import { USER_SENDER } from "@/constants"; +import { ensureFolderExists } from "@/utils"; import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { AIMessageChunk } from "@langchain/core/messages"; @@ -63,6 +68,9 @@ describe("UserMemoryManager", () => { vault: mockVault, } as any; + // Reset ensureFolderExists mock + (ensureFolderExists as jest.Mock).mockClear(); + // Mock chat model mockChatModel = { invoke: jest.fn(), @@ -104,39 +112,6 @@ describe("UserMemoryManager", () => { ); }); - it("should create nested memory folders recursively", async () => { - const messages 
= [createMockMessage("1", "test user message")]; - - // Set nested folder path in settings - mockSettings.memoryFolderName = "deep/nested/memory/folder"; - - // Mock folders don't exist - mockVault.getAbstractFileByPath.mockReturnValue(null); - - // Mock LLM responses - const mockResponse1 = new AIMessageChunk({ content: "Test Conversation Title" }); - const mockResponse2 = new AIMessageChunk({ content: "NONE" }); - mockChatModel.invoke.mockResolvedValueOnce(mockResponse1); - mockChatModel.invoke.mockResolvedValueOnce(mockResponse2); - - userMemoryManager.updateUserMemory(messages, mockChatModel); - - // Wait for async operation to complete - await new Promise((resolve) => setTimeout(resolve, 50)); - - // Verify all nested folders were created - expect(mockVault.createFolder).toHaveBeenCalledWith("deep"); - expect(mockVault.createFolder).toHaveBeenCalledWith("deep/nested"); - expect(mockVault.createFolder).toHaveBeenCalledWith("deep/nested/memory"); - expect(mockVault.createFolder).toHaveBeenCalledWith("deep/nested/memory/folder"); - - // Verify file creation in nested path - expect(mockVault.create).toHaveBeenCalledWith( - "deep/nested/memory/folder/Recent Conversations.md", - expect.stringContaining("## Test Conversation Title") - ); - }); - it("should complete end-to-end memory update with existing file", async () => { // Setup: Create test messages simulating a real conversation with enough content for key conclusions const messages = [ @@ -178,11 +153,11 @@ describe("UserMemoryManager", () => { const mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); - // Mock vault responses for folder and file existence - const mockFolder = { path: "copilot/memory", name: "memory" } as any; - mockVault.getAbstractFileByPath - .mockReturnValueOnce(mockFolder) // Folder exists - .mockReturnValueOnce(mockMemoryFile); // File exists + // Mock ensureFolderExists to resolve successfully + (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); + + // Mock app instance for file operations + mockVault.getAbstractFileByPath.mockReturnValue(mockMemoryFile); // Mock reading existing file content mockVault.read.mockResolvedValue(existingMemoryContent); @@ -197,11 +172,8 @@ describe("UserMemoryManager", () => { .mockResolvedValueOnce(mockTitleResponse) .mockResolvedValueOnce(mockConclusionResponse); - // Execute the updateUserMemory function - userMemoryManager.updateUserMemory(messages, mockChatModel); - - // Wait for async operation to complete - await new Promise((resolve) => setTimeout(resolve, 100)); + // Execute the updateMemory function directly to ensure proper awaiting + await (userMemoryManager as any).updateMemory(messages, mockChatModel); // Verify the end result: file was modified with new conversation const modifyCall = mockVault.modify.mock.calls[0]; @@ -291,6 +263,80 @@ describe("UserMemoryManager", () => { // Verify no new file creation was needed since file already exists expect(mockVault.create).not.toHaveBeenCalled(); }); + + it("should handle missing condensed messages by creating them inline (race condition fix)", async () => { + // Setup: Create messages without condensed messages to simulate race condition + const messages = [ + createMockMessage("1", "How do I create daily notes?"), + createMockMessage("2", "AI response about daily notes", "ai"), + createMockMessage("3", "What about templates?"), + ]; + + // Remove condensed messages to simulate race condition + delete messages[0].condensedUserMessage; + delete messages[2].condensedUserMessage; + + const 
mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); + const existingContent = ""; + + // Mock ensureFolderExists and file operations + (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); + mockVault.getAbstractFileByPath.mockReturnValue(mockMemoryFile); + mockVault.read.mockResolvedValue(existingContent); + + // Mock LLM responses + const mockTitleResponse = new AIMessageChunk({ content: "Daily Notes Help" }); + const mockConclusionResponse = new AIMessageChunk({ + content: "- Daily notes can be automated with templates", + }); + + // Setup condensed message creation (called inline for missing entries) + const condensedMessage1 = "Asked about creating daily notes"; + const condensedMessage2 = "Inquired about template usage"; + + // Mock createCondensedMessage to return condensed versions + const createCondensedMessageSpy = jest.spyOn( + userMemoryManager as any, + "createCondensedMessage" + ); + + createCondensedMessageSpy.mockImplementation(async (message, model) => { + if (message === "How do I create daily notes?") { + return condensedMessage1; + } + if (message === "What about templates?") { + return condensedMessage2; + } + return null; + }); + + mockChatModel.invoke + .mockResolvedValueOnce(mockTitleResponse) + .mockResolvedValueOnce(mockConclusionResponse); + + // Execute the updateMemory function + await (userMemoryManager as any).updateMemory(messages, mockChatModel); + + // Verify condensed messages were created inline for missing entries + expect(createCondensedMessageSpy).toHaveBeenCalledTimes(2); + expect(createCondensedMessageSpy).toHaveBeenCalledWith( + "How do I create daily notes?", + mockChatModel + ); + expect(createCondensedMessageSpy).toHaveBeenCalledWith( + "What about templates?", + mockChatModel + ); + + // Verify the final content includes the inline-created condensed messages + const modifyCall = mockVault.modify.mock.calls[0]; + const actualContent = modifyCall[1]; + + expect(actualContent).toContain("Asked about creating daily notes"); + expect(actualContent).toContain("Inquired about template usage"); + + createCondensedMessageSpy.mockRestore(); + }); }); describe("getUserMemoryPrompt", () => { diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 66592b7ba..f5da698d9 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -1,6 +1,6 @@ import { App, TFile } from "obsidian"; import { ChatMessage } from "@/types/message"; -import { logInfo, logError } from "@/logger"; +import { logInfo, logError, logWarn } from "@/logger"; import { USER_SENDER } from "@/constants"; import { getSettings } from "@/settings/model"; import { ensureFolderExists } from "@/utils"; @@ -196,12 +196,42 @@ Condense the user message into a single concise sentence while preserving intent ): Promise { const conversationTitle = await this.extractConversationTitle(messages, chatModel); const timestamp = new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC - const userMessageTexts = messages - .filter(this.hasValidCondensedUserMessage.bind(this)) - .map((message) => { - // Use condensed message if available - return `- ${message.condensedUserMessage}`; - }); + + // Process user messages and ensure condensed messages are available + const userMessages = messages.filter((message) => message.sender === USER_SENDER); + const userMessageTexts: string[] = []; + + for (const message of userMessages) { + let condensedText = message.condensedUserMessage; + + // If condensed 
message is missing or invalid, create it inline to handle race condition + if ( + !condensedText || + typeof condensedText !== "string" || + condensedText.trim().length === 0 + ) { + try { + const newCondensedText = await this.createCondensedMessage(message.message, chatModel); + if (newCondensedText) { + condensedText = newCondensedText; + logWarn( + `[UserMemoryManager] Created inline condensed message for missing entry: "${condensedText}"` + ); + } + } catch (error) { + logError( + `[UserMemoryManager] Failed to create inline condensed message for "${message.message}":`, + error + ); + // Continue processing other messages even if one fails + } + } + + // Only include if we have valid condensed text + if (condensedText && condensedText.trim().length > 0) { + userMessageTexts.push(`- ${condensedText}`); + } + } // Generate key conclusions if conversation is substantial enough const keyConclusionsText = await this.extractKeyConclusion(messages, chatModel); @@ -429,10 +459,10 @@ Generate a title for the conversation:`; try { const response = await chatModel.invoke(messages_llm); const summary = response.content.toString().trim(); - return summary || "No summary"; + return summary || "Untitled Conversation"; } catch (error) { logError("[UserMemoryManager] Failed to extract conversation summary:", error); - return "No summary"; + return "Untitled Conversation"; } } From 2a9ac449c061d213aa3257a14bdcda818d2d9a06 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 19:12:11 +0900 Subject: [PATCH 20/32] Fix build error --- src/LLMProviders/projectManager.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/LLMProviders/projectManager.ts b/src/LLMProviders/projectManager.ts index d79f7546d..f03f6782b 100644 --- a/src/LLMProviders/projectManager.ts +++ b/src/LLMProviders/projectManager.ts @@ -39,7 +39,7 @@ export default class ProjectManager { this.app = app; this.plugin = plugin; this.currentProjectId = null; - this.chainMangerInstance = new ChainManager(app, plugin); + this.chainMangerInstance = new ChainManager(app); this.projectContextCache = ProjectContextCache.getInstance(); this.fileParserManager = new FileParserManager( BrevilabsClient.getInstance(), From c836de32c1ff01dafbc1ef1c2d33912fb07fac6d Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 20:02:45 +0900 Subject: [PATCH 21/32] Refactor for a simpiler implementation --- src/core/ChatManager.ts | 42 +--- src/core/MessageRepository.ts | 21 -- src/memory/UserMemoryManager.test.ts | 321 +++++++++++++-------------- src/memory/UserMemoryManager.ts | 280 +++++------------------ src/memory/memory-design.md | 2 + src/types/message.ts | 10 - 6 files changed, 219 insertions(+), 457 deletions(-) diff --git a/src/core/ChatManager.ts b/src/core/ChatManager.ts index b98e2026f..c63ae573c 100644 --- a/src/core/ChatManager.ts +++ b/src/core/ChatManager.ts @@ -1,6 +1,6 @@ import { getSettings } from "@/settings/model"; import { ChainType } from "@/chainFactory"; -import { logError, logInfo } from "@/logger"; +import { logInfo } from "@/logger"; import { ChatMessage, MessageContext } from "@/types/message"; import { FileParserManager } from "@/tools/FileParserManager"; import ChainManager from "@/LLMProviders/chainManager"; @@ -140,9 +140,6 @@ export class ChatManager { // Update the processed content currentRepo.updateProcessedText(messageId, processedContent); - // Create condensed message for user messages - this.createCondensedMessageAsync(message, messageId, displayText, currentRepo); - 
logInfo(`[ChatManager] Successfully sent message ${messageId}`); return messageId; } catch (error) { @@ -367,43 +364,6 @@ export class ChatManager { return currentRepo.getLLMMessage(id); } - /** - * Create condensed message asynchronously for user messages (fire and forget) - */ - private createCondensedMessageAsync( - message: ChatMessage, - messageId: string, - displayText: string, - currentRepo: MessageRepository - ): void { - if (message.sender === USER_SENDER && this.plugin.userMemoryManager) { - try { - const settings = getSettings(); - if (settings.enableMemory) { - const chainManager = this.plugin.projectManager.getCurrentChainManager(); - const chatModel = chainManager.chatModelManager.getChatModel(); - - // Create condensed message asynchronously (fire and forget) - this.plugin.userMemoryManager - .createCondensedMessage(displayText, chatModel) - .then((condensedMessage) => { - if (condensedMessage) { - currentRepo.updateCondensedMessage(messageId, condensedMessage); - logInfo( - `[ChatManager] Created condensed message for ${messageId}: "${condensedMessage}"` - ); - } - }) - .catch((error) => { - logError(`[ChatManager] Failed to create condensed message for ${messageId}:`, error); - }); - } - } catch (error) { - logError(`[ChatManager] Error setting up condensed message creation:`, error); - } - } - } - /** * Update chain memory with current LLM messages */ diff --git a/src/core/MessageRepository.ts b/src/core/MessageRepository.ts index a66f48965..d51cbcc51 100644 --- a/src/core/MessageRepository.ts +++ b/src/core/MessageRepository.ts @@ -88,7 +88,6 @@ export class MessageRepository { id, displayText: message.message, processedText: message.originalMessage || message.message, - condensedUserMessage: message.condensedUserMessage, sender: message.sender, timestamp, context: message.context, @@ -149,21 +148,6 @@ export class MessageRepository { return true; } - /** - * Update condensed message for a message - */ - updateCondensedMessage(id: string, condensedMessage: string): boolean { - const message = this.messages.find((msg) => msg.id === id); - if (!message) { - logInfo(`[MessageRepository] Message not found for condensed message update: ${id}`); - return false; - } - - message.condensedUserMessage = condensedMessage; - logInfo(`[MessageRepository] Updated condensed message for message: ${id}`); - return true; - } - /** * Delete a message */ @@ -217,7 +201,6 @@ export class MessageRepository { id: msg.id, message: msg.displayText, originalMessage: msg.displayText, - condensedMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: true, @@ -240,7 +223,6 @@ export class MessageRepository { id: msg.id, message: msg.processedText, originalMessage: msg.displayText, - condensedUserMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: false, // LLM messages are not for display @@ -260,7 +242,6 @@ export class MessageRepository { id: msg.id, message: msg.processedText, originalMessage: msg.displayText, - condensedMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: false, @@ -282,7 +263,6 @@ export class MessageRepository { id: msg.id, message: msg.displayText, originalMessage: msg.displayText, - condensedUserMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp, isVisible: msg.isVisible, @@ -303,7 +283,6 @@ export class MessageRepository { id: msg.id || this.generateId(), displayText: msg.message, processedText: msg.originalMessage || msg.message, 
- condensedUserMessage: msg.condensedUserMessage, sender: msg.sender, timestamp: msg.timestamp || formatDateTime(new Date()), context: msg.context, diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index e6acec7fb..ffe7c0ec5 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -8,10 +8,6 @@ jest.mock("@/settings/model", () => ({ getSettings: jest.fn(), })); -jest.mock("@/constants", () => ({ - USER_SENDER: "user", -})); - jest.mock("@/utils", () => ({ ensureFolderExists: jest.fn(), })); @@ -21,7 +17,6 @@ import { App, TFile, Vault } from "obsidian"; import { ChatMessage } from "@/types/message"; import { logInfo, logError } from "@/logger"; import { getSettings } from "@/settings/model"; -import { USER_SENDER } from "@/constants"; import { ensureFolderExists } from "@/utils"; import { BaseChatModel } from "@langchain/core/language_models/chat_models"; import { AIMessageChunk } from "@langchain/core/messages"; @@ -83,14 +78,13 @@ describe("UserMemoryManager", () => { const createMockMessage = ( id: string, message: string, - sender: string = USER_SENDER + sender: string = "user" ): ChatMessage => ({ id, message, sender, timestamp: null, isVisible: true, - condensedUserMessage: `Condensed: ${message}`, }); it("should skip memory update when memory is disabled", () => { @@ -112,43 +106,33 @@ describe("UserMemoryManager", () => { ); }); - it("should complete end-to-end memory update with existing file", async () => { - // Setup: Create test messages simulating a real conversation with enough content for key conclusions + it("should complete end-to-end memory update with new simple format", async () => { + // Setup: Create test messages simulating a real conversation const messages = [ createMockMessage( "1", - "How do I create a daily note template in Obsidian with automatic date formatting? I want to have a template that automatically inserts today's date and creates sections for tasks, notes, and reflections." + "How do I create a daily note template in Obsidian with automatic date formatting?" ), createMockMessage( "2", - "I can help you create a daily note template with automatic date formatting. Here's how you can set this up: First, create a template file in your templates folder with variables like {{date}} for automatic date insertion. You can use format strings to customize the date display. For the sections, you can create headers for Tasks, Notes, and Reflections that will be included every time you create a new daily note.", + "I can help you create a daily note template with automatic date formatting...", "ai" ), createMockMessage( "3", - "That's perfect! Can you also show me how to add tags automatically to these daily notes? I'd like them to be tagged with #daily-note and maybe the current month." - ), - createMockMessage( - "4", - "Certainly! You can add automatic tags to your template by including tag syntax directly in the template file. Add #daily-note and #{{date:MMMM}} to automatically tag with the current month. This way every daily note will be consistently tagged and easy to find later.", - "ai" + "That's perfect! Can you also show me how to add tags automatically?" ), + createMockMessage("4", "Certainly! 
You can add automatic tags to your template...", "ai"), ]; // Mock existing memory file with previous conversations const existingMemoryContent = `## Previous Conversation **Time:** 2024-01-01T09:00:00Z -**User Messages:** -- Asked about plugin installation -**Key Conclusions:** -- Plugins enhance Obsidian functionality +**Summary:** User asked about plugin installation and learned that plugins enhance Obsidian functionality. ## Another Conversation **Time:** 2024-01-01T10:00:00Z -**User Messages:** -- Inquired about linking notes -**Key Conclusions:** -- Backlinks create knowledge connections +**Summary:** User inquired about linking notes and discovered that backlinks create knowledge connections. `; const mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); @@ -162,15 +146,15 @@ describe("UserMemoryManager", () => { // Mock reading existing file content mockVault.read.mockResolvedValue(existingMemoryContent); - // Mock LLM responses for conversation processing - const mockTitleResponse = new AIMessageChunk({ content: "Daily Note Template Setup" }); - const mockConclusionResponse = new AIMessageChunk({ - content: - "- Templates can automatically insert dates and metadata\n- Tags can be added through template variables", + // Mock LLM response for title and summary + const mockResponse = new AIMessageChunk({ + content: JSON.stringify({ + title: "Daily Note Template Setup", + summary: + "User asked about creating daily note templates with automatic date formatting and tagging. Learned how to use template variables for dates and automatic tag insertion.", + }), }); - mockChatModel.invoke - .mockResolvedValueOnce(mockTitleResponse) - .mockResolvedValueOnce(mockConclusionResponse); + mockChatModel.invoke.mockResolvedValueOnce(mockResponse); // Execute the updateMemory function directly to ensure proper awaiting await (userMemoryManager as any).updateMemory(messages, mockChatModel); @@ -179,170 +163,183 @@ describe("UserMemoryManager", () => { const modifyCall = mockVault.modify.mock.calls[0]; const actualContent = modifyCall[1]; - // Check the full memory content structure as a whole - exact line-by-line verification - const expectedContentStructure = [ - // Previous conversations should be preserved (no empty lines between conversations) - "## Previous Conversation", - "**Time:** 2024-01-01T09:00:00Z", - "**User Messages:**", - "- Asked about plugin installation", - "**Key Conclusions:**", - "- Plugins enhance Obsidian functionality", - "## Another Conversation", - "**Time:** 2024-01-01T10:00:00Z", - "**User Messages:**", - "- Inquired about linking notes", - "**Key Conclusions:**", - "- Backlinks create knowledge connections", - // New conversation should be added - "## Daily Note Template Setup", - // Dynamic timestamp pattern - /\*\*Time:\*\* \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/, - "**User Messages:**", - "- Condensed: How do I create a daily note template in Obsidian with automatic date formatting? I want to have a template that automatically inserts today's date and creates sections for tasks, notes, and reflections.", - "- Condensed: That's perfect! Can you also show me how to add tags automatically to these daily notes? 
I'd like them to be tagged with #daily-note and maybe the current month.", - "**Key Conclusions:**", - "- Templates can automatically insert dates and metadata", - "- Tags can be added through template variables", - "", // Empty line at end - "", // Second empty line at end - ]; - - // Verify the complete content structure line by line - const contentLines = actualContent.split("\n"); - - // Verify we have the expected number of lines - expect(contentLines).toHaveLength(expectedContentStructure.length); - - // Verify each line matches the expected structure - for (let i = 0; i < expectedContentStructure.length; i++) { - const expectedItem = expectedContentStructure[i]; - const actualLine = contentLines[i]; - - if (expectedItem instanceof RegExp) { - // Handle regex patterns for dynamic content like timestamps - expect(actualLine).toMatch(expectedItem); - } else { - // Handle exact string matches - expect(actualLine).toBe(expectedItem); - } - } - - // Verify all conversations have the required sections using pattern matching - expect(actualContent.match(/## [^#\n]+/g)).toHaveLength(3); // 3 conversations - expect(actualContent.match(/\*\*Time:\*\*/g)).toHaveLength(3); // Each has a timestamp - expect(actualContent.match(/\*\*User Messages:\*\*/g)).toHaveLength(3); // Each has user messages - expect(actualContent.match(/\*\*Key Conclusions:\*\*/g)).toHaveLength(3); // Each has key conclusions - - // Verify that the conversation title and key conclusions were extracted via LLM - expect(mockChatModel.invoke).toHaveBeenCalledTimes(2); - - // Verify title extraction call - expect(mockChatModel.invoke).toHaveBeenNthCalledWith( - 1, - expect.arrayContaining([ - expect.objectContaining({ - content: expect.stringContaining("Generate a title for the conversation"), - }), - ]) + // Check that the new format is used + expect(actualContent).toContain("## Daily Note Template Setup"); + expect(actualContent).toMatch(/\*\*Time:\*\* \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/); + expect(actualContent).toContain( + "**Summary:** User asked about creating daily note templates" ); - // Verify key conclusions extraction call - expect(mockChatModel.invoke).toHaveBeenNthCalledWith( - 2, + // Verify previous conversations are preserved + expect(actualContent).toContain("## Previous Conversation"); + expect(actualContent).toContain("## Another Conversation"); + + // Verify that the title and summary were extracted via single LLM call + expect(mockChatModel.invoke).toHaveBeenCalledTimes(1); + + // Verify the LLM call format + expect(mockChatModel.invoke).toHaveBeenCalledWith( expect.arrayContaining([ expect.objectContaining({ - content: expect.stringContaining("extract key conclusions"), + content: expect.stringContaining("generate both a title and a summary"), }), ]) ); + }); - // Verify no folder creation was needed since folder already exists - expect(mockVault.createFolder).not.toHaveBeenCalled(); + it("should handle LLM JSON parsing errors gracefully", async () => { + const messages = [createMockMessage("1", "test message")]; + const mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); - // Verify no new file creation was needed since file already exists - expect(mockVault.create).not.toHaveBeenCalled(); - }); + (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); + mockVault.getAbstractFileByPath.mockReturnValue(mockMemoryFile); + mockVault.read.mockResolvedValue(""); - it("should handle missing condensed messages by creating them inline (race condition fix)", async () => { - // 
Setup: Create messages without condensed messages to simulate race condition - const messages = [ - createMockMessage("1", "How do I create daily notes?"), - createMockMessage("2", "AI response about daily notes", "ai"), - createMockMessage("3", "What about templates?"), - ]; + // Mock LLM response with invalid JSON + const mockResponse = new AIMessageChunk({ content: "Invalid JSON response" }); + mockChatModel.invoke.mockResolvedValueOnce(mockResponse); - // Remove condensed messages to simulate race condition - delete messages[0].condensedUserMessage; - delete messages[2].condensedUserMessage; + await (userMemoryManager as any).updateMemory(messages, mockChatModel); + // Should still create a conversation entry with fallback values + const modifyCall = mockVault.modify.mock.calls[0]; + const actualContent = modifyCall[1]; + + expect(actualContent).toContain("## Untitled Conversation"); + expect(actualContent).toContain("**Summary:** Summary generation failed"); + expect(logError).toHaveBeenCalledWith( + "[UserMemoryManager] Failed to parse LLM response as JSON:", + expect.any(Error) + ); + }); + + it("should handle JSON wrapped in code blocks from Gemini", async () => { + const messages = [createMockMessage("1", "test message")]; const mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); - const existingContent = ""; - // Mock ensureFolderExists and file operations (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); mockVault.getAbstractFileByPath.mockReturnValue(mockMemoryFile); - mockVault.read.mockResolvedValue(existingContent); - - // Mock LLM responses - const mockTitleResponse = new AIMessageChunk({ content: "Daily Notes Help" }); - const mockConclusionResponse = new AIMessageChunk({ - content: "- Daily notes can be automated with templates", + mockVault.read.mockResolvedValue(""); + + // Mock LLM response with JSON wrapped in code blocks (typical Gemini behavior) + const mockResponse = new AIMessageChunk({ + content: `Here's the title and summary for the conversation: + +\`\`\`json +{ + "title": "Code Block Test", + "summary": "This tests JSON extraction from code blocks." 
+} +\`\`\``, }); + mockChatModel.invoke.mockResolvedValueOnce(mockResponse); - // Setup condensed message creation (called inline for missing entries) - const condensedMessage1 = "Asked about creating daily notes"; - const condensedMessage2 = "Inquired about template usage"; + await (userMemoryManager as any).updateMemory(messages, mockChatModel); - // Mock createCondensedMessage to return condensed versions - const createCondensedMessageSpy = jest.spyOn( - userMemoryManager as any, - "createCondensedMessage" - ); + // Should successfully extract JSON from code block + const modifyCall = mockVault.modify.mock.calls[0]; + const actualContent = modifyCall[1]; - createCondensedMessageSpy.mockImplementation(async (message, model) => { - if (message === "How do I create daily notes?") { - return condensedMessage1; - } - if (message === "What about templates?") { - return condensedMessage2; - } - return null; - }); + expect(actualContent).toContain("## Code Block Test"); + expect(actualContent).toContain("**Summary:** This tests JSON extraction from code blocks."); + }); + + it("should handle JSON wrapped in unmarked code blocks", async () => { + const messages = [createMockMessage("1", "test message")]; + const mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); - mockChatModel.invoke - .mockResolvedValueOnce(mockTitleResponse) - .mockResolvedValueOnce(mockConclusionResponse); + (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); + mockVault.getAbstractFileByPath.mockReturnValue(mockMemoryFile); + mockVault.read.mockResolvedValue(""); + + // Mock LLM response with JSON in unmarked code blocks + const mockResponse = new AIMessageChunk({ + content: `\`\`\` +{ + "title": "Unmarked Block Test", + "summary": "This tests JSON extraction from unmarked code blocks." +} +\`\`\``, + }); + mockChatModel.invoke.mockResolvedValueOnce(mockResponse); - // Execute the updateMemory function await (userMemoryManager as any).updateMemory(messages, mockChatModel); - // Verify condensed messages were created inline for missing entries - expect(createCondensedMessageSpy).toHaveBeenCalledTimes(2); - expect(createCondensedMessageSpy).toHaveBeenCalledWith( - "How do I create daily notes?", - mockChatModel + // Should successfully extract JSON from unmarked code block + const modifyCall = mockVault.modify.mock.calls[0]; + const actualContent = modifyCall[1]; + + expect(actualContent).toContain("## Unmarked Block Test"); + expect(actualContent).toContain( + "**Summary:** This tests JSON extraction from unmarked code blocks." 
); - expect(createCondensedMessageSpy).toHaveBeenCalledWith( - "What about templates?", - mockChatModel + }); + }); + + describe("extractJsonFromResponse", () => { + it("should extract JSON from markdown code blocks with json language tag", () => { + const content = `Here's the response: + +\`\`\`json +{ + "title": "Test Title", + "summary": "Test Summary" +} +\`\`\` + +That's the JSON data.`; + + const result = (userMemoryManager as any).extractJsonFromResponse(content); + expect(result).toBe('{\n "title": "Test Title",\n "summary": "Test Summary"\n}'); + }); + + it("should extract JSON from unmarked code blocks", () => { + const content = `\`\`\` +{ + "title": "Unmarked Block", + "summary": "No language specified" +} +\`\`\``; + + const result = (userMemoryManager as any).extractJsonFromResponse(content); + expect(result).toBe( + '{\n "title": "Unmarked Block",\n "summary": "No language specified"\n}' ); + }); - // Verify the final content includes the inline-created condensed messages - const modifyCall = mockVault.modify.mock.calls[0]; - const actualContent = modifyCall[1]; + it("should extract JSON object when no code blocks present", () => { + const content = `Some text before {"title": "Inline JSON", "summary": "Direct JSON"} and after`; - expect(actualContent).toContain("Asked about creating daily notes"); - expect(actualContent).toContain("Inquired about template usage"); + const result = (userMemoryManager as any).extractJsonFromResponse(content); + expect(result).toBe('{"title": "Inline JSON", "summary": "Direct JSON"}'); + }); + + it("should return original content when no JSON patterns found", () => { + const content = "No JSON here, just plain text"; + + const result = (userMemoryManager as any).extractJsonFromResponse(content); + expect(result).toBe(content); + }); - createCondensedMessageSpy.mockRestore(); + it("should handle multiline JSON in code blocks", () => { + const content = `\`\`\`json +{ + "title": "Multi-line Test", + "summary": "This is a test with\\nmultiple lines and special characters: äöü" +} +\`\`\``; + + const result = (userMemoryManager as any).extractJsonFromResponse(content); + expect(result).toContain('"title": "Multi-line Test"'); + expect(result).toContain("special characters: äöü"); }); }); describe("getUserMemoryPrompt", () => { it("should return memory prompt when recent conversations exist", async () => { const mockFile = createMockTFile("copilot/memory/Recent Conversations.md"); - const mockContent = "## Test Conversation\n**Time:** 2024-01-01T10:00:00Z\n"; + const mockContent = + "## Test Conversation\n**Time:** 2024-01-01T10:00:00Z\n**Summary:** Test summary"; mockVault.getAbstractFileByPath.mockReturnValue(mockFile); mockVault.read.mockResolvedValue(mockContent); diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index f5da698d9..f7d44352b 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -1,7 +1,6 @@ import { App, TFile } from "obsidian"; import { ChatMessage } from "@/types/message"; -import { logInfo, logError, logWarn } from "@/logger"; -import { USER_SENDER } from "@/constants"; +import { logInfo, logError } from "@/logger"; import { getSettings } from "@/settings/model"; import { ensureFolderExists } from "@/utils"; import { BaseChatModel } from "@langchain/core/language_models/chat_models"; @@ -10,9 +9,7 @@ import { HumanMessage, SystemMessage } from "@langchain/core/messages"; /** * User Memory Management Class * - * Instance-based methods for building and managing user 
memory based on conversations. - * The UserMemoryManager has methods to add recent conversations to memory - * which can then be used to provide recent conversation context for LLM responses. + * Simple memory manager that creates conversation summaries for recent chat history. */ export class UserMemoryManager { private app: App; @@ -23,18 +20,6 @@ export class UserMemoryManager { this.app = app; } - /** - * Check if a message is a user message with valid condensed content - */ - private hasValidCondensedUserMessage(message: ChatMessage): boolean { - return ( - message.sender === USER_SENDER && - !!message.condensedUserMessage && - typeof message.condensedUserMessage === "string" && - message.condensedUserMessage.trim().length > 0 - ); - } - /** * Load memory data from files into class fields */ @@ -76,97 +61,6 @@ export class UserMemoryManager { }); } - /** - * Create a condensed version of a user message for memory purposes. - * Optimized for Obsidian note-taking context and knowledge management workflows. - * - * @param userMessage - The original user message to condense - * @param chatModel - The chat model to use for condensing (optional) - * @returns Promise - The condensed message or null if failed/unnecessary - * - * Features: - * - Skips condensing for very short messages or simple commands - * - Validates that condensed message is actually shorter than original - * - Provides fallback truncation if AI condensing fails - * - Optimized prompts for Obsidian-specific use cases - */ - async createCondensedMessage( - userMessage: string, - chatModel?: BaseChatModel - ): Promise { - if (!chatModel) { - logError("[UserMemoryManager] No chat model available for condensed message creation"); - return null; - } - - // Remove newlines and other formatting - const formattedMessage = userMessage.replace(/\n/g, " ").replace(/\\n/g, " ").trim(); - const trimmedMessage = formattedMessage.trim(); - if (!trimmedMessage) { - return null; - } - - const systemPrompt = `Your task is to condense user messages into concise one-line summaries while preserving user intent and important details. - -The condensed message will be used as part of the recent conversation content for memory purposes. - -CRITICAL RULES: -1. Keep it to ONE sentence maximum -2. Preserve the user's core intent and request -3. Include important details like note names, tags, search queries, or Obsidian features mentioned -4. Maintain the meaning and specificity of the original message -5. Use clear, direct language -6. Prioritize Obsidian-specific features (links, tags, graphs, plugins, etc.) - -# OUTPUT FORMAT -* Return only the condensed message as plain text, no quotes or additional formatting. 
-* Use the same language as the original message.`; - - const humanPrompt = ` -${trimmedMessage} - - -Condense the user message into a single concise sentence while preserving intent and important details`; - - const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; - - try { - const response = await chatModel.invoke(messages_llm); - if (!response || !response.content) { - logError("[UserMemoryManager] Empty response from chat model for condensed message"); - return null; - } - - const condensed = response.content.toString().trim(); - - // Validate the condensed message - if (!condensed) { - logError("[UserMemoryManager] Chat model returned empty condensed message"); - return null; - } - - // Ensure the condensed message is actually shorter than the original - if (condensed.length >= trimmedMessage.length) { - logInfo("[UserMemoryManager] Condensed message not shorter than original, using original"); - return trimmedMessage; - } - - // Remove any quotes or formatting that might have been added - const cleanedCondensed = condensed.replace(/^["']|["']$/g, "").trim(); - - return cleanedCondensed || null; - } catch (error) { - logError("[UserMemoryManager] Failed to create condensed message:", error); - // Fallback: return a truncated version of the original message if it's too long - if (trimmedMessage.length > 100) { - const fallback = trimmedMessage.substring(0, 97) + "..."; - logInfo("[UserMemoryManager] Using fallback truncated message"); - return fallback; - } - return null; - } - } - /** * Get user memory prompt */ @@ -194,55 +88,12 @@ Condense the user message into a single concise sentence while preserving intent messages: ChatMessage[], chatModel: BaseChatModel ): Promise { - const conversationTitle = await this.extractConversationTitle(messages, chatModel); + const { title, summary } = await this.extractTitleAndSummary(messages, chatModel); const timestamp = new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC - // Process user messages and ensure condensed messages are available - const userMessages = messages.filter((message) => message.sender === USER_SENDER); - const userMessageTexts: string[] = []; - - for (const message of userMessages) { - let condensedText = message.condensedUserMessage; - - // If condensed message is missing or invalid, create it inline to handle race condition - if ( - !condensedText || - typeof condensedText !== "string" || - condensedText.trim().length === 0 - ) { - try { - const newCondensedText = await this.createCondensedMessage(message.message, chatModel); - if (newCondensedText) { - condensedText = newCondensedText; - logWarn( - `[UserMemoryManager] Created inline condensed message for missing entry: "${condensedText}"` - ); - } - } catch (error) { - logError( - `[UserMemoryManager] Failed to create inline condensed message for "${message.message}":`, - error - ); - // Continue processing other messages even if one fails - } - } - - // Only include if we have valid condensed text - if (condensedText && condensedText.trim().length > 0) { - userMessageTexts.push(`- ${condensedText}`); - } - } - - // Generate key conclusions if conversation is substantial enough - const keyConclusionsText = await this.extractKeyConclusion(messages, chatModel); - - let section = `## ${conversationTitle}\n`; + let section = `## ${title}\n`; section += `**Time:** ${timestamp}\n`; - section += `**User Messages:**\n${userMessageTexts.join("\n")}\n`; - - if (keyConclusionsText) { - section += `**Key 
Conclusions:**\n${keyConclusionsText}\n`; - } + section += `**Summary:** ${summary}\n`; return section; } @@ -272,11 +123,9 @@ Condense the user message into a single concise sentence while preserving intent return; } - // 1. Always extract and save conversation summary to recent conversations + // Extract and save conversation summary to recent conversations const conversationSection = await this.createConversationSection(messages, chatModel); await this.addToMemoryFile(this.getRecentConversationFilePath(), conversationSection); - - // User insights functionality removed - only maintain recent conversations } catch (error) { logError("[UserMemoryManager] Error analyzing chat messages for user memory:", error); } finally { @@ -375,96 +224,81 @@ Condense the user message into a single concise sentence while preserving intent } /** - * Extract key conclusions from conversation if it contains important insights + * Extract JSON content from LLM response, handling cases where JSON is wrapped in code blocks */ - private async extractKeyConclusion( - messages: ChatMessage[], - chatModel: BaseChatModel - ): Promise { - // Only generate key conclusions for conversations with substantial content - const conversationText = messages.map((msg) => `${msg.sender}: ${msg.message}`).join("\n\n"); - - // Skip if conversation is too short or simple - if (conversationText.length < 300) { - return null; + private extractJsonFromResponse(content: string): string { + // First, try to extract JSON from markdown code blocks + const codeBlockMatch = content.match(/```(?:json)?\s*\n?([\s\S]*?)\n?```/); + if (codeBlockMatch) { + return codeBlockMatch[1].trim(); } - const systemPrompt = `You are an AI assistant that analyzes conversations and determines if they contain important conclusions worth remembering. - -TASK: Analyze the conversation and extract key conclusions ONLY if the conversation contains: -- Important insights, decisions, or learnings -- Technical solutions or discoveries -- Significant planning or strategy discussions -- Important facts or knowledge gained - -If the conversation is just casual chat, simple questions, or routine tasks, return "NONE". - -# OUTPUT FORMAT -If there are key conclusions: Return each conclusion as a bullet point (use - for each point). Each conclusion should be concise (1-2 sentences). Use the same language as the conversation. 
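+      // Worked example (hypothetical input): for a response such as
+      //   "```json\n{ \"title\": \"Tokyo Weather\" }\n```"
+      // the fence regex above matches and the capture group yields
+      //   "{ \"title\": \"Tokyo Weather\" }"
+      // so the markdown wrapper is stripped before the caller runs JSON.parse.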
-Example: -- First important insight or decision -- Second key learning or solution -- Third significant conclusion - -If no important conclusions: Return exactly "NONE"`; - - const humanPrompt = `Analyze this conversation and determine if there are key conclusions worth remembering: - -${conversationText}`; - - const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; - - try { - const response = await chatModel.invoke(messages_llm); - const conclusion = response.content.toString().trim(); - - if (conclusion === "NONE" || !conclusion) { - return null; - } - - return conclusion; - } catch (error) { - logError("[UserMemoryManager] Failed to extract key conclusion:", error); - return null; + // If no code block found, look for JSON object pattern + const jsonMatch = content.match(/\{[\s\S]*\}/); + if (jsonMatch) { + return jsonMatch[0]; } + + // Return original content if no patterns match + return content; } /** - * Extract conversation title using LLM + * Extract conversation title and summary using a single LLM call */ - private async extractConversationTitle( + private async extractTitleAndSummary( messages: ChatMessage[], chatModel: BaseChatModel - ): Promise { + ): Promise<{ title: string; summary: string }> { const conversationText = messages.map((msg) => `${msg.sender}: ${msg.message}`).join("\n\n"); - const systemPrompt = `Your task is to generate a title for a conversation based on its content. + const systemPrompt = `Your task is to analyze a conversation and generate both a title and a summary. -Examples: "Travel Plan", "Tokyo Weather" +# OUTPUT FORMAT +You must return your response in the following JSON format: +{ + "title": "Brief 2-8 word title capturing the main user intent", + "summary": "2-3 sentence summary at most including key details (e.g. user facts mentioned entities), and key conclusions if there are any." +} -# OUTPUT RULES -* Look at the conversation content and generate a title that captures the main *user intent* of the conversation. -* Return only the brief 2-8 word title as plain text, no JSON format needed. 
-* Use the same language as the conversation.`; +# RULES +* Use the same language as the conversation`; - const humanPrompt = ` - + const humanPrompt = ` ${conversationText} -Generate a title for the conversation:`; +Generate a title and summary for this conversation:`; const messages_llm = [new SystemMessage(systemPrompt), new HumanMessage(humanPrompt)]; try { const response = await chatModel.invoke(messages_llm); - const summary = response.content.toString().trim(); - return summary || "Untitled Conversation"; + const content = response.content.toString().trim(); + + // Extract JSON from content, handling code blocks + const jsonContent = this.extractJsonFromResponse(content); + + // Try to parse JSON response + try { + const parsed = JSON.parse(jsonContent); + return { + title: parsed.title || "Untitled Conversation", + summary: parsed.summary || "No summary available", + }; + } catch (parseError) { + logError("[UserMemoryManager] Failed to parse LLM response as JSON:", parseError); + return { + title: "Untitled Conversation", + summary: "Summary generation failed", + }; + } } catch (error) { - logError("[UserMemoryManager] Failed to extract conversation summary:", error); - return "Untitled Conversation"; + logError("[UserMemoryManager] Failed to extract title and summary:", error); + return { + title: "Untitled Conversation", + summary: "Summary generation failed", + }; } } - - // extractUserInsights removed - user insights functionality removed } diff --git a/src/memory/memory-design.md b/src/memory/memory-design.md index 811dc65a1..3660df4e9 100644 --- a/src/memory/memory-design.md +++ b/src/memory/memory-design.md @@ -56,6 +56,7 @@ graph TD - **Conversation Titles**: LLM-extracted titles that capture main user intent - **Key Conclusions**: Only generated for conversations with substantial content (>300 chars) containing insights, decisions, or learnings - **Obsidian-optimized**: Special handling for note names, tags, links, and Obsidian-specific features +- **Robust JSON Parsing**: Handles JSON responses wrapped in code blocks (common with Gemini and other LLMs) with fallback to plain JSON extraction ### Configuration (Current): @@ -75,5 +76,6 @@ graph TD - Fallback mechanisms for AI processing failures - Graceful handling of missing files and folders - Validation of AI-generated content (e.g., ensures condensed messages are actually shorter) +- Robust JSON extraction from LLM responses with multiple parsing strategies (code blocks, inline JSON, fallback to raw content) This simplified design focuses on providing recent conversation context without the complexity of long-term memory management, while maintaining robust AI-powered content processing and configurable retention policies. diff --git a/src/types/message.ts b/src/types/message.ts index bf8d9b477..6b55aaa51 100644 --- a/src/types/message.ts +++ b/src/types/message.ts @@ -44,15 +44,6 @@ export interface ChatMessage { /** Original user input before processing (for LLM messages) */ originalMessage?: string; - /** - * AI-generated one-sentence summary of user messages for memory storage. - * Created asynchronously after user messages to reduce memory footprint while preserving - * core intent and Obsidian-specific features (notes, tags, links). - * Used in constructing "Recent Conversations" for user memory. - * Only applies to user messages when memory is enabled. - */ - condensedUserMessage?: string; - /** Message sender ("user", "AI", etc.) 
*/ sender: string; @@ -91,7 +82,6 @@ export interface StoredMessage { id: string; displayText: string; // What user typed/what AI responded processedText: string; // For user messages: with context added. For AI: same as display - condensedUserMessage?: string; // AI-generated condensed version for memory storage (user messages only) - see condensedUserMessage documentation above sender: string; timestamp: FormattedDateTime; context?: MessageContext; From cbe7b8c408999b04c7962027ae05772654786fe5 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 22:18:23 +0900 Subject: [PATCH 22/32] Better memory prompt and fix conversation parsing --- src/LLMProviders/chainManager.ts | 4 +- src/memory/UserMemoryManager.test.ts | 210 +++++++++++++++++++-------- src/memory/UserMemoryManager.ts | 69 +++++---- src/settings/model.ts | 7 +- 4 files changed, 192 insertions(+), 98 deletions(-) diff --git a/src/LLMProviders/chainManager.ts b/src/LLMProviders/chainManager.ts index e15b546e5..93789bebf 100644 --- a/src/LLMProviders/chainManager.ts +++ b/src/LLMProviders/chainManager.ts @@ -324,9 +324,9 @@ export default class ChainManager { updateLoading?: (loading: boolean) => void; } = {} ) { - const { debug = false, ignoreSystemMessage = false } = options; + const { ignoreSystemMessage = false } = options; - if (debug) console.log("==== Step 0: Initial user message ====\n", userMessage); + logInfo("Step 0: Initial user message:\n", userMessage); this.validateChatModel(); this.validateChainInitialization(); diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index ffe7c0ec5..fb0bfcba9 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -212,68 +212,6 @@ describe("UserMemoryManager", () => { expect.any(Error) ); }); - - it("should handle JSON wrapped in code blocks from Gemini", async () => { - const messages = [createMockMessage("1", "test message")]; - const mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); - - (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); - mockVault.getAbstractFileByPath.mockReturnValue(mockMemoryFile); - mockVault.read.mockResolvedValue(""); - - // Mock LLM response with JSON wrapped in code blocks (typical Gemini behavior) - const mockResponse = new AIMessageChunk({ - content: `Here's the title and summary for the conversation: - -\`\`\`json -{ - "title": "Code Block Test", - "summary": "This tests JSON extraction from code blocks." 
-} -\`\`\``, - }); - mockChatModel.invoke.mockResolvedValueOnce(mockResponse); - - await (userMemoryManager as any).updateMemory(messages, mockChatModel); - - // Should successfully extract JSON from code block - const modifyCall = mockVault.modify.mock.calls[0]; - const actualContent = modifyCall[1]; - - expect(actualContent).toContain("## Code Block Test"); - expect(actualContent).toContain("**Summary:** This tests JSON extraction from code blocks."); - }); - - it("should handle JSON wrapped in unmarked code blocks", async () => { - const messages = [createMockMessage("1", "test message")]; - const mockMemoryFile = createMockTFile("copilot/memory/Recent Conversations.md"); - - (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); - mockVault.getAbstractFileByPath.mockReturnValue(mockMemoryFile); - mockVault.read.mockResolvedValue(""); - - // Mock LLM response with JSON in unmarked code blocks - const mockResponse = new AIMessageChunk({ - content: `\`\`\` -{ - "title": "Unmarked Block Test", - "summary": "This tests JSON extraction from unmarked code blocks." -} -\`\`\``, - }); - mockChatModel.invoke.mockResolvedValueOnce(mockResponse); - - await (userMemoryManager as any).updateMemory(messages, mockChatModel); - - // Should successfully extract JSON from unmarked code block - const modifyCall = mockVault.modify.mock.calls[0]; - const actualContent = modifyCall[1]; - - expect(actualContent).toContain("## Unmarked Block Test"); - expect(actualContent).toContain( - "**Summary:** This tests JSON extraction from unmarked code blocks." - ); - }); }); describe("extractJsonFromResponse", () => { @@ -335,6 +273,149 @@ That's the JSON data.`; }); }); + describe("parseExistingConversations", () => { + it("should return empty array for empty string", () => { + const result = (userMemoryManager as any).parseExistingConversations(""); + expect(result).toEqual([]); + }); + + it("should return empty array for content with no H2 sections", () => { + const content = `This is some content without H2 headers. +It has multiple lines but no conversations. +# This is H1, not H2 +### This is H3, not H2`; + + const result = (userMemoryManager as any).parseExistingConversations(content); + expect(result).toEqual([]); + }); + + it("should extract single conversation section", () => { + const content = `## Daily Note Template Setup +**Time:** 2024-01-01T10:00:00Z +**Summary:** User asked about creating daily note templates with automatic date formatting.`; + + const result = (userMemoryManager as any).parseExistingConversations(content); + expect(result).toEqual([ + `## Daily Note Template Setup +**Time:** 2024-01-01T10:00:00Z +**Summary:** User asked about creating daily note templates with automatic date formatting.`, + ]); + }); + + it("should extract multiple conversation sections", () => { + const content = `## First Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** User asked about plugin installation. + +## Second Conversation +**Time:** 2024-01-01T10:00:00Z +**Summary:** User inquired about linking notes. 
+ +## Third Conversation +**Time:** 2024-01-01T11:00:00Z +**Summary:** User learned about backlinks.`; + + const result = (userMemoryManager as any).parseExistingConversations(content); + expect(result).toEqual([ + `## First Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** User asked about plugin installation.`, + `## Second Conversation +**Time:** 2024-01-01T10:00:00Z +**Summary:** User inquired about linking notes.`, + `## Third Conversation +**Time:** 2024-01-01T11:00:00Z +**Summary:** User learned about backlinks.`, + ]); + }); + + it("should ignore content before the first H2 section", () => { + const content = `This is some introductory text that should be ignored. +It might contain important information, but it's before the first conversation. + +## First Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** This conversation should be included. + +## Second Conversation +**Time:** 2024-01-01T10:00:00Z +**Summary:** This conversation should also be included.`; + + const result = (userMemoryManager as any).parseExistingConversations(content); + expect(result).toEqual([ + `## First Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** This conversation should be included.`, + `## Second Conversation +**Time:** 2024-01-01T10:00:00Z +**Summary:** This conversation should also be included.`, + ]); + }); + + it("should handle conversations with extra whitespace and trim them", () => { + const content = ` ## First Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** User asked about plugin installation. + + ## Second Conversation +**Time:** 2024-01-01T10:00:00Z +**Summary:** User inquired about linking notes. `; + + const result = (userMemoryManager as any).parseExistingConversations(content); + expect(result).toEqual([ + `## First Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** User asked about plugin installation.`, + `## Second Conversation +**Time:** 2024-01-01T10:00:00Z +**Summary:** User inquired about linking notes.`, + ]); + }); + + it("should handle conversation sections with complex multi-line content", () => { + const content = `## Complex Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** User asked about multiple topics including: +- How to create templates +- How to use variables +- How to set up automation + +The conversation covered advanced features and included code examples. 
+ +## Another Conversation +**Time:** 2024-01-01T10:00:00Z +**Summary:** Short summary.`; + + const result = (userMemoryManager as any).parseExistingConversations(content); + expect(result).toEqual([ + `## Complex Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** User asked about multiple topics including: +- How to create templates +- How to use variables +- How to set up automation + +The conversation covered advanced features and included code examples.`, + `## Another Conversation +**Time:** 2024-01-01T10:00:00Z +**Summary:** Short summary.`, + ]); + }); + + it("should handle conversation at end of file without trailing newlines", () => { + const content = `## Only Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** This is the only conversation and it's at the end.`; + + const result = (userMemoryManager as any).parseExistingConversations(content); + expect(result).toEqual([ + `## Only Conversation +**Time:** 2024-01-01T09:00:00Z +**Summary:** This is the only conversation and it's at the end.`, + ]); + }); + }); + describe("getUserMemoryPrompt", () => { it("should return memory prompt when recent conversations exist", async () => { const mockFile = createMockTFile("copilot/memory/Recent Conversations.md"); @@ -346,7 +427,10 @@ That's the JSON data.`; const result = await userMemoryManager.getUserMemoryPrompt(); - expect(result).toBe(`\n${mockContent}\n`); + expect(result).toContain(mockContent); + expect(result).toContain(""); + expect(result).toContain(""); + expect(result).toContain("Above is the recent conversations between you and the user"); }); it("should return null when no memory content exists", async () => { diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index f7d44352b..0a08015d0 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -71,7 +71,17 @@ export class UserMemoryManager { let memoryPrompt = ""; if (this.recentConversationsContent) { - memoryPrompt += `\n${this.recentConversationsContent}\n`; + memoryPrompt += ` + + ${this.recentConversationsContent} + + + Above is the recent conversations between you and the user. + You can use it to provide more context for your responses. + Only use the recent conversations if they are relevant to the current conversation. + + The current time is ${this.getTimestamp()}. + `; } return memoryPrompt.length > 0 ? 
memoryPrompt : null; @@ -81,6 +91,10 @@ export class UserMemoryManager { } } + private getTimestamp(): string { + return new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC + } + /** * Create a conversation section from messages and return it in Markdown format */ @@ -89,7 +103,7 @@ export class UserMemoryManager { chatModel: BaseChatModel ): Promise { const { title, summary } = await this.extractTitleAndSummary(messages, chatModel); - const timestamp = new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC + const timestamp = this.getTimestamp(); let section = `## ${title}\n`; section += `**Time:** ${timestamp}\n`; @@ -162,8 +176,8 @@ export class UserMemoryManager { let updatedContent: string; if (fileContent.trim() === "") { - // Create new file without header - updatedContent = `${newConversationSection}\n`; + // Create new file with a single trailing newline + updatedContent = `${newConversationSection.trim()}\n`; } else { // Parse existing conversations and add new one const conversations = this.parseExistingConversations(fileContent); @@ -176,13 +190,15 @@ export class UserMemoryManager { conversations.splice(0, conversations.length - maxConversations); } - updatedContent = `${conversations.join("\n")}\n`; + // Normalize sections to avoid extra blank lines, then separate with exactly one blank line + const normalized = conversations.map((s) => s.trim()); + updatedContent = `${normalized.join("\n\n")}\n`; } await this.app.vault.modify(existingFile, updatedContent); } else { // Create new file - const initialContent = `${newConversationSection}\n`; + const initialContent = `${newConversationSection.trim()}\n`; await this.app.vault.create(filePath, initialContent); } } @@ -191,33 +207,28 @@ export class UserMemoryManager { * Parse existing conversations from file content */ private parseExistingConversations(content: string): string[] { + const lines = content.split("\n"); const conversations: string[] = []; + let currentConversation: string[] = []; - // Remove any old header if it exists - const cleanContent = content.replace(/^# Recent Conversations\s*\n\n?/m, "").trim(); - - // Split by ## headings to get individual conversations - const sections = cleanContent.split(/^## /m); - - if (sections.length === 1 && sections[0].trim()) { - // Content doesn't start with ##, but has content - if (sections[0].trim().startsWith("##")) { - conversations.push(sections[0].trim()); - } else { - // Find any ## sections in the content - const matches = cleanContent.match(/^## [\s\S]+?(?=^## |$)/gm); - if (matches) { - conversations.push(...matches.map((match) => match.trim())); - } - } - } else { - for (let i = 1; i < sections.length; i++) { - // Skip the first section (before first ##) - const section = `## ${sections[i]}`.trim(); - if (section.length > 0) { - conversations.push(section); + for (const line of lines) { + if (line.trim().startsWith("## ")) { + // Start of a new conversation - save the previous one if it exists + if (currentConversation.length > 0) { + conversations.push(currentConversation.join("\n").trim()); } + // Start new conversation with this header + currentConversation = [line]; + } else if (currentConversation.length > 0) { + // Add line to current conversation if we're inside one + currentConversation.push(line); } + // Ignore lines before the first ## header + } + + // Add the last conversation if it exists + if (currentConversation.length > 0) { + 
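+      // Worked example (hypothetical fixture): for file content
+      //   "intro\n## A\nline 1\n\n## B\nline 2"
+      // the loop ignores "intro" (it precedes any "## " header), closes
+      // section "## A\nline 1" when "## B" starts, and this final push
+      // captures the trailing "## B\nline 2", returning two trimmed sections.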
conversations.push(currentConversation.join("\n").trim()); } return conversations; diff --git a/src/settings/model.ts b/src/settings/model.ts index bd1aff766..f34d2f218 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -384,10 +384,9 @@ export async function getSystemPromptWithMemory( return systemPrompt; } - return `${systemPrompt} - - ${memoryPrompt} - `; + return `${memoryPrompt} + ${systemPrompt} + `; } function mergeAllActiveModelsWithCoreModels(settings: CopilotSettings): CopilotSettings { From 502d57a44b61d3f6bf7412a25ad70798a434ee0b Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 22:36:33 +0900 Subject: [PATCH 23/32] Rename memory functions --- src/components/Chat.tsx | 2 +- src/main.ts | 2 +- src/memory/UserMemoryManager.test.ts | 6 +++--- src/memory/UserMemoryManager.ts | 4 ++-- src/memory/memory-design.md | 4 ++-- src/settings/SettingsPage.tsx | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx index 2d57e46f7..526d725cc 100644 --- a/src/components/Chat.tsx +++ b/src/components/Chat.tsx @@ -543,7 +543,7 @@ const Chat: React.FC = ({ try { // Get the current chat model from the chain manager const chatModel = chainManager.chatModelManager.getChatModel(); - plugin.userMemoryManager.updateUserMemory(chatUIState.getMessages(), chatModel); + plugin.userMemoryManager.addRecentConversation(chatUIState.getMessages(), chatModel); } catch (error) { logInfo("Failed to analyze chat messages for memory:", error); } diff --git a/src/main.ts b/src/main.ts index 965add55b..c2748169f 100644 --- a/src/main.ts +++ b/src/main.ts @@ -392,7 +392,7 @@ export default class CopilotPlugin extends Plugin { // Get the current chat model from the chain manager const chainManager = this.projectManager.getCurrentChainManager(); const chatModel = chainManager.chatModelManager.getChatModel(); - this.userMemoryManager.updateUserMemory(this.chatUIState.getMessages(), chatModel); + this.userMemoryManager.addRecentConversation(this.chatUIState.getMessages(), chatModel); } catch (error) { logInfo("Failed to analyze chat messages for memory:", error); } diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index fb0bfcba9..16946d4e6 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -74,7 +74,7 @@ describe("UserMemoryManager", () => { userMemoryManager = new UserMemoryManager(mockApp); }); - describe("updateUserMemory", () => { + describe("addRecentConversation", () => { const createMockMessage = ( id: string, message: string, @@ -91,7 +91,7 @@ describe("UserMemoryManager", () => { mockSettings.enableMemory = false; const messages = [createMockMessage("1", "test message")]; - userMemoryManager.updateUserMemory(messages, mockChatModel); + userMemoryManager.addRecentConversation(messages, mockChatModel); expect(logInfo).toHaveBeenCalledWith( "[UserMemoryManager] Recent history referencing is disabled, skipping analysis" @@ -99,7 +99,7 @@ describe("UserMemoryManager", () => { }); it("should skip memory update when no messages provided", () => { - userMemoryManager.updateUserMemory([], mockChatModel); + userMemoryManager.addRecentConversation([], mockChatModel); expect(logInfo).toHaveBeenCalledWith( "[UserMemoryManager] No messages to analyze for user memory" diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 0a08015d0..16e18e3d1 100644 --- a/src/memory/UserMemoryManager.ts +++ 
b/src/memory/UserMemoryManager.ts @@ -39,9 +39,9 @@ export class UserMemoryManager { } /** - * Runs the user memory operation in the background without blocking execution + * Adds a recent conversation to user memory storage in the background without blocking execution */ - updateUserMemory(messages: ChatMessage[], chatModel?: BaseChatModel): void { + addRecentConversation(messages: ChatMessage[], chatModel?: BaseChatModel): void { const settings = getSettings(); // Only proceed if memory is enabled diff --git a/src/memory/memory-design.md b/src/memory/memory-design.md index 3660df4e9..e2dfd16ce 100644 --- a/src/memory/memory-design.md +++ b/src/memory/memory-design.md @@ -9,7 +9,7 @@ Current design for how the user memory system works in Obsidian Copilot, focusin ```mermaid graph TD %% Triggers for Memory Updates - A[Chat Conversation Ends] --> B[updateUserMemory called] + A[Chat Conversation Ends] --> B[addRecentConversation called] B --> C{Memory Enabled?} C -->|Yes| D[Process Messages for Memory Storage] C -->|No| Z[Skip Memory Update] @@ -32,7 +32,7 @@ graph TD ### Memory Update Triggers: -- **Trigger**: When a chat conversation ends and `updateUserMemory()` is called +- **Trigger**: When a chat conversation ends and `addRecentConversation()` is called - **Guard**: Only if `enableMemory` setting (Reference Recent History) is enabled - **Fire-and-forget**: Runs asynchronously in background without blocking execution - **Race condition protection**: Prevents multiple simultaneous memory updates diff --git a/src/settings/SettingsPage.tsx b/src/settings/SettingsPage.tsx index 5b80d2334..002fd1b5d 100644 --- a/src/settings/SettingsPage.tsx +++ b/src/settings/SettingsPage.tsx @@ -27,7 +27,7 @@ export class CopilotSettingTab extends PluginSettingTab { // Get the current chat model from the chain manager const chainManager = this.plugin.projectManager.getCurrentChainManager(); const chatModel = chainManager.chatModelManager.getChatModel(); - this.plugin.userMemoryManager.updateUserMemory( + this.plugin.userMemoryManager.addRecentConversation( this.plugin.chatUIState.getMessages(), chatModel ); From 1765366993f25b44c5a21423d95b4dddf5c4f9a9 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 23:24:41 +0900 Subject: [PATCH 24/32] Implement memory tool. 
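Before the diff, a condensed sketch of the append-only flow this patch implements. It is a model for orientation only: the real handler runs inside Obsidian and writes through the Vault API, while the in-memory `store` map and the standalone function shapes here are illustrative assumptions.

```ts
// Minimal sketch of the saved-memory flow, assuming an in-memory stand-in
// for the vault (path -> file content) instead of Obsidian's Vault API.

interface MemorySettings {
  enableSavedMemory: boolean;
  memoryFolderName: string;
}

const store = new Map<string, string>();

function savedMemoriesPath(settings: MemorySettings): string {
  return `${settings.memoryFolderName}/Saved Memories.md`;
}

// Mirrors addSavedMemory: no-op when disabled or empty, otherwise append the
// content as a bullet point, creating the file on first write.
function addSavedMemory(settings: MemorySettings, content: string): string | null {
  if (!settings.enableSavedMemory || content.trim() === "") return null;
  const path = savedMemoriesPath(settings);
  const entry = `- ${content.trim()}`;
  const existing = store.get(path);
  const updated =
    existing === undefined || existing.trim() === ""
      ? `${entry}\n`
      : `${existing.trimEnd()}\n${entry}\n`;
  store.set(path, updated);
  return path;
}

// Usage: repeated saves accumulate as bullets in a single markdown file.
const settings: MemorySettings = {
  enableSavedMemory: true,
  memoryFolderName: "copilot/memory",
};
addSavedMemory(settings, "Prefers dark mode");
addSavedMemory(settings, "Weekly review happens on Sundays");
console.log(store.get(savedMemoriesPath(settings)));
// - Prefers dark mode
// - Weekly review happens on Sundays
```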
--- src/components/Chat.tsx | 4 +- src/constants.ts | 4 +- src/main.ts | 2 +- src/memory/UserMemoryManager.test.ts | 103 +++++++++++++++- src/memory/UserMemoryManager.ts | 111 ++++++++++++++++-- src/memory/memory-design.md | 4 +- src/settings/SettingsPage.tsx | 2 +- src/settings/model.ts | 17 ++- .../v2/components/CopilotPlusSettings.tsx | 16 ++- src/tools/ToolRegistry.ts | 2 +- src/tools/builtinTools.ts | 45 ++++++- src/tools/memoryTools.ts | 49 ++++++++ 12 files changed, 331 insertions(+), 28 deletions(-) create mode 100644 src/tools/memoryTools.ts diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx index 526d725cc..98578d5a6 100644 --- a/src/components/Chat.tsx +++ b/src/components/Chat.tsx @@ -539,7 +539,7 @@ const Chat: React.FC = ({ handleStopGenerating(ABORT_REASON.NEW_CHAT); // Analyze chat messages for memory if enabled - if (settings.enableMemory) { + if (settings.enableRecentConversations) { try { // Get the current chat model from the chain manager const chatModel = chainManager.chatModelManager.getChatModel(); @@ -573,7 +573,7 @@ const Chat: React.FC = ({ chainManager.chatModelManager, chatUIState, settings.autosaveChat, - settings.enableMemory, + settings.enableRecentConversations, settings.includeActiveNoteAsContext, selectedChain, handleSaveAsNote, diff --git a/src/constants.ts b/src/constants.ts index ec6b44fea..cbc579c34 100644 --- a/src/constants.ts +++ b/src/constants.ts @@ -751,12 +751,14 @@ export const DEFAULT_SETTINGS: CopilotSettings = { "youtubeTranscription", "writeToFile", "replaceInFile", + "memoryTool", ], reasoningEffort: DEFAULT_MODEL_SETTING.REASONING_EFFORT, verbosity: DEFAULT_MODEL_SETTING.VERBOSITY, memoryFolderName: "copilot/memory", - enableMemory: false, + enableRecentConversations: false, maxRecentConversations: 30, + enableSavedMemory: false, }; export const EVENT_NAMES = { diff --git a/src/main.ts b/src/main.ts index c2748169f..6e7a1b775 100644 --- a/src/main.ts +++ b/src/main.ts @@ -387,7 +387,7 @@ export default class CopilotPlugin extends Plugin { async handleNewChat() { // Analyze chat messages for memory if enabled - if (getSettings().enableMemory) { + if (getSettings().enableRecentConversations) { try { // Get the current chat model from the chain manager const chainManager = this.projectManager.getCurrentChainManager(); diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index 16946d4e6..e78a60d52 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -43,7 +43,8 @@ describe("UserMemoryManager", () => { // Mock settings mockSettings = { - enableMemory: true, + enableRecentConversations: true, + enableSavedMemory: true, memoryFolderName: "copilot/memory", maxRecentConversations: 30, }; @@ -88,7 +89,7 @@ describe("UserMemoryManager", () => { }); it("should skip memory update when memory is disabled", () => { - mockSettings.enableMemory = false; + mockSettings.enableRecentConversations = false; const messages = [createMockMessage("1", "test message")]; userMemoryManager.addRecentConversation(messages, mockChatModel); @@ -450,7 +451,103 @@ The conversation covered advanced features and included code examples.`, expect(result).toBeNull(); expect(logError).toHaveBeenCalledWith( - "[UserMemoryManager] Error reading recent conversations file:", + "[UserMemoryManager] Error reading memory files:", + expect.any(Error) + ); + }); + }); + + describe("addSavedMemory", () => { + it("should skip saving when saved memory is disabled", async () => { + 
mockSettings.enableSavedMemory = false; + + await userMemoryManager.addSavedMemory("Test memory content"); + + expect(logInfo).toHaveBeenCalledWith( + "[UserMemoryManager] Saved memory is disabled, skipping save" + ); + }); + + it("should skip saving when no content provided", async () => { + mockSettings.enableSavedMemory = true; + + await userMemoryManager.addSavedMemory(""); + + expect(logInfo).toHaveBeenCalledWith( + "[UserMemoryManager] No content provided for saved memory" + ); + }); + + it("should save memory content to Saved Memories file", async () => { + mockSettings.enableSavedMemory = true; + + // Mock ensureFolderExists to resolve successfully + (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); + + // Mock no existing file (new file creation) + mockVault.getAbstractFileByPath.mockReturnValue(null); + + // Mock file creation + const mockNewFile = createMockTFile("copilot/memory/Saved Memories.md"); + mockVault.create.mockResolvedValue(mockNewFile); + + await userMemoryManager.addSavedMemory("Important user preference: I prefer dark mode"); + + // Verify folder creation was called + expect(ensureFolderExists).toHaveBeenCalledWith("copilot/memory"); + + // Verify file creation was called with proper content + expect(mockVault.create).toHaveBeenCalledWith( + "copilot/memory/Saved Memories.md", + expect.stringContaining("- Important user preference: I prefer dark mode") + ); + + expect(logInfo).toHaveBeenCalledWith("[UserMemoryManager] Saved memory added successfully"); + }); + + it("should append to existing Saved Memories file", async () => { + mockSettings.enableSavedMemory = true; + + const existingContent = `- Previous memory content +- Another important fact +`; + + const mockMemoryFile = createMockTFile("copilot/memory/Saved Memories.md"); + + // Mock ensureFolderExists to resolve successfully + (ensureFolderExists as jest.Mock).mockResolvedValue(undefined); + + // Mock existing file + mockVault.getAbstractFileByPath.mockReturnValue(mockMemoryFile); + mockVault.read.mockResolvedValue(existingContent); + + await userMemoryManager.addSavedMemory("New important information"); + + // Verify file modification was called with appended content + expect(mockVault.modify).toHaveBeenCalledWith( + mockMemoryFile, + expect.stringContaining("- Previous memory content") + ); + expect(mockVault.modify).toHaveBeenCalledWith( + mockMemoryFile, + expect.stringContaining("- New important information") + ); + + expect(logInfo).toHaveBeenCalledWith("[UserMemoryManager] Saved memory added successfully"); + }); + + it("should handle errors during save operation", async () => { + mockSettings.enableSavedMemory = true; + + // Mock ensureFolderExists to reject + (ensureFolderExists as jest.Mock).mockRejectedValue(new Error("Folder creation failed")); + + await expect(userMemoryManager.addSavedMemory("Test content")).rejects.toThrow( + "Folder creation failed" + ); + + expect(logError).toHaveBeenCalledWith( + "[UserMemoryManager] Error saving memory:", expect.any(Error) ); }); diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 16e18e3d1..5b75d0677 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -14,6 +14,7 @@ import { HumanMessage, SystemMessage } from "@langchain/core/messages"; export class UserMemoryManager { private app: App; private recentConversationsContent: string = ""; + private savedMemoriesContent: string = ""; private isUpdatingMemory: boolean = false; constructor(app: App) { @@ -25,6 +26,7 @@ export 
class UserMemoryManager { */ private async loadMemory(): Promise { try { + // Load recent conversations const recentConversationsFile = this.app.vault.getAbstractFileByPath( this.getRecentConversationFilePath() ); @@ -33,8 +35,18 @@ export class UserMemoryManager { } else { logInfo("[UserMemoryManager] Recent Conversations file not found, skipping memory load"); } + + // Load saved memories + const savedMemoriesFile = this.app.vault.getAbstractFileByPath( + this.getSavedMemoriesFilePath() + ); + if (savedMemoriesFile instanceof TFile) { + this.savedMemoriesContent = await this.app.vault.read(savedMemoriesFile); + } else { + logInfo("[UserMemoryManager] Saved Memories file not found, skipping saved memory load"); + } } catch (error) { - logError("[UserMemoryManager] Error reading recent conversations file:", error); + logError("[UserMemoryManager] Error reading memory files:", error); } } @@ -45,7 +57,7 @@ export class UserMemoryManager { const settings = getSettings(); // Only proceed if memory is enabled - if (!settings.enableMemory) { + if (!settings.enableRecentConversations) { logInfo("[UserMemoryManager] Recent history referencing is disabled, skipping analysis"); return; } @@ -61,6 +73,40 @@ export class UserMemoryManager { }); } + /** + * Adds a saved memory that the user explicitly asked to remember + */ + async addSavedMemory(memoryContent: string): Promise { + const settings = getSettings(); + + // Only proceed if saved memory is enabled + if (!settings.enableSavedMemory) { + logInfo("[UserMemoryManager] Saved memory is disabled, skipping save"); + return; + } + + if (!memoryContent || memoryContent.trim() === "") { + logInfo("[UserMemoryManager] No content provided for saved memory"); + return; + } + + try { + // Ensure user memory folder exists + await this.ensureMemoryFolderExists(); + + // Create memory entry as a bullet point + const memoryEntry = `- ${memoryContent.trim()}`; + + // Add to saved memories file + await this.addToSavedMemoryFile(this.getSavedMemoriesFilePath(), memoryEntry); + + logInfo("[UserMemoryManager] Saved memory added successfully"); + } catch (error) { + logError("[UserMemoryManager] Error saving memory:", error); + throw error; + } + } + /** * Get user memory prompt */ @@ -68,19 +114,32 @@ export class UserMemoryManager { await this.loadMemory(); try { + const settings = getSettings(); let memoryPrompt = ""; - if (this.recentConversationsContent) { + // Add recent conversations if enabled + if (settings.enableRecentConversations && this.recentConversationsContent) { memoryPrompt += ` ${this.recentConversationsContent} - Above is the recent conversations between you and the user. + The current time is ${this.getTimestamp()}. + are the recent conversations between you and the user. You can use it to provide more context for your responses. Only use the recent conversations if they are relevant to the current conversation. + `; + } - The current time is ${this.getTimestamp()}. + // Add saved memories if enabled + if (settings.enableSavedMemory && this.savedMemoriesContent) { + memoryPrompt += ` + + ${this.savedMemoriesContent} + + + are important memories that the user explicitly asked you to remember. + Use these memories to provide more personalized and contextually relevant responses. 
`; } @@ -139,7 +198,10 @@ export class UserMemoryManager { // Extract and save conversation summary to recent conversations const conversationSection = await this.createConversationSection(messages, chatModel); - await this.addToMemoryFile(this.getRecentConversationFilePath(), conversationSection); + await this.addToRecentConversationsFile( + this.getRecentConversationFilePath(), + conversationSection + ); } catch (error) { logError("[UserMemoryManager] Error analyzing chat messages for user memory:", error); } finally { @@ -162,11 +224,46 @@ export class UserMemoryManager { return `${settings.memoryFolderName}/Recent Conversations.md`; } + public getSavedMemoriesFilePath(): string { + const settings = getSettings(); + return `${settings.memoryFolderName}/Saved Memories.md`; + } + + /** + * Save content to saved memory file by appending new entry (no max limit) + */ + private async addToSavedMemoryFile(filePath: string, newMemoryEntry: string): Promise { + const existingFile = this.app.vault.getAbstractFileByPath(filePath); + + if (existingFile instanceof TFile) { + // Read existing content and append new entry + const fileContent = await this.app.vault.read(existingFile); + + let updatedContent: string; + if (fileContent.trim() === "") { + // Create new file with the entry + updatedContent = `${newMemoryEntry}\n`; + } else { + // Append to existing content + updatedContent = `${fileContent.trimEnd()}\n${newMemoryEntry}\n`; + } + + await this.app.vault.modify(existingFile, updatedContent); + } else { + // Create new file + const initialContent = `${newMemoryEntry}\n`; + await this.app.vault.create(filePath, initialContent); + } + } + /** * Save content to the user memory file by appending new conversation section * Maintains a rolling buffer of conversations by removing the oldest when limit is exceeded */ - private async addToMemoryFile(filePath: string, newConversationSection: string): Promise { + private async addToRecentConversationsFile( + filePath: string, + newConversationSection: string + ): Promise { const existingFile = this.app.vault.getAbstractFileByPath(filePath); if (existingFile instanceof TFile) { diff --git a/src/memory/memory-design.md b/src/memory/memory-design.md index e2dfd16ce..cadc3e73d 100644 --- a/src/memory/memory-design.md +++ b/src/memory/memory-design.md @@ -33,7 +33,7 @@ graph TD ### Memory Update Triggers: - **Trigger**: When a chat conversation ends and `addRecentConversation()` is called -- **Guard**: Only if `enableMemory` setting (Reference Recent History) is enabled +- **Guard**: Only if `enableRecentConversations` setting (Reference Recent History) is enabled - **Fire-and-forget**: Runs asynchronously in background without blocking execution - **Race condition protection**: Prevents multiple simultaneous memory updates @@ -60,7 +60,7 @@ graph TD ### Configuration (Current): -- **`enableMemory`**: Master switch for all recent history referencing functionality +- **`enableRecentConversations`**: Master switch for all recent history referencing functionality - **`memoryFolderName`**: Folder where memory files are stored (creates recursively if needed) - **`maxRecentConversations`**: Number of conversations to keep (10-50 range, default: 30) diff --git a/src/settings/SettingsPage.tsx b/src/settings/SettingsPage.tsx index 002fd1b5d..88c6b9203 100644 --- a/src/settings/SettingsPage.tsx +++ b/src/settings/SettingsPage.tsx @@ -22,7 +22,7 @@ export class CopilotSettingTab extends PluginSettingTab { const chatView = 
this.app.workspace.getLeavesOfType(CHAT_VIEWTYPE)[0]?.view as CopilotView; // Analyze chat messages for memory if enabled - if (chatView && getSettings().enableMemory) { + if (chatView && getSettings().enableRecentConversations) { try { // Get the current chat model from the chain manager const chainManager = this.plugin.projectManager.getCurrentChainManager(); diff --git a/src/settings/model.ts b/src/settings/model.ts index f34d2f218..b33dc55a4 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -139,9 +139,11 @@ export interface CopilotSettings { /** Folder where memory data is stored */ memoryFolderName: string; /** Reference recent conversation history to provide more contextually relevant responses */ - enableMemory: boolean; + enableRecentConversations: boolean; /** Maximum number of recent conversations to remember (10-50) */ maxRecentConversations: number; + /** Reference saved memories that user explicitly asked to remember */ + enableSavedMemory: boolean; } export const settingsStore = createStore(); @@ -324,9 +326,14 @@ export function sanitizeSettings(settings: CopilotSettings): CopilotSettings { sanitizedSettings.memoryFolderName = DEFAULT_SETTINGS.memoryFolderName; } - // Ensure enableMemory has a default value - if (typeof sanitizedSettings.enableMemory !== "boolean") { - sanitizedSettings.enableMemory = DEFAULT_SETTINGS.enableMemory; + // Ensure enableRecentConversations has a default value + if (typeof sanitizedSettings.enableRecentConversations !== "boolean") { + sanitizedSettings.enableRecentConversations = DEFAULT_SETTINGS.enableRecentConversations; + } + + // Ensure enableSavedMemory has a default value + if (typeof sanitizedSettings.enableSavedMemory !== "boolean") { + sanitizedSettings.enableSavedMemory = DEFAULT_SETTINGS.enableSavedMemory; } // Ensure maxRecentConversations has a valid value (10-50 range) @@ -369,7 +376,7 @@ export async function getSystemPromptWithMemory( // Check if memory is enabled in settings const settings = getSettings(); - if (!settings.enableMemory) { + if (!settings.enableRecentConversations) { return systemPrompt; } diff --git a/src/settings/v2/components/CopilotPlusSettings.tsx b/src/settings/v2/components/CopilotPlusSettings.tsx index 6f3220162..848bc56b8 100644 --- a/src/settings/v2/components/CopilotPlusSettings.tsx +++ b/src/settings/v2/components/CopilotPlusSettings.tsx @@ -114,13 +114,23 @@ export const CopilotPlusSettings: React.FC = () => { type="switch" title="Reference Recent Conversation" description="When enabled, Copilot references your recent conversation history to provide more contextually relevant responses. All history data is stored locally in your vault." - checked={settings.enableMemory} + checked={settings.enableRecentConversations} onCheckedChange={(checked) => { - updateSetting("enableMemory", checked); + updateSetting("enableRecentConversations", checked); }} /> - {settings.enableMemory && ( + { + updateSetting("enableSavedMemory", checked); + }} + /> + + {settings.enableRecentConversations && ( +memoryTool +User's favorite programming language is Python and they prefer functional programming style +`, + }, + }); +} + /** * Initialize all built-in tools in the registry. * This function registers tool definitions, not user preferences. 
@@ -298,12 +327,19 @@ Example usage: */ export function initializeBuiltinTools(vault?: Vault): void { const registry = ToolRegistry.getInstance(); + const settings = getSettings(); - // Only reinitialize if tools have changed or vault status has changed + // Only reinitialize if tools have changed or vault/memory status has changed const hasFileTree = registry.getToolMetadata("getFileTree") !== undefined; const shouldHaveFileTree = vault !== undefined; + const hasMemoryTool = registry.getToolMetadata("memoryTool") !== undefined; + const shouldHaveMemoryTool = settings.enableSavedMemory; - if (registry.getAllTools().length === 0 || hasFileTree !== shouldHaveFileTree) { + if ( + registry.getAllTools().length === 0 || + hasFileTree !== shouldHaveFileTree || + hasMemoryTool !== shouldHaveMemoryTool + ) { // Clear any existing tools registry.clear(); @@ -314,5 +350,10 @@ export function initializeBuiltinTools(vault?: Vault): void { if (vault) { registerFileTreeTool(vault); } + + // Register memory tool if saved memory is enabled + if (settings.enableSavedMemory) { + registerMemoryTool(); + } } } diff --git a/src/tools/memoryTools.ts b/src/tools/memoryTools.ts new file mode 100644 index 000000000..6340e7168 --- /dev/null +++ b/src/tools/memoryTools.ts @@ -0,0 +1,49 @@ +import { z } from "zod"; +import { createTool, SimpleTool } from "./SimpleTool"; +import { UserMemoryManager } from "@/memory/UserMemoryManager"; +import { logInfo, logError } from "@/logger"; +import { Notice } from "obsidian"; + +// Define Zod schema for memoryTool +const memorySchema = z.object({ + memoryContent: z + .string() + .min(1) + .describe( + "The content to save to user's memory (information the user explicitly asked to remember)" + ), +}); + +/** + * Memory tool for saving information that the user explicitly asks the assistant to remember + */ +export const memoryTool: SimpleTool = + createTool({ + name: "memoryTool", + description: + "Save information to user memory when the user explicitly asks to remember something", + schema: memorySchema, + handler: async ({ memoryContent }) => { + try { + const memoryManager = new UserMemoryManager(app); + await memoryManager.addSavedMemory(memoryContent); + + logInfo(`[memoryTool] Successfully saved memory: ${memoryContent.substring(0, 100)}...`); + + // Notice the user that the memory has been saved + new Notice(`Memory saved successfully!`); + + return { + success: true, + message: `Memory saved successfully: ${memoryContent}`, + }; + } catch (error) { + logError("[memoryTool] Error saving memory:", error); + + return { + success: false, + message: `Failed to save memory: ${error.message}`, + }; + } + }, + }); From 3da77b4ddf247c2f0b659e1e5f912ebf4dd36d9a Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 23:27:11 +0900 Subject: [PATCH 25/32] Update design doc --- src/memory/memory-design.md | 110 +++++++++++++++++++++++++----------- 1 file changed, 78 insertions(+), 32 deletions(-) diff --git a/src/memory/memory-design.md b/src/memory/memory-design.md index cadc3e73d..58e0b8442 100644 --- a/src/memory/memory-design.md +++ b/src/memory/memory-design.md @@ -2,72 +2,110 @@ ## Overview -Current design for how the user memory system works in Obsidian Copilot, focusing on recent conversation memory only. Long-term memory features like user insights have been removed to simplify the system. +Current design for how the user memory system works in Obsidian Copilot, featuring two distinct memory types: + +1. 
**Recent Conversations**: Automatic background processing of chat history for context +2. **Saved Memories**: Explicit user-initiated memory storage for important information ## Flow Diagram ```mermaid graph TD - %% Triggers for Memory Updates + %% Recent Conversations Flow A[Chat Conversation Ends] --> B[addRecentConversation called] - B --> C{Memory Enabled?} + B --> C{enableRecentConversations?} C -->|Yes| D[Process Messages for Memory Storage] - C -->|No| Z[Skip Memory Update] - - %% Message Processing - D --> E[Extract Conversation Title using LLM] - E --> F[Extract Key Conclusions if substantial content] - F --> G[Create Conversation Section in Markdown using existing condensed messages] - - %% Storage and Rolling Buffer - H --> I[Load Existing Conversations from File] - I --> J[Add New Conversation Section] - J --> K[Apply Rolling Buffer Policy] - K --> L[Keep last maxRecentConversations] - L --> M[Save to Recent Conversations.md] - M --> N[Memory Update Complete] + C -->|No| Z1[Skip Recent Memory Update] + + D --> E[Extract Conversation Title & Summary using LLM] + E --> F[Create Conversation Section in Markdown] + F --> G[Load Existing Conversations from File] + G --> H[Add New Conversation Section] + H --> I[Apply Rolling Buffer Policy] + I --> J[Keep last maxRecentConversations] + J --> K[Save to Recent Conversations.md] + K --> L[Recent Memory Update Complete] + + %% Saved Memory Flow + M[User Explicitly Asks to Remember] --> N[memoryTool called] + N --> O{enableSavedMemory?} + O -->|Yes| P[Extract Memory Content] + O -->|No| Z2[Skip Saved Memory] + P --> Q[Format as Bullet Point] + Q --> R[Append to Saved Memories.md] + R --> S[Saved Memory Complete] + + %% Memory Retrieval + T[LLM Request] --> U[getUserMemoryPrompt called] + U --> V[Load Recent Conversations] + U --> W[Load Saved Memories] + V --> X[Combine Memory Sections] + W --> X + X --> Y[Return Memory Context for LLM] ``` ## Key Points ### Memory Update Triggers: +**Recent Conversations:** + - **Trigger**: When a chat conversation ends and `addRecentConversation()` is called -- **Guard**: Only if `enableRecentConversations` setting (Reference Recent History) is enabled +- **Guard**: Only if `enableRecentConversations` setting is enabled - **Fire-and-forget**: Runs asynchronously in background without blocking execution - **Race condition protection**: Prevents multiple simultaneous memory updates -### Recent Conversations (Current Implementation): +**Saved Memories:** + +- **Trigger**: When user explicitly asks to remember something during chat and `memoryTool` is called +- **Guard**: Only if `enableSavedMemory` setting is enabled +- **Immediate**: Saves directly to file when invoked +- **User notification**: Shows success/failure notice to user + +### Recent Conversations: - **When**: Updated after every conversation - **Retention policy**: Configurable rolling buffer - keeps last `maxRecentConversations` (default: 30, range: 10-50) - **Content**: - Timestamp (ISO format with UTC) - LLM-generated conversation title (2-8 words) - - Condensed user messages (AI-generated one-line summaries created during conversation) - - Optional key conclusions (only for substantial conversations >300 chars) + - LLM-generated summary (2-3 sentences with key details and conclusions) - **Format**: Markdown format with `## conversation title` sections containing structured data - **Storage**: `Recent Conversations.md` in the configured memory folder - **File handling**: Creates file if doesn't exist, parses existing conversations to 
maintain rolling buffer +### Saved Memories: + +- **When**: User explicitly asks to remember something via `memoryTool` +- **Retention policy**: No limit - memories persist until manually deleted +- **Content**: + - Raw user-specified information to remember + - Personal facts, preferences, important decisions, or context +- **Format**: Simple bullet-point list in markdown +- **Storage**: `Saved Memories.md` in the configured memory folder +- **File handling**: Appends new memories to existing file, creates if doesn't exist + ### Message Processing Features: -- **Condensed Messages**: AI-generated one-line summaries of user messages created during conversation (not during memory update) that preserve intent and important details -- **Conversation Titles**: LLM-extracted titles that capture main user intent -- **Key Conclusions**: Only generated for conversations with substantial content (>300 chars) containing insights, decisions, or learnings -- **Obsidian-optimized**: Special handling for note names, tags, links, and Obsidian-specific features +- **Conversation Titles**: LLM-extracted titles that capture main user intent (2-8 words) +- **Conversation Summaries**: AI-generated 2-3 sentence summaries with key details and conclusions +- **Memory Tool Integration**: Explicit memory saving via natural language commands - **Robust JSON Parsing**: Handles JSON responses wrapped in code blocks (common with Gemini and other LLMs) with fallback to plain JSON extraction +- **Language-aware**: Uses the same language as the conversation for titles and summaries -### Configuration (Current): +### Configuration: -- **`enableRecentConversations`**: Master switch for all recent history referencing functionality +- **`enableRecentConversations`**: Master switch for recent conversation history functionality +- **`enableSavedMemory`**: Master switch for saved memory functionality - **`memoryFolderName`**: Folder where memory files are stored (creates recursively if needed) -- **`maxRecentConversations`**: Number of conversations to keep (10-50 range, default: 30) +- **`maxRecentConversations`**: Number of recent conversations to keep (10-50 range, default: 30) ### Memory Retrieval: -- **`getUserMemoryPrompt()`**: Loads and returns Recent Conversations for LLM context -- **`loadMemory()`**: Loads memory data from files into class fields +- **`getUserMemoryPrompt()`**: Loads and returns both Recent Conversations and Saved Memories for LLM context +- **`loadMemory()`**: Loads memory data from both files into class fields +- **System prompt integration**: Memory context automatically included via `getSystemPromptWithMemory()` +- **Conditional loading**: Only includes enabled memory types based on settings - **Automatic folder creation**: Ensures memory folder exists before operations ### Error Handling: @@ -75,7 +113,15 @@ graph TD - Comprehensive error logging for all operations - Fallback mechanisms for AI processing failures - Graceful handling of missing files and folders -- Validation of AI-generated content (e.g., ensures condensed messages are actually shorter) +- User notifications for saved memory operations (success/failure) - Robust JSON extraction from LLM responses with multiple parsing strategies (code blocks, inline JSON, fallback to raw content) +- Race condition protection for concurrent memory updates + +### Tool Integration: + +- **Memory Tool**: Integrated into the tool registry when `enableSavedMemory` is enabled +- **Automatic registration**: Tool is conditionally registered based on 
settings +- **Natural language triggers**: Responds to phrases like "remember that", "don't forget", etc. +- **Context-aware**: Only saves information when user explicitly requests memory storage -This simplified design focuses on providing recent conversation context without the complexity of long-term memory management, while maintaining robust AI-powered content processing and configurable retention policies. +This dual memory design provides both automatic conversation context (recent conversations) and explicit user-controlled memory storage (saved memories), offering flexible memory management while maintaining robust AI-powered content processing and configurable retention policies. From deac33bba3fb194d2bbca20317aa1dc161ae36d9 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Mon, 15 Sep 2025 23:31:09 +0900 Subject: [PATCH 26/32] Fix test --- src/memory/UserMemoryManager.test.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index e78a60d52..487f6e537 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -431,7 +431,6 @@ The conversation covered advanced features and included code examples.`, expect(result).toContain(mockContent); expect(result).toContain(""); expect(result).toContain(""); - expect(result).toContain("Above is the recent conversations between you and the user"); }); it("should return null when no memory content exists", async () => { From 9e3f501ba3fc3186bbd6c5ee03c79b0ec72b5b57 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Tue, 16 Sep 2025 09:53:06 +0900 Subject: [PATCH 27/32] Update savedMemory setting --- .../v2/components/CopilotPlusSettings.tsx | 20 +++++++++---------- src/tools/builtinTools.ts | 1 + 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/settings/v2/components/CopilotPlusSettings.tsx b/src/settings/v2/components/CopilotPlusSettings.tsx index 848bc56b8..9e87ce889 100644 --- a/src/settings/v2/components/CopilotPlusSettings.tsx +++ b/src/settings/v2/components/CopilotPlusSettings.tsx @@ -120,16 +120,6 @@ export const CopilotPlusSettings: React.FC = () => { }} /> - { - updateSetting("enableSavedMemory", checked); - }} - /> - {settings.enableRecentConversations && ( { /> )} + { + updateSetting("enableSavedMemory", checked); + }} + /> +
Autocomplete
Date: Tue, 16 Sep 2025 09:58:23 +0900 Subject: [PATCH 28/32] Fix memory setting check --- src/settings/model.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/settings/model.ts b/src/settings/model.ts index b33dc55a4..72e6a212f 100644 --- a/src/settings/model.ts +++ b/src/settings/model.ts @@ -374,12 +374,6 @@ export async function getSystemPromptWithMemory( ): Promise { const systemPrompt = getSystemPrompt(); - // Check if memory is enabled in settings - const settings = getSettings(); - if (!settings.enableRecentConversations) { - return systemPrompt; - } - if (!userMemoryManager) { logInfo("No UserMemoryManager provided to getSystemPromptWithMemory"); return systemPrompt; From e3af03c8dea900a5031614fd2dccb2bc44fd9a33 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sat, 20 Sep 2025 21:20:20 +0900 Subject: [PATCH 29/32] Support "@memory" in non-agent mode --- src/LLMProviders/intentAnalyzer.ts | 22 +++++++++++++++++++++- src/tools/memoryTools.ts | 3 ++- src/tools/toolManager.ts | 2 ++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/src/LLMProviders/intentAnalyzer.ts b/src/LLMProviders/intentAnalyzer.ts index 01204489f..6a25690e0 100644 --- a/src/LLMProviders/intentAnalyzer.ts +++ b/src/LLMProviders/intentAnalyzer.ts @@ -15,9 +15,17 @@ import { ToolManager } from "@/tools/toolManager"; import { extractAllYoutubeUrls, extractChatHistory } from "@/utils"; import { Vault } from "obsidian"; import { BrevilabsClient } from "./brevilabsClient"; +import { memoryTool } from "@/tools/memoryTools"; // TODO: Add @index with explicit pdf files in chat context menu -export const COPILOT_TOOL_NAMES = ["@vault", "@composer", "@websearch", "@youtube", "@pomodoro"]; +export const COPILOT_TOOL_NAMES = [ + "@vault", + "@composer", + "@websearch", + "@youtube", + "@memory", + "@pomodoro", +]; type ToolCall = { tool: any; @@ -136,6 +144,18 @@ export class IntentAnalyzer { }); } + // Handle @websearch command and also support @web for backward compatibility + if (message.includes("@memory")) { + const cleanQuery = this.removeAtCommands(originalMessage); + + processedToolCalls.push({ + tool: memoryTool, + args: { + memoryContent: cleanQuery, + }, + }); + } + // Handle @pomodoro command if (message.includes("@pomodoro")) { const pomodoroMatch = originalMessage.match(/@pomodoro\s+(\S+)/i); diff --git a/src/tools/memoryTools.ts b/src/tools/memoryTools.ts index 6340e7168..252fa7380 100644 --- a/src/tools/memoryTools.ts +++ b/src/tools/memoryTools.ts @@ -27,6 +27,7 @@ export const memoryTool: SimpleTool { return "Start a pomodoro timer. Example: @pomodoro 25m"; case "@composer": return "Edit existing notes or create new notes."; + case "@memory": + return "Save information to user memory. 
Example: @memory "; default: return ""; } From ea720575441a05933ac4d55d73b3d0b3114551ee Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sat, 20 Sep 2025 21:37:40 +0900 Subject: [PATCH 30/32] Improve timestamp format --- src/memory/UserMemoryManager.test.ts | 52 ++++++++++++++-------------- src/memory/UserMemoryManager.ts | 13 +++++-- 2 files changed, 36 insertions(+), 29 deletions(-) diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index 487f6e537..92c6649b1 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -128,11 +128,11 @@ describe("UserMemoryManager", () => { // Mock existing memory file with previous conversations const existingMemoryContent = `## Previous Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** User asked about plugin installation and learned that plugins enhance Obsidian functionality. ## Another Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** User inquired about linking notes and discovered that backlinks create knowledge connections. `; @@ -166,7 +166,7 @@ describe("UserMemoryManager", () => { // Check that the new format is used expect(actualContent).toContain("## Daily Note Template Setup"); - expect(actualContent).toMatch(/\*\*Time:\*\* \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/); + expect(actualContent).toMatch(/\*\*Time:\*\* \d{4}-\d{2}-\d{2} \d{2}:\d{2}/); expect(actualContent).toContain( "**Summary:** User asked about creating daily note templates" ); @@ -292,40 +292,40 @@ It has multiple lines but no conversations. it("should extract single conversation section", () => { const content = `## Daily Note Template Setup -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** User asked about creating daily note templates with automatic date formatting.`; const result = (userMemoryManager as any).parseExistingConversations(content); expect(result).toEqual([ `## Daily Note Template Setup -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** User asked about creating daily note templates with automatic date formatting.`, ]); }); it("should extract multiple conversation sections", () => { const content = `## First Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** User asked about plugin installation. ## Second Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** User inquired about linking notes. ## Third Conversation -**Time:** 2024-01-01T11:00:00Z +**Time:** 2024-01-01 11:00 **Summary:** User learned about backlinks.`; const result = (userMemoryManager as any).parseExistingConversations(content); expect(result).toEqual([ `## First Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** User asked about plugin installation.`, `## Second Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** User inquired about linking notes.`, `## Third Conversation -**Time:** 2024-01-01T11:00:00Z +**Time:** 2024-01-01 11:00 **Summary:** User learned about backlinks.`, ]); }); @@ -335,47 +335,47 @@ It has multiple lines but no conversations. It might contain important information, but it's before the first conversation. ## First Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** This conversation should be included. 
## Second Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** This conversation should also be included.`; const result = (userMemoryManager as any).parseExistingConversations(content); expect(result).toEqual([ `## First Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** This conversation should be included.`, `## Second Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** This conversation should also be included.`, ]); }); it("should handle conversations with extra whitespace and trim them", () => { const content = ` ## First Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** User asked about plugin installation. ## Second Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** User inquired about linking notes. `; const result = (userMemoryManager as any).parseExistingConversations(content); expect(result).toEqual([ `## First Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** User asked about plugin installation.`, `## Second Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** User inquired about linking notes.`, ]); }); it("should handle conversation sections with complex multi-line content", () => { const content = `## Complex Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** User asked about multiple topics including: - How to create templates - How to use variables @@ -384,13 +384,13 @@ It might contain important information, but it's before the first conversation. The conversation covered advanced features and included code examples. ## Another Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** Short summary.`; const result = (userMemoryManager as any).parseExistingConversations(content); expect(result).toEqual([ `## Complex Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** User asked about multiple topics including: - How to create templates - How to use variables @@ -398,20 +398,20 @@ The conversation covered advanced features and included code examples. 
The conversation covered advanced features and included code examples.`, `## Another Conversation -**Time:** 2024-01-01T10:00:00Z +**Time:** 2024-01-01 10:00 **Summary:** Short summary.`, ]); }); it("should handle conversation at end of file without trailing newlines", () => { const content = `## Only Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** This is the only conversation and it's at the end.`; const result = (userMemoryManager as any).parseExistingConversations(content); expect(result).toEqual([ `## Only Conversation -**Time:** 2024-01-01T09:00:00Z +**Time:** 2024-01-01 09:00 **Summary:** This is the only conversation and it's at the end.`, ]); }); @@ -421,7 +421,7 @@ The conversation covered advanced features and included code examples.`, it("should return memory prompt when recent conversations exist", async () => { const mockFile = createMockTFile("copilot/memory/Recent Conversations.md"); const mockContent = - "## Test Conversation\n**Time:** 2024-01-01T10:00:00Z\n**Summary:** Test summary"; + "## Test Conversation\n**Time:** 2024-01-01 10:00\n**Summary:** Test summary"; mockVault.getAbstractFileByPath.mockReturnValue(mockFile); mockVault.read.mockResolvedValue(mockContent); diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 5b75d0677..9d331d081 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -93,9 +93,9 @@ export class UserMemoryManager { try { // Ensure user memory folder exists await this.ensureMemoryFolderExists(); - + const timestamp = this.getTimestamp(); // Create memory entry as a bullet point - const memoryEntry = `- ${memoryContent.trim()}`; + const memoryEntry = `- **${timestamp}** - ${memoryContent.trim()}`; // Add to saved memories file await this.addToSavedMemoryFile(this.getSavedMemoriesFilePath(), memoryEntry); @@ -151,7 +151,14 @@ export class UserMemoryManager { } private getTimestamp(): string { - return new Date().toISOString().split(".")[0] + "Z"; // Remove milliseconds but keep Z for UTC + const now = new Date(); + const year = now.getFullYear(); + const month = String(now.getMonth() + 1).padStart(2, "0"); + const day = String(now.getDate()).padStart(2, "0"); + const hours = String(now.getHours()).padStart(2, "0"); + const minutes = String(now.getMinutes()).padStart(2, "0"); + + return `${year}-${month}-${day} ${hours}:${minutes}`; } /** From dd47de2eab47aa93fab0b260ba72d78f44a84d1e Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sat, 20 Sep 2025 21:44:24 +0900 Subject: [PATCH 31/32] Update design doc --- src/memory/memory-design.md | 58 ++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/src/memory/memory-design.md b/src/memory/memory-design.md index 58e0b8442..151d7d603 100644 --- a/src/memory/memory-design.md +++ b/src/memory/memory-design.md @@ -67,7 +67,7 @@ graph TD - **When**: Updated after every conversation - **Retention policy**: Configurable rolling buffer - keeps last `maxRecentConversations` (default: 30, range: 10-50) - **Content**: - - Timestamp (ISO format with UTC) + - Timestamp (human-readable format: YYYY-MM-DD HH:MM in local time) - LLM-generated conversation title (2-8 words) - LLM-generated summary (2-3 sentences with key details and conclusions) - **Format**: Markdown format with `## conversation title` sections containing structured data @@ -125,3 +125,59 @@ graph TD - **Context-aware**: Only saves information when user explicitly requests memory storage This dual memory 
design provides both automatic conversation context (recent conversations) and explicit user-controlled memory storage (saved memories), offering flexible memory management while maintaining robust AI-powered content processing and configurable retention policies. + +## Memory System Behavior by Mode + +The memory system behaves differently depending on which chat mode is active: + +### Agent Mode (Autonomous Agent) + +- **Memory Retrieval**: ✅ Full access to both Recent Conversations and Saved Memories via system prompt +- **Memory Saving**: ✅ Direct access to `memoryTool` through XML-based tool calling +- **Behavior**: + - AI autonomously decides when to save memories based on user requests + - Uses XML format: `memoryTool...` + - Can reason step-by-step about whether something should be remembered + - Shows user notifications when memories are saved + - Access controlled by tool enablement settings (`autonomousAgentEnabledToolIds`) + +### Plus Mode (Legacy Tool Calling) + +- **Memory Retrieval**: ✅ Full access to both Recent Conversations and Saved Memories via system prompt +- **Memory Saving**: ✅ Access to `@memory` +- **Behavior**: + - Uses Brevilabs API intent analysis to determine when to call memory tools + - Intent analyzer processes user message and decides which tools to execute + - Tools are executed before LLM response generation + - Memory tool calls are pre-determined rather than AI-reasoned + - Same notification system as Agent mode + +### Basic Chat Mode (LLM Only) + +- **Memory Retrieval**: ✅ Full access to both Recent Conversations and Saved Memories via system prompt +- **Memory Saving**: ❌ No access to memory tools +- **Behavior**: + - Can reference existing memories for context + - Cannot save new memories during conversations + - Users must manually add memories outside of chat + +### Project Mode + +- **Memory Retrieval**: ❌ Currently disabled (memory not enabled for project mode) +- **Memory Saving**: ❌ Not available +- **Behavior**: + - Project-specific context only + - No cross-conversation memory persistence + - Note: Future enhancement could add project-scoped memory + +### Key Differences Summary + +| Feature | Agent Mode | Plus Mode | Basic Chat | Project Mode | +| ---------------- | ------------- | ------------------ | ------------- | ------------ | +| Memory Retrieval | ✅ Dynamic | ✅ Dynamic | ✅ Dynamic | ❌ Disabled | +| Memory Saving | ✅ XML Tools | ✅ Intent Analysis | ❌ None | ❌ None | +| Tool Decision | AI Reasoning | Pre-analysis | N/A | N/A | +| Memory Context | System Prompt | System Prompt | System Prompt | None | +| User Control | AI-driven | Intent-driven | Manual only | None | + +This design ensures that memory capabilities scale appropriately with the sophistication of each chat mode, while maintaining consistent memory retrieval across modes that support it. From 094e3f8af01a2d217fe03f959d09a9916f8b7008 Mon Sep 17 00:00:00 2001 From: wenzhengjiang Date: Sun, 21 Sep 2025 18:35:47 +0900 Subject: [PATCH 32/32] Add support for "@memory" command in builtin tools. 
--- src/memory/UserMemoryManager.test.ts | 6 ++++++ src/memory/UserMemoryManager.ts | 5 ++--- src/tools/builtinTools.ts | 1 + 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/memory/UserMemoryManager.test.ts b/src/memory/UserMemoryManager.test.ts index 92c6649b1..6e0e0de05 100644 --- a/src/memory/UserMemoryManager.test.ts +++ b/src/memory/UserMemoryManager.test.ts @@ -501,6 +501,9 @@ The conversation covered advanced features and included code examples.`, expect.stringContaining("- Important user preference: I prefer dark mode") ); + const createdContent = mockVault.create.mock.calls[0][1]; + expect(createdContent).not.toContain("**"); + expect(logInfo).toHaveBeenCalledWith("[UserMemoryManager] Saved memory added successfully"); }); @@ -532,6 +535,9 @@ The conversation covered advanced features and included code examples.`, expect.stringContaining("- New important information") ); + const modifiedContent = mockVault.modify.mock.calls[0][1]; + expect(modifiedContent).not.toContain("**"); + expect(logInfo).toHaveBeenCalledWith("[UserMemoryManager] Saved memory added successfully"); }); diff --git a/src/memory/UserMemoryManager.ts b/src/memory/UserMemoryManager.ts index 9d331d081..a825b339a 100644 --- a/src/memory/UserMemoryManager.ts +++ b/src/memory/UserMemoryManager.ts @@ -93,9 +93,8 @@ export class UserMemoryManager { try { // Ensure user memory folder exists await this.ensureMemoryFolderExists(); - const timestamp = this.getTimestamp(); - // Create memory entry as a bullet point - const memoryEntry = `- **${timestamp}** - ${memoryContent.trim()}`; + // Create memory entry as a bullet point without timestamp metadata + const memoryEntry = `- ${memoryContent.trim()}`; // Add to saved memories file await this.addToSavedMemoryFile(this.getSavedMemoriesFilePath(), memoryEntry); diff --git a/src/tools/builtinTools.ts b/src/tools/builtinTools.ts index 6f8066698..3a7ed2a56 100644 --- a/src/tools/builtinTools.ts +++ b/src/tools/builtinTools.ts @@ -309,6 +309,7 @@ export function registerMemoryTool(): void { displayName: "Save Memory", description: "Save information to user memory when explicitly asked to remember something", category: "memory", + copilotCommands: ["@memory"], isAlwaysEnabled: true, customPromptInstructions: `For memoryTool: - Use ONLY when the user explicitly asks you to remember something (phrases like "remember that", "don't forget", etc.)