diff --git a/docs/content/docs/ai/reference.mdx b/docs/content/docs/ai/reference.mdx
index 7be4973f96..33eae9ed98 100644
--- a/docs/content/docs/ai/reference.mdx
+++ b/docs/content/docs/ai/reference.mdx
@@ -206,11 +206,11 @@ type LLMRequestOptions = {
* @default { add: true, update: true, delete: true }
*/
defaultStreamTools?: {
- /** Enable the add tool (default: true) */
+ /** Enable the add tool (default: false) */
add?: boolean;
- /** Enable the update tool (default: true) */
+ /** Enable the update tool (default: false) */
update?: boolean;
- /** Enable the delete tool (default: true) */
+ /** Enable the delete tool (default: false) */
delete?: boolean;
};
/**
diff --git a/docs/content/docs/features/ai/reference.mdx b/docs/content/docs/features/ai/reference.mdx
index 7be4973f96..33eae9ed98 100644
--- a/docs/content/docs/features/ai/reference.mdx
+++ b/docs/content/docs/features/ai/reference.mdx
@@ -206,11 +206,11 @@ type LLMRequestOptions = {
* @default { add: true, update: true, delete: true }
*/
defaultStreamTools?: {
- /** Enable the add tool (default: true) */
+ /** Enable the add tool (default: false) */
add?: boolean;
- /** Enable the update tool (default: true) */
+ /** Enable the update tool (default: false) */
update?: boolean;
- /** Enable the delete tool (default: true) */
+ /** Enable the delete tool (default: false) */
delete?: boolean;
};
/**
diff --git a/docs/package.json b/docs/package.json
index 358bd32d45..d61f44e1aa 100644
--- a/docs/package.json
+++ b/docs/package.json
@@ -68,7 +68,7 @@
"@vercel/analytics": "^1.5.0",
"@vercel/og": "^0.6.8",
"@y-sweet/react": "^0.6.3",
- "ai": "^4.1.0",
+ "ai": "^4.3.15",
"babel-plugin-react-compiler": "19.1.0-rc.2",
"better-auth": "^1.2.10",
"better-sqlite3": "^11.10.0",
diff --git a/examples/09-ai/01-minimal/src/App.tsx b/examples/09-ai/01-minimal/src/App.tsx
index 9f56576e67..45b5412cc5 100644
--- a/examples/09-ai/01-minimal/src/App.tsx
+++ b/examples/09-ai/01-minimal/src/App.tsx
@@ -7,9 +7,9 @@ import "@blocknote/mantine/style.css";
import {
FormattingToolbar,
FormattingToolbarController,
- SuggestionMenuController,
getDefaultReactSlashMenuItems,
getFormattingToolbarItems,
+ SuggestionMenuController,
useCreateBlockNote,
} from "@blocknote/react";
import {
@@ -29,7 +29,7 @@ import { getEnv } from "./getEnv";
const client = createBlockNoteAIClient({
apiKey: getEnv("BLOCKNOTE_AI_SERVER_API_KEY") || "PLACEHOLDER",
baseURL:
- getEnv("BLOCKNOTE_AI_SERVER_BASE_URL") || "https://localhost:3000/ai",
+ getEnv("BLOCKNOTE_AI_SERVER_BASE_URL") || "https://localhost:3000/ai/proxy",
});
// Use an "open" model such as llama, in this case via groq.com
diff --git a/examples/09-ai/05-manual-execution/.bnexample.json b/examples/09-ai/05-manual-execution/.bnexample.json
new file mode 100644
index 0000000000..6f21dbcd55
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/.bnexample.json
@@ -0,0 +1,15 @@
+{
+ "playground": true,
+ "docs": false,
+ "author": "yousefed",
+ "tags": ["AI", "llm"],
+ "dependencies": {
+ "@blocknote/xl-ai": "latest",
+ "@mantine/core": "^7.17.3",
+ "ai": "^4.3.15",
+ "@ai-sdk/groq": "^1.2.9",
+ "y-partykit": "^0.0.25",
+ "yjs": "^13.6.27",
+ "zustand": "^5.0.3"
+ }
+}
diff --git a/examples/09-ai/05-manual-execution/README.md b/examples/09-ai/05-manual-execution/README.md
new file mode 100644
index 0000000000..003f16a00c
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/README.md
@@ -0,0 +1,3 @@
+# AI manual execution
+
+Instead of calling AI models directly, this example shows how you can take an existing stream of responses and apply it to the editor.
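+
+The rough idea, as a minimal sketch (API names follow this PR's `@blocknote/xl-ai` exports; `responseStream` stands in for whatever stream of tool-call JSON you already have - see `src/App.tsx` for the full example):
+
+```tsx
+import { BlockNoteEditor } from "@blocknote/core";
+import { llmFormats, StreamToolExecutor } from "@blocknote/xl-ai";
+
+async function applyResponseStream(
+  editor: BlockNoteEditor,
+  responseStream: ReadableStream<string>, // (partial) tool-call JSON strings
+) {
+  // Build the default stream tools (add / update / delete) for the HTML block format
+  const streamTools = llmFormats.html.getStreamTools(editor, true);
+
+  // The executor parses, validates and applies the operations to the editor
+  const executor = new StreamToolExecutor(streamTools);
+  await responseStream.pipeTo(executor.writable);
+  await executor.waitTillEnd();
+}
+```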
diff --git a/examples/09-ai/05-manual-execution/index.html b/examples/09-ai/05-manual-execution/index.html
new file mode 100644
index 0000000000..c63d224da9
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/index.html
@@ -0,0 +1,14 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>AI manual execution</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="./main.tsx"></script>
+  </body>
+</html>
diff --git a/examples/09-ai/05-manual-execution/main.tsx b/examples/09-ai/05-manual-execution/main.tsx
new file mode 100644
index 0000000000..677c7f7eed
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/main.tsx
@@ -0,0 +1,11 @@
+// AUTO-GENERATED FILE, DO NOT EDIT DIRECTLY
+import React from "react";
+import { createRoot } from "react-dom/client";
+import App from "./src/App.jsx";
+
+const root = createRoot(document.getElementById("root")!);
+root.render(
+  <React.StrictMode>
+    <App />
+  </React.StrictMode>,
+);
diff --git a/examples/09-ai/05-manual-execution/package.json b/examples/09-ai/05-manual-execution/package.json
new file mode 100644
index 0000000000..f47382c37e
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/package.json
@@ -0,0 +1,34 @@
+{
+ "name": "@blocknote/example-ai-manual-execution",
+ "description": "AUTO-GENERATED FILE, DO NOT EDIT DIRECTLY",
+ "private": true,
+ "version": "0.12.4",
+ "scripts": {
+ "start": "vite",
+ "dev": "vite",
+ "build:prod": "tsc && vite build",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "@blocknote/core": "latest",
+ "@blocknote/react": "latest",
+ "@blocknote/ariakit": "latest",
+ "@blocknote/mantine": "latest",
+ "@blocknote/shadcn": "latest",
+ "react": "^19.1.0",
+ "react-dom": "^19.1.0",
+ "@blocknote/xl-ai": "latest",
+ "@mantine/core": "^7.17.3",
+ "ai": "^4.3.15",
+ "@ai-sdk/groq": "^1.2.9",
+ "y-partykit": "^0.0.25",
+ "yjs": "^13.6.27",
+ "zustand": "^5.0.3"
+ },
+ "devDependencies": {
+ "@types/react": "^19.1.0",
+ "@types/react-dom": "^19.1.0",
+ "@vitejs/plugin-react": "^4.3.1",
+ "vite": "^5.3.4"
+ }
+}
\ No newline at end of file
diff --git a/examples/09-ai/05-manual-execution/src/App.tsx b/examples/09-ai/05-manual-execution/src/App.tsx
new file mode 100644
index 0000000000..fb72b587cf
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/src/App.tsx
@@ -0,0 +1,200 @@
+import "@blocknote/core/fonts/inter.css";
+import { en } from "@blocknote/core/locales";
+import { BlockNoteView } from "@blocknote/mantine";
+import "@blocknote/mantine/style.css";
+import { useCreateBlockNote } from "@blocknote/react";
+import {
+ StreamToolExecutor,
+ createAIExtension,
+ getAIExtension,
+ llmFormats,
+} from "@blocknote/xl-ai";
+import { en as aiEn } from "@blocknote/xl-ai/locales";
+import "@blocknote/xl-ai/style.css";
+
+export default function App() {
+ // Creates a new editor instance.
+ const editor = useCreateBlockNote({
+ dictionary: {
+ ...en,
+ ai: aiEn, // add default translations for the AI extension
+ },
+ // Register the AI extension
+ extensions: [
+ createAIExtension({
+      executor: undefined as any, // disabled - this example applies operations manually via a StreamToolExecutor instead of calling an LLM
+ }),
+ ],
+ // We set some initial content for demo purposes
+ initialContent: [
+ {
+ type: "heading",
+ props: {
+ level: 1,
+ },
+ content: "Open source software",
+ },
+ {
+ type: "paragraph",
+ content:
+ "Open source software refers to computer programs whose source code is made available to the public, allowing anyone to view, modify, and distribute the code. This model stands in contrast to proprietary software, where the source code is kept secret and only the original creators have the right to make changes. Open projects are developed collaboratively, often by communities of developers from around the world, and are typically distributed under licenses that promote sharing and openness.",
+ },
+ {
+ type: "paragraph",
+ content:
+ "One of the primary benefits of open source is the promotion of digital autonomy. By providing access to the source code, these programs empower users to control their own technology, customize software to fit their needs, and avoid vendor lock-in. This level of transparency also allows for greater security, as anyone can inspect the code for vulnerabilities or malicious elements. As a result, users are not solely dependent on a single company for updates, bug fixes, or continued support.",
+ },
+ {
+ type: "paragraph",
+ content:
+ "Additionally, open development fosters innovation and collaboration. Developers can build upon existing projects, share improvements, and learn from each other, accelerating the pace of technological advancement. The open nature of these projects often leads to higher quality software, as bugs are identified and fixed more quickly by a diverse group of contributors. Furthermore, using open source can reduce costs for individuals, businesses, and governments, as it is often available for free and can be tailored to specific requirements without expensive licensing fees.",
+ },
+ ],
+ });
+
+ // Renders the editor instance using a React component.
+ return (
+
+
+
+
+ {/*Inserts a new block at start of document.*/}
+
+
+
+
+
+ );
+}
diff --git a/examples/09-ai/05-manual-execution/src/getEnv.ts b/examples/09-ai/05-manual-execution/src/getEnv.ts
new file mode 100644
index 0000000000..b225fc462e
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/src/getEnv.ts
@@ -0,0 +1,20 @@
+// helper function to get env variables across next / vite
+// only needed so this example works in BlockNote demos and docs
+export function getEnv(key: string) {
+ const env = (import.meta as any).env
+ ? {
+ BLOCKNOTE_AI_SERVER_API_KEY: (import.meta as any).env
+ .VITE_BLOCKNOTE_AI_SERVER_API_KEY,
+ BLOCKNOTE_AI_SERVER_BASE_URL: (import.meta as any).env
+ .VITE_BLOCKNOTE_AI_SERVER_BASE_URL,
+ }
+ : {
+ BLOCKNOTE_AI_SERVER_API_KEY:
+ process.env.NEXT_PUBLIC_BLOCKNOTE_AI_SERVER_API_KEY,
+ BLOCKNOTE_AI_SERVER_BASE_URL:
+ process.env.NEXT_PUBLIC_BLOCKNOTE_AI_SERVER_BASE_URL,
+ };
+
+ const value = env[key as keyof typeof env];
+ return value;
+}
diff --git a/examples/09-ai/05-manual-execution/src/styles.css b/examples/09-ai/05-manual-execution/src/styles.css
new file mode 100644
index 0000000000..cc97b34a4f
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/src/styles.css
@@ -0,0 +1,15 @@
+.edit-buttons {
+ display: flex;
+ justify-content: space-between;
+ margin-top: 8px;
+}
+
+.edit-button {
+ border: 1px solid gray;
+ border-radius: 4px;
+ padding-inline: 4px;
+}
+
+.edit-button:hover {
+ border: 1px solid lightgrey;
+}
diff --git a/examples/09-ai/05-manual-execution/tsconfig.json b/examples/09-ai/05-manual-execution/tsconfig.json
new file mode 100644
index 0000000000..dbe3e6f62d
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/tsconfig.json
@@ -0,0 +1,36 @@
+{
+ "__comment": "AUTO-GENERATED FILE, DO NOT EDIT DIRECTLY",
+ "compilerOptions": {
+ "target": "ESNext",
+ "useDefineForClassFields": true,
+ "lib": [
+ "DOM",
+ "DOM.Iterable",
+ "ESNext"
+ ],
+ "allowJs": false,
+ "skipLibCheck": true,
+ "esModuleInterop": false,
+ "allowSyntheticDefaultImports": true,
+ "strict": true,
+ "forceConsistentCasingInFileNames": true,
+ "module": "ESNext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "noEmit": true,
+ "jsx": "react-jsx",
+ "composite": true
+ },
+ "include": [
+ "."
+ ],
+ "__ADD_FOR_LOCAL_DEV_references": [
+ {
+ "path": "../../../packages/core/"
+ },
+ {
+ "path": "../../../packages/react/"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/examples/09-ai/05-manual-execution/vite.config.ts b/examples/09-ai/05-manual-execution/vite.config.ts
new file mode 100644
index 0000000000..f62ab20bc2
--- /dev/null
+++ b/examples/09-ai/05-manual-execution/vite.config.ts
@@ -0,0 +1,32 @@
+// AUTO-GENERATED FILE, DO NOT EDIT DIRECTLY
+import react from "@vitejs/plugin-react";
+import * as fs from "fs";
+import * as path from "path";
+import { defineConfig } from "vite";
+// import eslintPlugin from "vite-plugin-eslint";
+// https://vitejs.dev/config/
+export default defineConfig((conf) => ({
+ plugins: [react()],
+ optimizeDeps: {},
+ build: {
+ sourcemap: true,
+ },
+ resolve: {
+ alias:
+ conf.command === "build" ||
+ !fs.existsSync(path.resolve(__dirname, "../../packages/core/src"))
+ ? {}
+ : ({
+ // Comment out the lines below to load a built version of blocknote
+ // or, keep as is to load live from sources with live reload working
+ "@blocknote/core": path.resolve(
+ __dirname,
+ "../../packages/core/src/"
+ ),
+ "@blocknote/react": path.resolve(
+ __dirname,
+ "../../packages/react/src/"
+ ),
+ } as any),
+ },
+}));
diff --git a/examples/09-ai/06-server-execution/.bnexample.json b/examples/09-ai/06-server-execution/.bnexample.json
new file mode 100644
index 0000000000..b89a734a39
--- /dev/null
+++ b/examples/09-ai/06-server-execution/.bnexample.json
@@ -0,0 +1,12 @@
+{
+ "playground": true,
+ "docs": true,
+ "author": "yousefed",
+ "tags": ["AI", "llm"],
+ "dependencies": {
+ "@blocknote/xl-ai": "latest",
+ "@mantine/core": "^7.17.3",
+ "ai": "^4.3.15",
+ "zustand": "^5.0.3"
+ }
+}
diff --git a/examples/09-ai/06-server-execution/README.md b/examples/09-ai/06-server-execution/README.md
new file mode 100644
index 0000000000..867a30affa
--- /dev/null
+++ b/examples/09-ai/06-server-execution/README.md
@@ -0,0 +1,3 @@
+# AI Integration with server LLM execution
+
+This example shows how to set up AI integration while handling the LLM calls on your server (in this case, using the Vercel AI SDK), using a custom executor.
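+
+The key part is the custom `executor` passed to `createAIExtension`, condensed here from `src/App.tsx` (the matching server route lives in `packages/xl-ai-server/src/routes/vercelAiSdk.ts`):
+
+```tsx
+import {
+  createAIExtension,
+  createStreamToolsArraySchema,
+  dataStreamResponseToOperationsResult,
+  LLMResponse,
+} from "@blocknote/xl-ai";
+
+const BASE_URL = "https://localhost:3000/ai/vercel-ai-sdk";
+
+const aiExtension = createAIExtension({
+  // Call your own backend instead of calling an LLM provider directly from the client
+  executor: async (opts) => {
+    const schema = createStreamToolsArraySchema(opts.streamTools);
+    const response = await fetch(`${BASE_URL}/streamObject`, {
+      method: "POST",
+      body: JSON.stringify({ messages: opts.messages, schema }),
+    });
+    const result = await dataStreamResponseToOperationsResult(
+      response,
+      opts.streamTools,
+      opts.onStart,
+    );
+    return new LLMResponse(opts.messages, result, opts.streamTools);
+  },
+});
+```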
diff --git a/examples/09-ai/06-server-execution/index.html b/examples/09-ai/06-server-execution/index.html
new file mode 100644
index 0000000000..c2a78b33de
--- /dev/null
+++ b/examples/09-ai/06-server-execution/index.html
@@ -0,0 +1,14 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>AI Integration with server LLM execution</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="./main.tsx"></script>
+  </body>
+</html>
diff --git a/examples/09-ai/06-server-execution/main.tsx b/examples/09-ai/06-server-execution/main.tsx
new file mode 100644
index 0000000000..677c7f7eed
--- /dev/null
+++ b/examples/09-ai/06-server-execution/main.tsx
@@ -0,0 +1,11 @@
+// AUTO-GENERATED FILE, DO NOT EDIT DIRECTLY
+import React from "react";
+import { createRoot } from "react-dom/client";
+import App from "./src/App.jsx";
+
+const root = createRoot(document.getElementById("root")!);
+root.render(
+  <React.StrictMode>
+    <App />
+  </React.StrictMode>,
+);
diff --git a/examples/09-ai/06-server-execution/package.json b/examples/09-ai/06-server-execution/package.json
new file mode 100644
index 0000000000..e88ec02090
--- /dev/null
+++ b/examples/09-ai/06-server-execution/package.json
@@ -0,0 +1,31 @@
+{
+ "name": "@blocknote/example-ai-server-execution",
+ "description": "AUTO-GENERATED FILE, DO NOT EDIT DIRECTLY",
+ "private": true,
+ "version": "0.12.4",
+ "scripts": {
+ "start": "vite",
+ "dev": "vite",
+ "build:prod": "tsc && vite build",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "@blocknote/core": "latest",
+ "@blocknote/react": "latest",
+ "@blocknote/ariakit": "latest",
+ "@blocknote/mantine": "latest",
+ "@blocknote/shadcn": "latest",
+ "react": "^19.1.0",
+ "react-dom": "^19.1.0",
+ "@blocknote/xl-ai": "latest",
+ "@mantine/core": "^7.17.3",
+ "ai": "^4.3.15",
+ "zustand": "^5.0.3"
+ },
+ "devDependencies": {
+ "@types/react": "^19.1.0",
+ "@types/react-dom": "^19.1.0",
+ "@vitejs/plugin-react": "^4.3.1",
+ "vite": "^5.3.4"
+ }
+}
\ No newline at end of file
diff --git a/examples/09-ai/06-server-execution/src/App.tsx b/examples/09-ai/06-server-execution/src/App.tsx
new file mode 100644
index 0000000000..6b609aa554
--- /dev/null
+++ b/examples/09-ai/06-server-execution/src/App.tsx
@@ -0,0 +1,158 @@
+import { BlockNoteEditor, filterSuggestionItems } from "@blocknote/core";
+import "@blocknote/core/fonts/inter.css";
+import { en } from "@blocknote/core/locales";
+import { BlockNoteView } from "@blocknote/mantine";
+import "@blocknote/mantine/style.css";
+import {
+ FormattingToolbar,
+ FormattingToolbarController,
+ getDefaultReactSlashMenuItems,
+ getFormattingToolbarItems,
+ SuggestionMenuController,
+ useCreateBlockNote,
+} from "@blocknote/react";
+import {
+ AIMenuController,
+ AIToolbarButton,
+ createAIExtension,
+ createStreamToolsArraySchema,
+ dataStreamResponseToOperationsResult,
+ getAISlashMenuItems,
+ LLMResponse,
+} from "@blocknote/xl-ai";
+import { en as aiEn } from "@blocknote/xl-ai/locales";
+import "@blocknote/xl-ai/style.css";
+
+import { getEnv } from "./getEnv";
+
+const BASE_URL =
+ getEnv("BLOCKNOTE_AI_SERVER_BASE_URL") ||
+ "https://localhost:3000/ai/vercel-ai-sdk";
+
+export default function App() {
+ // Creates a new editor instance.
+ const editor = useCreateBlockNote({
+ dictionary: {
+ ...en,
+ ai: aiEn, // add default translations for the AI extension
+ },
+ // Register the AI extension
+ extensions: [
+ createAIExtension({
+ // We define a custom executor that calls our backend server to execute LLM calls
+ // On the backend, we use the Vercel AI SDK to execute LLM calls
+ // (see packages/xl-ai-server/src/routes/vercelAiSdk.ts)
+ executor: async (opts) => {
+ const schema = createStreamToolsArraySchema(opts.streamTools);
+
+ // Can also use /generateObject for non-streaming mode
+ const response = await fetch(`${BASE_URL}/streamObject`, {
+ method: "POST",
+ body: JSON.stringify({
+ messages: opts.messages,
+ schema,
+ }),
+ });
+ const parsedResponse = await dataStreamResponseToOperationsResult(
+ response,
+ opts.streamTools,
+ opts.onStart,
+ );
+ return new LLMResponse(
+ opts.messages,
+ parsedResponse,
+ opts.streamTools,
+ );
+ },
+ }),
+ ],
+ // We set some initial content for demo purposes
+ initialContent: [
+ {
+ type: "heading",
+ props: {
+ level: 1,
+ },
+ content: "Open source software",
+ },
+ {
+ type: "paragraph",
+ content:
+ "Open source software refers to computer programs whose source code is made available to the public, allowing anyone to view, modify, and distribute the code. This model stands in contrast to proprietary software, where the source code is kept secret and only the original creators have the right to make changes. Open projects are developed collaboratively, often by communities of developers from around the world, and are typically distributed under licenses that promote sharing and openness.",
+ },
+ {
+ type: "paragraph",
+ content:
+ "One of the primary benefits of open source is the promotion of digital autonomy. By providing access to the source code, these programs empower users to control their own technology, customize software to fit their needs, and avoid vendor lock-in. This level of transparency also allows for greater security, as anyone can inspect the code for vulnerabilities or malicious elements. As a result, users are not solely dependent on a single company for updates, bug fixes, or continued support.",
+ },
+ {
+ type: "paragraph",
+ content:
+ "Additionally, open development fosters innovation and collaboration. Developers can build upon existing projects, share improvements, and learn from each other, accelerating the pace of technological advancement. The open nature of these projects often leads to higher quality software, as bugs are identified and fixed more quickly by a diverse group of contributors. Furthermore, using open source can reduce costs for individuals, businesses, and governments, as it is often available for free and can be tailored to specific requirements without expensive licensing fees.",
+ },
+ ],
+ });
+
+ // Renders the editor instance using a React component.
+  return (
+    <div>
+      <BlockNoteView editor={editor} formattingToolbar={false} slashMenu={false}>
+        {/* Add the AI Command menu to the editor */}
+        <AIMenuController />
+
+        {/* We disabled the default formatting toolbar with `formattingToolbar=false`
+        and replace it for one with an "AI button" (defined below).
+        (See "Formatting Toolbar" in docs)
+        */}
+        <FormattingToolbarWithAI />
+
+        {/* We disabled the default SlashMenu with `slashMenu=false`
+        and replace it for one with an AI option (defined below).
+        (See "Suggestion Menus" in docs)
+        */}
+        <SuggestionMenuWithAI editor={editor} />
+      </BlockNoteView>
+    </div>
+  );
+}
+
+// Formatting toolbar with the `AIToolbarButton` added
+function FormattingToolbarWithAI() {
+  return (
+    <FormattingToolbarController
+      formattingToolbar={() => (
+        <FormattingToolbar>
+          {...getFormattingToolbarItems()}
+          {/* Add the AI button */}
+          <AIToolbarButton />
+        </FormattingToolbar>
+      )}
+    />
+ );
+}
+
+// Slash menu with the AI option added
+function SuggestionMenuWithAI(props: {
+  editor: BlockNoteEditor<any, any, any>;
+}) {
+  return (
+    <SuggestionMenuController
+      triggerCharacter="/"
+      getItems={async (query) =>
+ filterSuggestionItems(
+ [
+ ...getDefaultReactSlashMenuItems(props.editor),
+ // add the default AI slash menu items, or define your own
+ ...getAISlashMenuItems(props.editor),
+ ],
+ query,
+ )
+ }
+ />
+ );
+}
diff --git a/examples/09-ai/06-server-execution/src/getEnv.ts b/examples/09-ai/06-server-execution/src/getEnv.ts
new file mode 100644
index 0000000000..b225fc462e
--- /dev/null
+++ b/examples/09-ai/06-server-execution/src/getEnv.ts
@@ -0,0 +1,20 @@
+// helper function to get env variables across next / vite
+// only needed so this example works in BlockNote demos and docs
+export function getEnv(key: string) {
+ const env = (import.meta as any).env
+ ? {
+ BLOCKNOTE_AI_SERVER_API_KEY: (import.meta as any).env
+ .VITE_BLOCKNOTE_AI_SERVER_API_KEY,
+ BLOCKNOTE_AI_SERVER_BASE_URL: (import.meta as any).env
+ .VITE_BLOCKNOTE_AI_SERVER_BASE_URL,
+ }
+ : {
+ BLOCKNOTE_AI_SERVER_API_KEY:
+ process.env.NEXT_PUBLIC_BLOCKNOTE_AI_SERVER_API_KEY,
+ BLOCKNOTE_AI_SERVER_BASE_URL:
+ process.env.NEXT_PUBLIC_BLOCKNOTE_AI_SERVER_BASE_URL,
+ };
+
+ const value = env[key as keyof typeof env];
+ return value;
+}
diff --git a/examples/09-ai/06-server-execution/tsconfig.json b/examples/09-ai/06-server-execution/tsconfig.json
new file mode 100644
index 0000000000..3b74ef215c
--- /dev/null
+++ b/examples/09-ai/06-server-execution/tsconfig.json
@@ -0,0 +1,33 @@
+{
+ "__comment": "AUTO-GENERATED FILE, DO NOT EDIT DIRECTLY",
+ "compilerOptions": {
+ "target": "ESNext",
+ "useDefineForClassFields": true,
+ "lib": ["DOM", "DOM.Iterable", "ESNext"],
+ "allowJs": false,
+ "skipLibCheck": true,
+ "esModuleInterop": false,
+ "allowSyntheticDefaultImports": true,
+ "strict": true,
+ "forceConsistentCasingInFileNames": true,
+ "module": "ESNext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "noEmit": true,
+ "jsx": "react-jsx",
+ "composite": true
+ },
+ "include": ["."],
+ "references": [
+ {
+ "path": "../../../packages/core/"
+ },
+ {
+ "path": "../../../packages/react/"
+ },
+ {
+ "path": "../../../packages/xl-ai/"
+ }
+ ]
+}
diff --git a/examples/09-ai/06-server-execution/vite.config.ts b/examples/09-ai/06-server-execution/vite.config.ts
new file mode 100644
index 0000000000..f62ab20bc2
--- /dev/null
+++ b/examples/09-ai/06-server-execution/vite.config.ts
@@ -0,0 +1,32 @@
+// AUTO-GENERATED FILE, DO NOT EDIT DIRECTLY
+import react from "@vitejs/plugin-react";
+import * as fs from "fs";
+import * as path from "path";
+import { defineConfig } from "vite";
+// import eslintPlugin from "vite-plugin-eslint";
+// https://vitejs.dev/config/
+export default defineConfig((conf) => ({
+ plugins: [react()],
+ optimizeDeps: {},
+ build: {
+ sourcemap: true,
+ },
+ resolve: {
+ alias:
+ conf.command === "build" ||
+ !fs.existsSync(path.resolve(__dirname, "../../packages/core/src"))
+ ? {}
+ : ({
+ // Comment out the lines below to load a built version of blocknote
+ // or, keep as is to load live from sources with live reload working
+ "@blocknote/core": path.resolve(
+ __dirname,
+ "../../packages/core/src/"
+ ),
+ "@blocknote/react": path.resolve(
+ __dirname,
+ "../../packages/react/src/"
+ ),
+ } as any),
+ },
+}));
diff --git a/packages/xl-ai-server/package.json b/packages/xl-ai-server/package.json
index 8e4133385e..4ca8540906 100644
--- a/packages/xl-ai-server/package.json
+++ b/packages/xl-ai-server/package.json
@@ -44,7 +44,10 @@
},
"dependencies": {
"@hono/node-server": "^1.13.7",
- "hono": "^4.6.12"
+ "hono": "^4.6.12",
+ "ai": "^4",
+ "@blocknote/xl-ai": "workspace:*",
+ "@ai-sdk/openai": "^1.3.22"
},
"devDependencies": {
"eslint": "^8.10.0",
diff --git a/packages/xl-ai-server/src/index.ts b/packages/xl-ai-server/src/index.ts
index f5ce73b3ae..2387be4c96 100644
--- a/packages/xl-ai-server/src/index.ts
+++ b/packages/xl-ai-server/src/index.ts
@@ -1,10 +1,11 @@
import { serve } from "@hono/node-server";
import { Hono } from "hono";
import { bearerAuth } from "hono/bearer-auth";
-import { cors } from "hono/cors";
import { existsSync, readFileSync } from "node:fs";
import { createSecureServer } from "node:http2";
import { Agent, setGlobalDispatcher } from "undici";
+import { proxyRoute } from "./routes/proxy.js";
+import { vercelAiSdkRoute } from "./routes/vercelAiSdk.js";
// make sure our fetch request uses HTTP/2
setGlobalDispatcher(
@@ -13,73 +14,6 @@ setGlobalDispatcher(
}),
);
-const ignoreHeadersRe = /^content-(?:encoding|length|range)$/i;
-
-// REC: we might be able to replace this by https://github.com/honojs/hono/pull/3589
-export const proxyFetch: typeof fetch = async (request, options) => {
- const req = new Request(request, options);
- req.headers.delete("accept-encoding"); // TBD: there may be cases where you want to explicitly specify
- req.headers.delete("Origin");
- const res = await fetch(req);
-
- const headers: HeadersInit = [...res.headers.entries()].filter(
- ([k]) => !ignoreHeadersRe.test(k) && k !== "strict-transport-security",
- );
-
- const readable = res.body;
-
- // For debugging purposes, we can log the chunks as they stream:
-
- // const { readable, writable } = new TransformStream({
- // async transform(chunk, controller) {
- // // Log each chunk as it passes through
-
- // // optional, wait to test streaming mode
- // // await new Promise((resolve) => setTimeout(resolve, 3000));
-
- // console.log("Streaming chunk:", new TextDecoder().decode(chunk));
- // controller.enqueue(chunk);
- // },
- // });
-
- // // Pipe the response body through our transform stream
- // res.body?.pipeTo(writable).catch((err) => {
- // console.error("Error in stream:", err);
- // });
-
- return new Response(readable, {
- ...res,
- status: res.status,
- statusText: res.statusText,
- headers,
- });
-};
-
-function getProviderInfo(provider: string) {
- const envKey = `${provider.toUpperCase().replace(/-/g, "_")}_API_KEY`;
- const key = process.env[envKey];
- if (!key || !key.length) {
- return "not-found";
- }
- if (provider === "google") {
- return {
- key,
- header: "x-goog-api-key",
- };
- }
- if (provider === "anthropic") {
- return {
- key,
- header: "x-api-key",
- };
- }
-
- return {
- key,
- header: "Authorization",
- };
-}
-
const app = new Hono();
app.use("/health", async (c) => {
@@ -93,39 +27,8 @@ if (process.env.TOKEN?.length) {
console.warn("no token set, ai requests will not be secured");
}
-app.use("/ai", cors(), async (c) => {
- const url = c.req.query("url");
- if (!url) {
- return c.json({ error: "url parameter is required" }, 400);
- }
-
- const provider = c.req.query("provider");
- if (!provider) {
- return c.json({ error: "provider parameter is required" }, 400);
- }
-
- const providerInfo = getProviderInfo(provider);
-
- if (providerInfo === "not-found") {
- return c.json(
- {
- error: `provider / key not found for provider ${provider}. Make sure to load correct env variables.`,
- },
- 404,
- );
- }
-
- // eslint-disable-next-line no-console
- console.log("Proxying request to", url);
- const request = new Request(url, c.req.raw);
- if (providerInfo.header === "Authorization") {
- request.headers.set("Authorization", `Bearer ${providerInfo.key}`);
- } else {
- request.headers.set(providerInfo.header, `${providerInfo.key}`);
- }
-
- return proxyFetch(request);
-});
+app.route("/ai/proxy", proxyRoute);
+app.route("/ai/vercel-ai-sdk", vercelAiSdkRoute);
const http2 = existsSync("localhost.pem");
serve(
diff --git a/packages/xl-ai-server/src/routes/proxy.ts b/packages/xl-ai-server/src/routes/proxy.ts
new file mode 100644
index 0000000000..59e478b0fc
--- /dev/null
+++ b/packages/xl-ai-server/src/routes/proxy.ts
@@ -0,0 +1,105 @@
+import { Hono } from "hono";
+import { cors } from "hono/cors";
+
+const ignoreHeadersRe = /^content-(?:encoding|length|range)$/i;
+
+// REC: we might be able to replace this by https://github.com/honojs/hono/pull/3589
+export const proxyFetch: typeof fetch = async (request, options) => {
+ const req = new Request(request, options);
+ req.headers.delete("accept-encoding"); // TBD: there may be cases where you want to explicitly specify
+ req.headers.delete("Origin");
+ const res = await fetch(req);
+
+ const headers: HeadersInit = [...res.headers.entries()].filter(
+ ([k]) => !ignoreHeadersRe.test(k) && k !== "strict-transport-security",
+ );
+
+ const readable = res.body;
+
+ // For debugging purposes, we can log the chunks as they stream:
+
+ // const { readable, writable } = new TransformStream({
+ // async transform(chunk, controller) {
+ // // Log each chunk as it passes through
+
+ // // optional, wait to test streaming mode
+ // // await new Promise((resolve) => setTimeout(resolve, 3000));
+
+ // console.log("Streaming chunk:", new TextDecoder().decode(chunk));
+ // controller.enqueue(chunk);
+ // },
+ // });
+
+ // // Pipe the response body through our transform stream
+ // res.body?.pipeTo(writable).catch((err) => {
+ // console.error("Error in stream:", err);
+ // });
+
+ return new Response(readable, {
+ ...res,
+ status: res.status,
+ statusText: res.statusText,
+ headers,
+ });
+};
+
+function getProviderInfo(provider: string) {
+ const envKey = `${provider.toUpperCase().replace(/-/g, "_")}_API_KEY`;
+ const key = process.env[envKey];
+ if (!key || !key.length) {
+ return "not-found";
+ }
+ if (provider === "google") {
+ return {
+ key,
+ header: "x-goog-api-key",
+ };
+ }
+ if (provider === "anthropic") {
+ return {
+ key,
+ header: "x-api-key",
+ };
+ }
+
+ return {
+ key,
+ header: "Authorization",
+ };
+}
+
+export const proxyRoute = new Hono();
+
+proxyRoute.use("", cors(), async (c) => {
+ const url = c.req.query("url");
+ if (!url) {
+ return c.json({ error: "url parameter is required" }, 400);
+ }
+
+ const provider = c.req.query("provider");
+ if (!provider) {
+ return c.json({ error: "provider parameter is required" }, 400);
+ }
+
+ const providerInfo = getProviderInfo(provider);
+
+ if (providerInfo === "not-found") {
+ return c.json(
+ {
+ error: `provider / key not found for provider ${provider}. Make sure to load correct env variables.`,
+ },
+ 404,
+ );
+ }
+
+ // eslint-disable-next-line no-console
+ console.log("Proxying request to", url);
+ const request = new Request(url, c.req.raw);
+ if (providerInfo.header === "Authorization") {
+ request.headers.set("Authorization", `Bearer ${providerInfo.key}`);
+ } else {
+ request.headers.set(providerInfo.header, `${providerInfo.key}`);
+ }
+
+ return proxyFetch(request);
+});
diff --git a/packages/xl-ai-server/src/routes/vercelAiSdk.ts b/packages/xl-ai-server/src/routes/vercelAiSdk.ts
new file mode 100644
index 0000000000..acbbd4b391
--- /dev/null
+++ b/packages/xl-ai-server/src/routes/vercelAiSdk.ts
@@ -0,0 +1,97 @@
+import { createOpenAI } from "@ai-sdk/openai";
+import {
+ objectToDataStream,
+ partialObjectStreamToDataStream,
+} from "@blocknote/xl-ai";
+import {
+ generateObject,
+ generateText,
+ jsonSchema,
+ streamObject,
+ streamText,
+} from "ai";
+import { Hono } from "hono";
+import { cors } from "hono/cors";
+
+export const vercelAiSdkRoute = new Hono();
+
+const model = createOpenAI({
+ apiKey: process.env.OPENAI_API_KEY,
+})("gpt-4-turbo");
+
+// TODO: add support for generateText + tools
+vercelAiSdkRoute.post("/generateText", cors(), async (c) => {
+ // TODO
+ const { messages } = await c.req.json();
+
+ const result = generateText({
+ model,
+ messages,
+ tools: {
+ // add: tool({}),
+ },
+ });
+
+ // return result.toDataStreamResponse();
+});
+
+// TODO: add support for streamText + tools
+vercelAiSdkRoute.post("/streamText", cors(), async (c) => {
+ // TODO
+ const { messages } = await c.req.json();
+
+ const result = streamText({
+ model,
+ messages,
+ toolCallStreaming: true,
+ tools: {
+ // add: tool({}),
+ },
+ });
+
+ return result.toDataStreamResponse();
+});
+
+vercelAiSdkRoute.post("/streamObject", cors(), async (c) => {
+ const { messages, schema } = await c.req.json();
+
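+  // Stream a JSON object matching the schema built from the client's stream tools,
+  // then convert the partial-object stream into a data stream the client can parse back into operations.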
+ const result = streamObject({
+ model,
+ messages,
+ output: "object",
+ schema: jsonSchema(schema),
+ });
+
+ const dataStream = partialObjectStreamToDataStream(result.fullStream);
+
+ return new Response(dataStream.pipeThrough(new TextEncoderStream()), {
+ status: 200,
+ statusText: "OK",
+ headers: {
+      "Content-Type": "text/plain; charset=utf-8",
+      "X-Vercel-AI-Data-Stream": "v1",
+ },
+ });
+});
+
+vercelAiSdkRoute.post("/generateObject", cors(), async (c) => {
+ const { messages, schema } = await c.req.json();
+
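+  // Non-streaming variant: generate the complete object first, then emit it as a single-chunk data stream.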
+ const result = await generateObject({
+ model,
+ messages,
+ output: "object",
+ schema: jsonSchema(schema),
+ });
+
+ const dataStream = objectToDataStream(result.object);
+
+ return new Response(dataStream.pipeThrough(new TextEncoderStream()), {
+ status: 200,
+ statusText: "OK",
+ headers: {
+      "Content-Type": "text/plain; charset=utf-8",
+      "X-Vercel-AI-Data-Stream": "v1",
+ },
+ });
+});
diff --git a/packages/xl-ai-server/vite.config.ts b/packages/xl-ai-server/vite.config.ts
index c2041c3105..a2aac2c75f 100644
--- a/packages/xl-ai-server/vite.config.ts
+++ b/packages/xl-ai-server/vite.config.ts
@@ -19,6 +19,7 @@ export default defineConfig((conf) => ({
: ({
// load live from sources with live reload working
"@blocknote/core": path.resolve(__dirname, "../core/src/"),
+ "@blocknote/xl-ai": path.resolve(__dirname, "../xl-ai/src/"),
          } as Record<string, string>),
},
build: {
diff --git a/packages/xl-ai/package.json b/packages/xl-ai/package.json
index c28c8339f2..08f97130a7 100644
--- a/packages/xl-ai/package.json
+++ b/packages/xl-ai/package.json
@@ -70,7 +70,9 @@
"@blocknote/react": "0.35.0",
"@floating-ui/react": "^0.26.4",
"@tiptap/core": "^2.12.0",
- "ai": "^4.3.15",
+ "ai": "^4.3.19",
+ "@ai-sdk/ui-utils": "^1.2.11",
+ "@ai-sdk/provider-utils": "^2.2.8",
"lodash.isequal": "^4.5.0",
"prosemirror-changeset": "^2.3.0",
"prosemirror-model": "^1.24.1",
diff --git a/packages/xl-ai/src/AIExtension.ts b/packages/xl-ai/src/AIExtension.ts
index e4ba5d6840..a5f12cd134 100644
--- a/packages/xl-ai/src/AIExtension.ts
+++ b/packages/xl-ai/src/AIExtension.ts
@@ -8,16 +8,20 @@ import {
revertSuggestions,
suggestChanges,
} from "@blocknote/prosemirror-suggest-changes";
-import { APICallError, LanguageModel, RetryError } from "ai";
+import { APICallError, RetryError } from "ai";
+import { Fragment, Slice } from "prosemirror-model";
import { Plugin, PluginKey } from "prosemirror-state";
import { fixTablesKey } from "prosemirror-tables";
import { createStore, StoreApi } from "zustand/vanilla";
-import { doLLMRequest, LLMRequestOptions } from "./api/LLMRequest.js";
+import {
+ doLLMRequest,
+ ExecuteLLMRequestOptions,
+ LLMRequestOptions,
+} from "./api/LLMRequest.js";
import { LLMResponse } from "./api/LLMResponse.js";
import { PromptBuilder } from "./api/formats/PromptBuilder.js";
import { LLMFormat, llmFormats } from "./api/index.js";
import { createAgentCursorPlugin } from "./plugins/AgentCursorPlugin.js";
-import { Fragment, Slice } from "prosemirror-model";
type MakeOptional<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;
@@ -61,15 +65,6 @@ type AIPluginState = {
* configuration options for LLM calls that are shared across all calls by default
*/
type GlobalLLMRequestOptions = {
- /**
- * The default language model to use for LLM calls
- */
- model: LanguageModel;
- /**
- * Whether to stream the LLM response
- * @default true
- */
- stream?: boolean;
/**
* The default data format to use for LLM calls
* html format is recommended, the other formats are experimental
@@ -81,6 +76,13 @@ type GlobalLLMRequestOptions = {
* @default the default prompt builder for the selected {@link dataFormat}
*/
promptBuilder?: PromptBuilder;
+
+ /**
+ * Customize how your LLM backend is called.
+ * Implement this function if you want to call a backend that is not compatible with
+ * the Vercel AI SDK
+ */
+  executor: (opts: ExecuteLLMRequestOptions) => Promise<LLMResponse>;
};
const PLUGIN_KEY = new PluginKey(`blocknote-ai-plugin`);
@@ -348,7 +350,7 @@ export class AIExtension extends BlockNoteExtension {
/**
* Execute a call to an LLM and apply the result to the editor
*/
-  public async callLLM(opts: MakeOptional<LLMRequestOptions, "model">) {
+  public async callLLM(opts: MakeOptional<LLMRequestOptions, "executor">) {
this.setAIResponseStatus("thinking");
this.editor.forkYDocPlugin?.fork();
diff --git a/packages/xl-ai/src/api/LLMRequest.ts b/packages/xl-ai/src/api/LLMRequest.ts
index 170e020e5b..006e9d5028 100644
--- a/packages/xl-ai/src/api/LLMRequest.ts
+++ b/packages/xl-ai/src/api/LLMRequest.ts
@@ -1,9 +1,6 @@
import { BlockNoteEditor } from "@blocknote/core";
-import { CoreMessage, generateObject, LanguageModelV1, streamObject } from "ai";
-import {
- generateOperations,
- streamOperations,
-} from "../streamTool/callLLMWithStreamTools.js";
+import { CoreMessage } from "ai";
+import { StreamTool } from "../streamTool/streamTool.js";
import { isEmptyParagraph } from "../util/emptyBlock.js";
import { LLMResponse } from "./LLMResponse.js";
import type { PromptBuilder } from "./formats/PromptBuilder.js";
@@ -11,14 +8,24 @@ import { htmlBlockLLMFormat } from "./formats/html-blocks/htmlBlocks.js";
import { LLMFormat } from "./index.js";
import { trimEmptyBlocks } from "./promptHelpers/trimEmptyBlocks.js";
+type MakeOptional<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;
+
+export type ExecuteLLMRequestOptions = {
+ messages: CoreMessage[];
+  streamTools: StreamTool<any>[];
+ // TODO: needed?
+ llmRequestOptions: MakeOptional;
+ onStart?: () => void;
+};
+
export type LLMRequestOptions = {
/**
- * The language model to use for the LLM call (AI SDK)
- *
- * (when invoking `callLLM` via the `AIExtension` this will default to the
- * model set in the `AIExtension` options)
+ * Customize how your LLM backend is called.
+ * Implement this function if you want to call a backend that is not compatible with
+ * the Vercel AI SDK
*/
- model: LanguageModelV1;
+  executor: (opts: ExecuteLLMRequestOptions) => Promise<LLMResponse>;
+
/**
* The user prompt to use for the LLM call
*/
@@ -44,12 +51,6 @@ export type LLMRequestOptions = {
* @default provided by the format (e.g. `llm.html.defaultPromptBuilder`)
*/
promptBuilder?: PromptBuilder;
- /**
- * The maximum number of retries for the LLM call
- *
- * @default 2
- */
- maxRetries?: number;
/**
* Whether to use the editor selection for the LLM call
*
@@ -62,22 +63,13 @@ export type LLMRequestOptions = {
* @default { add: true, update: true, delete: true }
*/
defaultStreamTools?: {
- /** Enable the add tool (default: true) */
+ /** Enable the add tool (default: false) */
add?: boolean;
- /** Enable the update tool (default: true) */
+ /** Enable the update tool (default: false) */
update?: boolean;
- /** Enable the delete tool (default: true) */
+ /** Enable the delete tool (default: false) */
delete?: boolean;
};
- /**
- * Whether to stream the LLM response or not
- *
- * When streaming, we use the AI SDK `streamObject` function,
- * otherwise, we use the AI SDK `generateObject` function.
- *
- * @default true
- */
- stream?: boolean;
/**
* If the user's cursor is in an empty paragraph, automatically delete it when the AI
* is starting to write.
@@ -103,16 +95,6 @@ export type LLMRequestOptions = {
* @default true
*/
withDelays?: boolean;
- /**
- * Additional options to pass to the AI SDK `generateObject` function
- * (only used when `stream` is `false`)
- */
- _generateObjectOptions?: Partial>[0]>;
- /**
- * Additional options to pass to the AI SDK `streamObject` function
- * (only used when `stream` is `true`)
- */
- _streamObjectOptions?: Partial>[0]>;
};
/**
@@ -130,16 +112,13 @@ export async function doLLMRequest(
userPrompt,
useSelection,
deleteEmptyCursorBlock,
- stream,
onStart,
withDelays,
dataFormat,
previousResponse,
...rest
} = {
- maxRetries: 2,
deleteEmptyCursorBlock: true,
- stream: true,
withDelays: true,
dataFormat: htmlBlockLLMFormat,
...opts,
@@ -192,14 +171,15 @@ export async function doLLMRequest(
For now, this approach works ok.
*/
- previousMessages.push({
- role: "system", // using "assistant" here doesn't work with gemini because we can't mix system / assistant messages
- content:
- "ASSISTANT_MESSAGE: These are the operations returned by a previous LLM call: \n" +
- JSON.stringify(
- await previousResponse.llmResult.getGeneratedOperations(),
- ),
- });
+ // TODO: fix
+ // previousMessages.push({
+ // role: "system", // using "assistant" here doesn't work with gemini because we can't mix system / assistant messages
+ // content:
+ // "ASSISTANT_MESSAGE: These are the operations returned by a previous LLM call: \n" +
+ // JSON.stringify(
+ // await previousResponse.llmResult.getGeneratedOperations(),
+ // ),
+ // });
}
const messages = await promptBuilder(editor, {
@@ -219,34 +199,20 @@ export async function doLLMRequest(
opts.onBlockUpdate,
);
- let response:
- | Awaited>>
- | Awaited>>;
-
- if (stream) {
- response = await streamOperations(
- streamTools,
- {
- messages,
- ...rest,
- },
- () => {
- if (deleteCursorBlock) {
- editor.removeBlocks([deleteCursorBlock]);
- }
- onStart?.();
- },
- );
- } else {
- response = await generateOperations(streamTools, {
- messages,
+ // TODO: design decision, does it make sense to pass `messages` here, or should creating the message array
+ // be the responsibility of the executor / server, and should we pass editor state instead?
+ return opts.executor({
+ onStart: () => {
+ if (deleteCursorBlock) {
+ editor.removeBlocks([deleteCursorBlock]);
+ }
+ onStart?.();
+ },
+ messages,
+ streamTools,
+ llmRequestOptions: {
+ ...opts,
...rest,
- });
- if (deleteCursorBlock) {
- editor.removeBlocks([deleteCursorBlock]);
- }
- onStart?.();
- }
-
- return new LLMResponse(messages, response, streamTools);
+ },
+ });
}
diff --git a/packages/xl-ai/src/api/LLMResponse.ts b/packages/xl-ai/src/api/LLMResponse.ts
index 1321ab5b9d..7dcfc73501 100644
--- a/packages/xl-ai/src/api/LLMResponse.ts
+++ b/packages/xl-ai/src/api/LLMResponse.ts
@@ -1,9 +1,35 @@
import { CoreMessage } from "ai";
-import { OperationsResult } from "../streamTool/callLLMWithStreamTools.js";
import { StreamTool, StreamToolCall } from "../streamTool/streamTool.js";
+import { StreamToolExecutor } from "../streamTool/StreamToolExecutor.js";
+import { AsyncIterableStream } from "../util/stream.js";
+
+/**
+ * Result of an LLM call with stream tools
+ */
+export type OperationsResult<T extends StreamTool<any>[]> =
+ AsyncIterableStream<{
+ /**
+ * The operation the LLM wants to execute
+ */
+    operation: StreamToolCall<T>;
+ /**
+ * Whether {@link operation} is an update to the previous operation in the stream.
+ *
+ * For non-streaming mode, this will always be `false`
+ */
+ isUpdateToPreviousOperation: boolean;
+ /**
+ * Whether the {@link operation} is possibly partial (i.e. the LLM is still streaming data about this operation)
+ *
+ * For non-streaming mode, this will always be `false`
+ */
+ isPossiblyPartial: boolean;
+ }>;
/**
* Result of an LLM call with stream tools that apply changes to a BlockNote Editor
+ *
+ * TODO: maybe get rid of this class?
*/
export class LLMResponse {
/**
@@ -23,42 +49,32 @@ export class LLMResponse {
private readonly streamTools: StreamTool[],
) {}
- /**
- * Apply the operations to the editor and return a stream of results.
- *
- * (this method consumes underlying streams in `llmResult`)
- */
- async *applyToolCalls() {
- let currentStream: AsyncIterable<{
- operation: StreamToolCall[]>;
- isUpdateToPreviousOperation: boolean;
- isPossiblyPartial: boolean;
- }> = this.llmResult.operationsSource;
- for (const tool of this.streamTools) {
- currentStream = tool.execute(currentStream);
- }
- yield* currentStream;
- }
-
/**
* Helper method to apply all operations to the editor if you're not interested in intermediate operations and results.
*
* (this method consumes underlying streams in `llmResult`)
*/
public async execute() {
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
- for await (const _result of this.applyToolCalls()) {
- // no op
- }
+ const executor = new StreamToolExecutor(this.streamTools);
+ await executor.execute(this.llmResult);
+ await executor.waitTillEnd();
}
/**
* @internal
+ *
+ * TODO
*/
public async _logToolCalls() {
- for await (const toolCall of this.llmResult.operationsSource) {
+ for await (const toolCall of this.llmResult) {
// eslint-disable-next-line no-console
console.log(JSON.stringify(toolCall, null, 2));
}
}
}
+
+// TODO
+// async getGeneratedOperations() {
+// return { operations };
+// },
+// }
diff --git a/packages/xl-ai/src/api/formats/base-tools/createUpdateBlockTool.ts b/packages/xl-ai/src/api/formats/base-tools/createUpdateBlockTool.ts
index 6a81d0d469..f3a16f7a3d 100644
--- a/packages/xl-ai/src/api/formats/base-tools/createUpdateBlockTool.ts
+++ b/packages/xl-ai/src/api/formats/base-tools/createUpdateBlockTool.ts
@@ -196,7 +196,6 @@ export function createUpdateBlockTool(config: {
}
const operation = chunk.operation as UpdateBlockToolCall;
-
if (chunk.isPossiblyPartial) {
const size = JSON.stringify(operation.block).length;
if (size < minSize) {
diff --git a/packages/xl-ai/src/api/formats/html-blocks/htmlBlocks.test.ts b/packages/xl-ai/src/api/formats/html-blocks/htmlBlocks.test.ts
index aafb57b6ad..236a2fd144 100644
--- a/packages/xl-ai/src/api/formats/html-blocks/htmlBlocks.test.ts
+++ b/packages/xl-ai/src/api/formats/html-blocks/htmlBlocks.test.ts
@@ -3,6 +3,7 @@ import { getSortedEntries, snapshot, toHashString } from "msw-snapshot";
import { setupServer } from "msw/node";
import path from "path";
import { afterAll, afterEach, beforeAll, describe } from "vitest";
+import { createAISDKLLMRequestExecutor } from "../../../streamTool/vercelAiSdk/clientSideExecutor/clientSideExecutor.js";
import { testAIModels } from "../../../testUtil/testAIModels.js";
import { doLLMRequest } from "../../LLMRequest.js";
import { generateSharedTestCases } from "../tests/sharedTestCases.js";
@@ -125,10 +126,12 @@ describe("Models", () => {
doLLMRequest(editor, {
...options,
dataFormat: htmlBlockLLMFormat,
- model: params.model,
- maxRetries: 0,
- stream: params.stream,
withDelays: false,
+ executor: createAISDKLLMRequestExecutor({
+ model: params.model,
+ stream: params.stream,
+ maxRetries: 0,
+ }),
}),
// TODO: remove when matthew's parsing PR is merged
{
diff --git a/packages/xl-ai/src/api/formats/html-blocks/htmlBlocks.ts b/packages/xl-ai/src/api/formats/html-blocks/htmlBlocks.ts
index e819271f58..bf78be57b7 100644
--- a/packages/xl-ai/src/api/formats/html-blocks/htmlBlocks.ts
+++ b/packages/xl-ai/src/api/formats/html-blocks/htmlBlocks.ts
@@ -8,29 +8,48 @@ import {
} from "./htmlPromptData.js";
import { tools } from "./tools/index.js";
-function getStreamTools(
+// Import the tool call types
+import { AddBlocksToolCall } from "../base-tools/createAddBlocksTool.js";
+import { UpdateBlockToolCall } from "../base-tools/createUpdateBlockTool.js";
+import { DeleteBlockToolCall } from "../base-tools/delete.js";
+
+// Define the tool types
+export type AddTool = StreamTool<AddBlocksToolCall<string>>;
+export type UpdateTool = StreamTool<UpdateBlockToolCall<string>>;
+export type DeleteTool = StreamTool<DeleteBlockToolCall>;
+
+// Create a conditional type that maps boolean flags to tool types
+export type StreamToolsConfig = {
+ add?: boolean;
+ update?: boolean;
+ delete?: boolean;
+};
+
+export type StreamToolsResult<T extends StreamToolsConfig> = [
+ ...(T extends { update: true } ? [UpdateTool] : []),
+ ...(T extends { add: true } ? [AddTool] : []),
+ ...(T extends { delete: true } ? [DeleteTool] : []),
+];
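+// e.g. StreamToolsResult<{ add: true; delete: true }> resolves to [AddTool, DeleteTool]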
+
+function getStreamTools<
+ T extends StreamToolsConfig = { add: true; update: true; delete: true },
+>(
editor: BlockNoteEditor,
withDelays: boolean,
- defaultStreamTools?: {
- /** Enable the add tool (default: true) */
- add?: boolean;
- /** Enable the update tool (default: true) */
- update?: boolean;
- /** Enable the delete tool (default: true) */
- delete?: boolean;
- },
+ defaultStreamTools?: T,
selectionInfo?: {
from: number;
to: number;
},
onBlockUpdate?: (blockId: string) => void,
-) {
- const mergedStreamTools = {
- add: true,
- update: true,
- delete: true,
- ...defaultStreamTools,
- };
+): StreamToolsResult<T> {
+ const mergedStreamTools =
+ defaultStreamTools ??
+ ({
+ add: true,
+ update: true,
+ delete: true,
+ } as T);
const streamTools: StreamTool[] = [
...(mergedStreamTools.update
@@ -51,7 +70,7 @@ function getStreamTools(
: []),
];
- return streamTools;
+  return streamTools as StreamToolsResult<T>;
}
export const htmlBlockLLMFormat = {
diff --git a/packages/xl-ai/src/api/formats/json/errorHandling.test.ts b/packages/xl-ai/src/api/formats/json/errorHandling.test.ts
index 5edee6b452..a47b7e8b0a 100644
--- a/packages/xl-ai/src/api/formats/json/errorHandling.test.ts
+++ b/packages/xl-ai/src/api/formats/json/errorHandling.test.ts
@@ -5,6 +5,7 @@ import { BlockNoteEditor } from "@blocknote/core";
import { HttpResponse, http } from "msw";
import { setupServer } from "msw/node";
import { createBlockNoteAIClient } from "../../../blocknoteAIClient/client.js";
+import { createAISDKLLMRequestExecutor } from "../../../streamTool/vercelAiSdk/clientSideExecutor/clientSideExecutor.js";
import { doLLMRequest } from "../../LLMRequest.js";
import { jsonLLMFormat } from "./json.js";
@@ -35,60 +36,64 @@ describe("Error handling", () => {
errorServer.resetHandlers();
});
- it("handles 429 Too Many Requests error", async () => {
- // Set up handler for this specific test
- errorServer.use(
- http.post("*", () => {
- return new HttpResponse(
- JSON.stringify({
- error: {
- message: "Rate limit exceeded, please try again later",
- type: "rate_limit_exceeded",
- code: "rate_limit_exceeded",
+ [{ stream: true }, { stream: false }].forEach(({ stream }) => {
+ it(`handles 429 Too Many Requests error ${stream ? "streaming" : "non-streaming"}`, async () => {
+ // Set up handler for this specific test
+ errorServer.use(
+ http.post("*", () => {
+ return new HttpResponse(
+ JSON.stringify({
+ error: {
+ message: "Rate limit exceeded, please try again later",
+ type: "rate_limit_exceeded",
+ code: "rate_limit_exceeded",
+ },
+ }),
+ {
+ status: 429,
+ headers: {
+ "Content-Type": "application/json",
+ },
},
- }),
+ );
+ }),
+ );
+
+ const editor = BlockNoteEditor.create({
+ initialContent: [
{
- status: 429,
- headers: {
- "Content-Type": "application/json",
- },
+ type: "paragraph",
+ content: "Hello world",
},
- );
- }),
- );
-
- const editor = BlockNoteEditor.create({
- initialContent: [
- {
- type: "paragraph",
- content: "Hello world",
- },
- ],
- });
+ ],
+ });
- // Use a flag to track if an error was thrown
- let errorThrown = false;
- let caughtError: any = null;
+ // Use a flag to track if an error was thrown
+ let errorThrown = false;
+ let caughtError: any = null;
- try {
- const result = await doLLMRequest(editor, {
- stream: true,
- userPrompt: "translate to Spanish",
- model: openai,
- maxRetries: 0,
- dataFormat: jsonLLMFormat,
- });
- await result.execute();
- } catch (error: any) {
- errorThrown = true;
- caughtError = error;
- }
+ try {
+ const result = await doLLMRequest(editor, {
+ userPrompt: "translate to Spanish",
+ executor: createAISDKLLMRequestExecutor({
+ model: openai,
+ maxRetries: 0,
+ stream,
+ }),
+ dataFormat: jsonLLMFormat,
+ });
+ await result.execute();
+ } catch (error: any) {
+ errorThrown = true;
+ caughtError = error;
+ }
- // Assertions outside the try/catch
- expect(errorThrown).toBe(true);
- expect(caughtError).toBeDefined();
- expect(caughtError.message || caughtError.toString()).toContain(
- "Rate limit exceeded, please try again later",
- );
+ // Assertions outside the try/catch
+ expect(errorThrown).toBe(true);
+ expect(caughtError).toBeDefined();
+ expect(caughtError.message || caughtError.toString()).toContain(
+ "Rate limit exceeded, please try again later",
+ );
+ });
});
});
diff --git a/packages/xl-ai/src/api/formats/json/json.test.ts b/packages/xl-ai/src/api/formats/json/json.test.ts
index 16abc26e43..b6034264ae 100644
--- a/packages/xl-ai/src/api/formats/json/json.test.ts
+++ b/packages/xl-ai/src/api/formats/json/json.test.ts
@@ -6,6 +6,7 @@ import { setupServer } from "msw/node";
import path from "path";
import { generateSharedTestCases } from "../tests/sharedTestCases.js";
+import { createAISDKLLMRequestExecutor } from "../../../streamTool/vercelAiSdk/clientSideExecutor/clientSideExecutor.js";
import { testAIModels } from "../../../testUtil/testAIModels.js";
import { doLLMRequest } from "../../LLMRequest.js";
import { jsonLLMFormat } from "./json.js";
@@ -115,9 +116,11 @@ describe.skip("Models", () => {
generateSharedTestCases((editor, options) =>
doLLMRequest(editor, {
...options,
- stream: params.stream,
- model: params.model,
- maxRetries: 0,
+ executor: createAISDKLLMRequestExecutor({
+ model: params.model,
+ maxRetries: 0,
+ stream: params.stream,
+ }),
withDelays: false,
dataFormat: jsonLLMFormat,
}),
diff --git a/packages/xl-ai/src/api/formats/json/tools/jsontools.test.ts b/packages/xl-ai/src/api/formats/json/tools/jsontools.test.ts
index 065a09b5ac..30210a743a 100644
--- a/packages/xl-ai/src/api/formats/json/tools/jsontools.test.ts
+++ b/packages/xl-ai/src/api/formats/json/tools/jsontools.test.ts
@@ -17,6 +17,7 @@ import { tools } from "./index.js";
import { getAIExtension } from "../../../../AIExtension.js";
import { getExpectedEditor } from "../../../../testUtil/cases/index.js";
import { validateRejectingResultsInOriginalDoc } from "../../../../testUtil/suggestChangesTestUtil.js";
+
async function* createMockStream(
...operations: {
operation:
@@ -58,12 +59,7 @@ async function executeTestCase(
// bit hacky way to instantiate an LLMResponse just so we can call execute
const result = new LLMResponse(
undefined as any,
- {
- operationsSource: createAsyncIterableStreamFromAsyncIterable(stream),
- streamObjectResult: undefined,
- generateObjectResult: undefined,
- getGeneratedOperations: undefined as any,
- },
+ createAsyncIterableStreamFromAsyncIterable(stream),
streamTools,
);
diff --git a/packages/xl-ai/src/api/formats/markdown-blocks/markdownBlocks.test.ts b/packages/xl-ai/src/api/formats/markdown-blocks/markdownBlocks.test.ts
index 4bb89079f6..ddcefc7a80 100644
--- a/packages/xl-ai/src/api/formats/markdown-blocks/markdownBlocks.test.ts
+++ b/packages/xl-ai/src/api/formats/markdown-blocks/markdownBlocks.test.ts
@@ -4,6 +4,7 @@ import { getCurrentTest } from "@vitest/runner";
import { getSortedEntries, snapshot, toHashString } from "msw-snapshot";
import { setupServer } from "msw/node";
import path from "path";
+import { createAISDKLLMRequestExecutor } from "../../../streamTool/vercelAiSdk/clientSideExecutor/clientSideExecutor.js";
import { testAIModels } from "../../../testUtil/testAIModels.js";
import { doLLMRequest } from "../../LLMRequest.js";
import { generateSharedTestCases } from "../tests/sharedTestCases.js";
@@ -114,9 +115,11 @@ describe("Models", () => {
(editor, options) =>
doLLMRequest(editor, {
...options,
- model: params.model,
- maxRetries: 0,
- stream: params.stream,
+ executor: createAISDKLLMRequestExecutor({
+ model: params.model,
+ maxRetries: 0,
+ stream: params.stream,
+ }),
withDelays: false,
dataFormat: markdownBlocksLLMFormat,
// _generateObjectOptions: {
diff --git a/packages/xl-ai/src/api/index.ts b/packages/xl-ai/src/api/index.ts
index fef3fbc230..9a84127930 100644
--- a/packages/xl-ai/src/api/index.ts
+++ b/packages/xl-ai/src/api/index.ts
@@ -42,4 +42,5 @@ export const llmFormats = {
};
export { doLLMRequest as callLLM } from "./LLMRequest.js";
+export { LLMResponse } from "./LLMResponse.js";
export { promptHelpers } from "./promptHelpers/index.js";
diff --git a/packages/xl-ai/src/index.ts b/packages/xl-ai/src/index.ts
index 0f35bd8e3f..685d059609 100644
--- a/packages/xl-ai/src/index.ts
+++ b/packages/xl-ai/src/index.ts
@@ -13,3 +13,9 @@ export * from "./components/SuggestionMenu/getAISlashMenuItems.js";
export * from "./i18n/dictionary.js";
export * from "./api/index.js";
+
+// TODO: organize these exports:
+export * from "./streamTool/jsonSchema.js";
+export * from "./streamTool/StreamToolExecutor.js";
+export * from "./streamTool/vercelAiSdk/util/dataStreamResponseToOperationsResult.js";
+export * from "./streamTool/vercelAiSdk/util/partialObjectStreamUtil.js";
diff --git a/packages/xl-ai/src/streamTool/StreamToolExecutor.ts b/packages/xl-ai/src/streamTool/StreamToolExecutor.ts
new file mode 100644
index 0000000000..9da933431f
--- /dev/null
+++ b/packages/xl-ai/src/streamTool/StreamToolExecutor.ts
@@ -0,0 +1,179 @@
+import { parsePartialJson } from "@ai-sdk/ui-utils";
+import {
+ asyncIterableToStream,
+ createAsyncIterableStream,
+} from "../util/stream.js";
+import { StreamTool, StreamToolCall } from "./streamTool.js";
+
+/**
+ * The Operation type wraps a StreamToolCall with metadata on whether
+ * it's an update to an existing operation and/or a possibly partial (i.e. incomplete, streaming in progress) operation
+ */
+type Operation<T extends StreamTool<any>[] | StreamTool<any>> = {
+ /**
+ * The StreamToolCall (parameters representing a StreamTool invocation)
+ */
+  operation: StreamToolCall<T>;
+ /**
+ * Whether this operation is an update to the previous operation
+ * (i.e.: the previous operation was a partial operation for which we now have additional data)
+ */
+ isUpdateToPreviousOperation: boolean;
+ /**
+ * Whether this operation is a partial operation
+ * (i.e.: incomplete, streaming in progress)
+ */
+ isPossiblyPartial: boolean;
+};
+
+/**
+ * The StreamToolExecutor can apply StreamToolCalls to an editor.
+ *
+ * It accepts StreamToolCalls as JSON strings or already parsed and validated Operations.
+ * Note: When passing JSON strings, the executor will parse and validate them into Operations.
+ * When passing Operations, they're expected to have been validated by the StreamTool instances already.
+ * (StreamTool.validate)
+ *
+ * Applying the operations is delegated to the StreamTool instances.
+ *
+ * @example see the `manual-execution` example
+ */
+export class StreamToolExecutor<T extends StreamTool<any>[]> {
+ private readonly stream: TransformStream<string | Operation<T>, Operation<T>>;
+ private readonly readable: ReadableStream<Operation<T>>;
+
+ /**
+ * @param streamTools - The StreamTools to use to apply the StreamToolCalls
+ */
+ constructor(private streamTools: T) {
+ this.stream = this.createWriteStream();
+ this.readable = this.createReadableStream();
+ }
+
+ private createWriteStream() {
+ let lastParsedResult: Operation<T> | undefined;
+
+ const stream = new TransformStream<string | Operation<T>, Operation<T>>({
+ transform: (chunk, controller) => {
+ const operation =
+ typeof chunk === "string"
+ ? partialJsonToOperation(
+ chunk,
+ lastParsedResult?.isPossiblyPartial ?? false,
+ this.streamTools,
+ )
+ : chunk;
+ if (operation) {
+ // TODO: string operations have been validated, but object-based operations have not.
+ // To make this consistent, maybe we should extract the string parser to a separate transformer
+ lastParsedResult = operation;
+ controller.enqueue(operation);
+ }
+ },
+
+ flush: (controller) => {
+ // Check if the stream ended with a partial operation
+ if (lastParsedResult?.isPossiblyPartial) {
+ controller.error(new Error("stream ended with a partial operation"));
+ }
+ },
+ });
+
+ return stream;
+ }
+
+ private createReadableStream() {
+ // this is a bit hacky as it mixes async iterables and streams
+ // would be better to stick to streams
+ let currentStream: AsyncIterable<Operation<StreamTool<any>[]>> =
+ createAsyncIterableStream(this.stream.readable);
+ for (const tool of this.streamTools) {
+ currentStream = tool.execute(currentStream);
+ }
+
+ return asyncIterableToStream(currentStream);
+ }
+
+ /**
+ * Helper method to apply all operations to the editor if you're not interested in intermediate operations and results.
+ */
+ public async waitTillEnd() {
+ const iterable = createAsyncIterableStream(this.readable);
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ for await (const _result of iterable) {
+ // no op
+ // these will be operations without a matching StreamTool.
+ // (we probably want to allow a way to access and handle these, but for now we haven't run into this scenario yet)
+ }
+ }
+
+ /**
+ * Returns a WritableStream that can be used to write StreamToolCalls to the executor.
+ *
+ * The WritableStream accepts JSON strings or Operation objects.
+ */
+ public get writable() {
+ return this.stream.writable;
+ }
+
+ /**
+ * Accepts an async iterable and writes each chunk to the internal stream.
+ *
+ * (alternative to writing to the writable stream using {@link writable})
+ */
+ async execute(source: AsyncIterable<string | Operation<T>>): Promise<void> {
+ const writer = this.writable.getWriter();
+ for await (const chunk of source) {
+ await writer.write(chunk);
+ }
+ await writer.close();
+ }
+
+ /**
+ * Accepts a single chunk and processes it using the same logic.
+ *
+ * (alternative to writing to the writable stream using {@link writable})
+ */
+ async executeOne(chunk: StreamToolCall<T>): Promise<void> {
+ await this.execute(
+ (async function* () {
+ yield {
+ operation: chunk,
+ isUpdateToPreviousOperation: false,
+ isPossiblyPartial: false,
+ };
+ })(),
+ );
+ }
+}
+
+function partialJsonToOperation<T extends StreamTool<any>[]>(
+ chunk: string,
+ isUpdateToPreviousOperation: boolean,
+ streamTools: T,
+): Operation<T> | undefined {
+ const parsed = parsePartialJson(chunk);
+
+ if (parsed.state === "undefined-input" || parsed.state === "failed-parse") {
+ return undefined;
+ }
+
+ if (!parsed) {
+ return;
+ }
+
+ const func = streamTools.find((f) => f.name === (parsed.value as any)?.type);
+
+ const validated = func && func.validate(parsed.value);
+
+ if (validated?.ok) {
+ return {
+ operation: validated.value as StreamToolCall<T>,
+ isPossiblyPartial: parsed.state === "repaired-parse",
+ isUpdateToPreviousOperation,
+ };
+ } else {
+ // no worries, probably a partial operation that's not valid yet
+ return;
+ }
+}
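To make the intended usage of the new executor concrete, here is a minimal sketch (not taken from this diff) of driving a `StreamToolExecutor` with a stream of JSON tool-call chunks. The `streamTools` array and the source of `jsonChunks` are assumptions; in practice they would come from one of the `llmFormats` and from an LLM response respectively.

```ts
// Sketch, assuming `streamTools` is a StreamTool<any>[] obtained elsewhere
// (for example from one of the llmFormats) and `jsonChunks` is a stream of
// (possibly partial) JSON strings describing tool calls.
import { StreamToolExecutor } from "@blocknote/xl-ai";

export async function applyJsonToolCalls(
  streamTools: any[],
  jsonChunks: AsyncIterable<string>,
) {
  const executor = new StreamToolExecutor(streamTools);

  // `execute` writes every chunk to the executor's writable side, while
  // `waitTillEnd` drains the readable side so the pipeline keeps flowing;
  // running them concurrently avoids backpressure stalls.
  await Promise.all([executor.execute(jsonChunks), executor.waitTillEnd()]);
}
```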
diff --git a/packages/xl-ai/src/streamTool/asTool.ts b/packages/xl-ai/src/streamTool/asTool.ts
index f1f18fc44e..2cd9e1d018 100644
--- a/packages/xl-ai/src/streamTool/asTool.ts
+++ b/packages/xl-ai/src/streamTool/asTool.ts
@@ -1,7 +1,6 @@
import { jsonSchema, tool } from "ai";
-import { operationsToStream } from "./callLLMWithStreamTools.js";
import { createStreamToolsArraySchema } from "./jsonSchema.js";
-import { StreamTool } from "./streamTool.js";
+import { Result, StreamTool, StreamToolCall } from "./streamTool.js";
// TODO: remove or implement
@@ -42,3 +41,41 @@ export function streamToolsAsTool<T extends StreamTool<any>[]>(streamTools: T) {
},
});
}
+
+// TODO: review
+function operationsToStream<T extends StreamTool<any>[]>(
+ object: unknown,
+): Result<
+ AsyncIterable<{
+ partialOperation: StreamToolCall<T>;
+ isUpdateToPreviousOperation: boolean;
+ isPossiblyPartial: boolean;
+ }>
+> {
+ if (
+ !object ||
+ typeof object !== "object" ||
+ !("operations" in object) ||
+ !Array.isArray(object.operations)
+ ) {
+ return {
+ ok: false,
+ error: "No operations returned",
+ };
+ }
+ const operations = object.operations;
+ async function* singleChunkGenerator() {
+ for (const op of operations) {
+ yield {
+ partialOperation: op,
+ isUpdateToPreviousOperation: false,
+ isPossiblyPartial: false,
+ };
+ }
+ }
+
+ return {
+ ok: true,
+ value: singleChunkGenerator(),
+ };
+}
diff --git a/packages/xl-ai/src/streamTool/callLLMWithStreamTools.ts b/packages/xl-ai/src/streamTool/callLLMWithStreamTools.ts
deleted file mode 100644
index 1dd310ff85..0000000000
--- a/packages/xl-ai/src/streamTool/callLLMWithStreamTools.ts
+++ /dev/null
@@ -1,362 +0,0 @@
-import {
- CoreMessage,
- GenerateObjectResult,
- LanguageModel,
- ObjectStreamPart,
- StreamObjectResult,
- generateObject,
- jsonSchema,
- streamObject,
-} from "ai";
-
-import { createStreamToolsArraySchema } from "./jsonSchema.js";
-
-import {
- AsyncIterableStream,
- createAsyncIterableStream,
- createAsyncIterableStreamFromAsyncIterable,
-} from "../util/stream.js";
-import { filterNewOrUpdatedOperations } from "./filterNewOrUpdatedOperations.js";
-import {
- preprocessOperationsNonStreaming,
- preprocessOperationsStreaming,
-} from "./preprocess.js";
-import { Result, StreamTool, StreamToolCall } from "./streamTool.js";
-
-type LLMRequestOptions = {
- model: LanguageModel;
- messages: CoreMessage[];
- maxRetries: number;
-};
-
-/**
- * Result of an LLM call with stream tools
- */
-export type OperationsResult<T extends StreamTool<any>[]> = {
- /**
- * Result of the underlying `streamObject` (AI SDK) call, or `undefined` if non-streaming mode
- */
- streamObjectResult: StreamObjectResult<any, any, any> | undefined;
- /**
- * Result of the underlying `generateObject` (AI SDK) call, or `undefined` if streaming mode
- */
- generateObjectResult: GenerateObjectResult<any> | undefined;
- /**
- * Stream of tool call operations, these are the operations the LLM "decided" to execute
- *
- * Calling this consumes the underlying streams
- */
- operationsSource: AsyncIterableStream<{
- /**
- * The operation the LLM wants to execute
- */
- operation: StreamToolCall<T>;
- /**
- * Whether {@link operation} is an update to the previous operation in the stream.
- *
- * For non-streaming mode, this will always be `false`
- */
- isUpdateToPreviousOperation: boolean;
- /**
- * Whether the {@link operation} is possibly partial (i.e. the LLM is still streaming data about this operation)
- *
- * For non-streaming mode, this will always be `false`
- */
- isPossiblyPartial: boolean;
- }>;
- /**
- * All tool call operations the LLM decided to execute
- */
- getGeneratedOperations: () => Promise<{
- operations: StreamToolCall<T>[];
- }>;
-};
-
-/**
- * Calls an LLM with StreamTools, using the `generateObject` of the AI SDK.
- *
- * This is the non-streaming version.
- */
-export async function generateOperations<T extends StreamTool<any>[]>(
- streamTools: T,
- opts: LLMRequestOptions & {
- _generateObjectOptions?: Partial<Parameters<typeof generateObject<any>>[0]>;
- },
-): Promise<OperationsResult<T>> {
- const { _generateObjectOptions, ...rest } = opts;
-
- if (
- _generateObjectOptions &&
- ("output" in _generateObjectOptions || "schema" in _generateObjectOptions)
- ) {
- throw new Error(
- "Cannot provide output or schema in _generateObjectOptions",
- );
- }
-
- const schema = jsonSchema(createStreamToolsArraySchema(streamTools));
- const options = {
- // non-overridable options for streamObject
- output: "object" as const,
- schema,
-
- // configurable options for streamObject
-
- // - optional, with defaults
-
- // mistral somehow needs "auto", while groq/llama needs "tool"
- // google needs "auto" because https://github.com/vercel/ai/issues/6959
- // TODO: further research this and / or make configurable
- // for now stick to "tool" by default as this has been tested mostly
- mode:
- rest.model.provider === "mistral.chat" ||
- rest.model.provider === "google.generative-ai"
- ? "auto"
- : "tool",
- // - mandatory ones:
- ...rest,
-
- // extra options for streamObject
- ...((_generateObjectOptions ?? {}) as any),
- };
-
- const ret = await generateObject<{ operations: any }>(options);
-
- // because the rest of the codebase always expects a stream, we convert the object to a stream here
- const stream = operationsToStream(ret.object);
-
- if (!stream.ok) {
- throw new Error(stream.error);
- }
-
- let _operationsSource: OperationsResult<T>["operationsSource"];
-
- return {
- streamObjectResult: undefined,
- generateObjectResult: ret,
- get operationsSource() {
- if (!_operationsSource) {
- _operationsSource = createAsyncIterableStreamFromAsyncIterable(
- preprocessOperationsNonStreaming(stream.value, streamTools),
- );
- }
- return _operationsSource;
- },
- async getGeneratedOperations() {
- return ret.object;
- },
- };
-}
-
-export function operationsToStream<T extends StreamTool<any>[]>(
- object: unknown,
-): Result<
- AsyncIterable<{
- partialOperation: StreamToolCall<T>;
- isUpdateToPreviousOperation: boolean;
- isPossiblyPartial: boolean;
- }>
-> {
- if (
- !object ||
- typeof object !== "object" ||
- !("operations" in object) ||
- !Array.isArray(object.operations)
- ) {
- return {
- ok: false,
- error: "No operations returned",
- };
- }
- const operations = object.operations;
- async function* singleChunkGenerator() {
- for (const op of operations) {
- yield {
- partialOperation: op,
- isUpdateToPreviousOperation: false,
- isPossiblyPartial: false,
- };
- }
- }
-
- return {
- ok: true,
- value: singleChunkGenerator(),
- };
-}
-
-/**
- * Calls an LLM with StreamTools, using the `streamObject` of the AI SDK.
- *
- * This is the streaming version.
- */
-export async function streamOperations<T extends StreamTool<any>[]>(
- streamTools: T,
- opts: LLMRequestOptions & {
- _streamObjectOptions?: Partial<
- Parameters<typeof streamObject<any>>[0]
- >;
- },
- onStart: () => void = () => {
- // noop
- },
-): Promise<OperationsResult<T>> {
- const { _streamObjectOptions, ...rest } = opts;
-
- if (
- _streamObjectOptions &&
- ("output" in _streamObjectOptions || "schema" in _streamObjectOptions)
- ) {
- throw new Error("Cannot provide output or schema in _streamObjectOptions");
- }
-
- const schema = jsonSchema(createStreamToolsArraySchema(streamTools));
-
- const options = {
- // non-overridable options for streamObject
- output: "object" as const,
- schema,
- // configurable options for streamObject
-
- // - optional, with defaults
- // mistral somehow needs "auto", while groq/llama needs "tool"
- // google needs "auto" because https://github.com/vercel/ai/issues/6959
- // TODO: further research this and / or make configurable
- // for now stick to "tool" by default as this has been tested mostly
- mode:
- rest.model.provider === "mistral.chat" ||
- rest.model.provider === "google.generative-ai"
- ? "auto"
- : "tool",
- // - mandatory ones:
- ...rest,
-
- // extra options for streamObject
- ...((opts._streamObjectOptions ?? {}) as any),
- };
-
- const ret = streamObject<{ operations: any }>(options);
-
- let _operationsSource: OperationsResult<T>["operationsSource"];
-
- const [fullStream1, fullStream2] = ret.fullStream.tee();
-
- // Always consume fullStream2 in the background and store the last operations
- const allOperationsPromise = (async () => {
- let lastOperations: { operations: StreamToolCall<T>[] } = {
- operations: [],
- };
- const objectStream = createAsyncIterableStream(
- partialObjectStream(fullStream2),
- );
-
- for await (const chunk of objectStream) {
- if (chunk && typeof chunk === "object" && "operations" in chunk) {
- lastOperations = chunk as any;
- }
- }
- return lastOperations;
- })();
-
- // Note: we can probably clean this up by switching to streams instead of async iterables
- return {
- streamObjectResult: ret,
- generateObjectResult: undefined,
- get operationsSource() {
- if (!_operationsSource) {
- _operationsSource = createAsyncIterableStreamFromAsyncIterable(
- preprocessOperationsStreaming(
- filterNewOrUpdatedOperations(
- streamOnStartCallback(
- partialObjectStreamThrowError(
- createAsyncIterableStream(fullStream1),
- ),
- onStart,
- ),
- ),
- streamTools,
- ),
- );
- }
- return _operationsSource;
- },
- async getGeneratedOperations() {
- // Simply return the stored operations
- // If the stream hasn't completed yet, this will return the latest available operations
- return allOperationsPromise;
- },
- };
-}
-
-async function* streamOnStartCallback<T>(
- stream: AsyncIterable<T>,
- onStart: () => void,
-): AsyncIterable<T> {
- let first = true;
- for await (const chunk of stream) {
- if (first) {
- onStart();
- first = false;
- }
- yield chunk;
- }
-}
-
-// adapted from https://github.com/vercel/ai/blob/5d4610634f119dc394d36adcba200a06f850209e/packages/ai/core/generate-object/stream-object.ts#L1041C7-L1066C1
-// change made to throw errors (with the original they're silently ignored)
-function partialObjectStreamThrowError<PARTIAL>(
- stream: ReadableStream<ObjectStreamPart<PARTIAL>>,
-): AsyncIterableStream<PARTIAL> {
- return createAsyncIterableStream(
- stream.pipeThrough(
- new TransformStream<ObjectStreamPart<PARTIAL>, PARTIAL>({
- transform(chunk, controller) {
- switch (chunk.type) {
- case "object":
- controller.enqueue(chunk.object);
- break;
-
- case "text-delta":
- case "finish":
- break;
- case "error":
- controller.error(chunk.error);
- break;
- default: {
- const _exhaustiveCheck: never = chunk;
- throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);
- }
- }
- },
- }),
- ),
- );
-}
-
-// from https://github.com/vercel/ai/blob/5d4610634f119dc394d36adcba200a06f850209e/packages/ai/core/generate-object/stream-object.ts#L1041C7-L1066C1
-function partialObjectStream<PARTIAL>(
- stream: ReadableStream<ObjectStreamPart<PARTIAL>>,
-): AsyncIterableStream<PARTIAL> {
- return createAsyncIterableStream(
- stream.pipeThrough(
- new TransformStream<ObjectStreamPart<PARTIAL>, PARTIAL>({
- transform(chunk, controller) {
- switch (chunk.type) {
- case "object":
- controller.enqueue(chunk.object);
- break;
- case "text-delta":
- case "finish":
- break;
- case "error":
- break;
- default: {
- const _exhaustiveCheck: never = chunk;
- throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);
- }
- }
- },
- }),
- ),
- );
-}
diff --git a/packages/xl-ai/src/streamTool/preprocess.ts b/packages/xl-ai/src/streamTool/preprocess.ts
index 0a98ecc216..71cba2d567 100644
--- a/packages/xl-ai/src/streamTool/preprocess.ts
+++ b/packages/xl-ai/src/streamTool/preprocess.ts
@@ -43,6 +43,10 @@ export async function* preprocessOperationsStreaming<
/**
* Validates an stream of operations and throws an error if an invalid operation is found.
+ *
+ * TODO: remove
+ *
+ * @deprecated
*/
export async function* preprocessOperationsNonStreaming<
T extends StreamTool[],
diff --git a/packages/xl-ai/src/streamTool/streamTool.ts b/packages/xl-ai/src/streamTool/streamTool.ts
index d907d1315e..7257a1adc4 100644
--- a/packages/xl-ai/src/streamTool/streamTool.ts
+++ b/packages/xl-ai/src/streamTool/streamTool.ts
@@ -66,14 +66,14 @@ export type StreamToolCallSingle<T extends StreamTool<any>> =
*
* Its type is the same as what a validated StreamTool returns
*/
-export type StreamToolCall<T extends StreamTool<any> | StreamTool<any>[]> =
+export type StreamToolCall<T extends StreamTool<any> | readonly any[]> =
T extends StreamTool<infer U>
? U
: // when passed an array of StreamTools, StreamToolCall represents the type of one of the StreamTool invocations
- T extends StreamTool<any>[]
- ? T[number] extends StreamTool<infer V>
- ? V
- : never
+ T extends readonly unknown[]
+ ? {
+ [K in keyof T]: T[K] extends StreamTool<infer V> ? V : never;
+ }[number]
+ }[number]
: never;
/**
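The effect of the new mapped definition is that each position in a (possibly readonly) tuple of tools contributes its own call type, instead of everything being funneled through a single `infer` over `T[number]`. A hypothetical illustration (the tool payload shapes below are invented for the example):

```ts
import type { StreamTool, StreamToolCall } from "./streamTool.js";

declare const addTool: StreamTool<{ type: "add"; blocks: string[] }>;
declare const deleteTool: StreamTool<{ type: "delete"; id: string }>;

// `as const` produces a readonly tuple, which the new
// `readonly any[]` constraint now accepts.
const tools = [addTool, deleteTool] as const;

// Resolves to:
//   { type: "add"; blocks: string[] } | { type: "delete"; id: string }
type Calls = StreamToolCall<typeof tools>;
```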
diff --git a/packages/xl-ai/src/streamTool/vercelAiSdk/clientSideExecutor/clientSideExecutor.ts b/packages/xl-ai/src/streamTool/vercelAiSdk/clientSideExecutor/clientSideExecutor.ts
new file mode 100644
index 0000000000..5466cce309
--- /dev/null
+++ b/packages/xl-ai/src/streamTool/vercelAiSdk/clientSideExecutor/clientSideExecutor.ts
@@ -0,0 +1,245 @@
+import {
+ CoreMessage,
+ LanguageModel,
+ LanguageModelV1,
+ generateObject,
+ jsonSchema,
+ streamObject,
+} from "ai";
+
+import { ExecuteLLMRequestOptions } from "../../../api/LLMRequest.js";
+import { LLMResponse } from "../../../api/LLMResponse.js";
+import { createStreamToolsArraySchema } from "../../jsonSchema.js";
+import { StreamTool } from "../../streamTool.js";
+import { dataStreamResponseToOperationsResult } from "../util/dataStreamResponseToOperationsResult.js";
+import {
+ objectToDataStream,
+ partialObjectStreamToDataStream,
+} from "../util/partialObjectStreamUtil.js";
+
+type LLMRequestOptions = {
+ model: LanguageModel;
+ messages: CoreMessage[];
+ maxRetries: number;
+};
+
+/**
+ * Calls an LLM with StreamTools, using the `generateObject` of the AI SDK.
+ *
+ * This is the non-streaming version.
+ */
+export async function generateOperations<T extends StreamTool<any>[]>(
+ streamTools: T,
+ opts: LLMRequestOptions & {
+ _generateObjectOptions?: Partial<Parameters<typeof generateObject<any>>[0]>;
+ },
+) {
+ const { _generateObjectOptions, ...rest } = opts;
+
+ if (
+ _generateObjectOptions &&
+ ("output" in _generateObjectOptions || "schema" in _generateObjectOptions)
+ ) {
+ throw new Error(
+ "Cannot provide output or schema in _generateObjectOptions",
+ );
+ }
+
+ const schema = jsonSchema(createStreamToolsArraySchema(streamTools));
+ const options = {
+ // non-overridable options for streamObject
+ output: "object" as const,
+ schema,
+
+ // configurable options for streamObject
+
+ // - optional, with defaults
+
+ // mistral somehow needs "auto", while groq/llama needs "tool"
+ // google needs "auto" because https://github.com/vercel/ai/issues/6959
+ // TODO: further research this and / or make configurable
+ // for now stick to "tool" by default as this has been tested mostly
+ mode:
+ rest.model.provider === "mistral.chat" ||
+ rest.model.provider === "google.generative-ai"
+ ? "auto"
+ : "tool",
+ // - mandatory ones:
+ ...rest,
+
+ // extra options for streamObject
+ ...((_generateObjectOptions ?? {}) as any),
+ };
+
+ const ret = await generateObject<{ operations: any }>(options);
+
+ const stream = objectToDataStream(ret.object);
+
+ return {
+ dataStreamResponse: new Response(
+ stream.pipeThrough(new TextEncoderStream()),
+ {
+ status: 200,
+ statusText: "OK",
+ headers: {
+ contentType: "text/plain; charset=utf-8",
+ dataStreamVersion: "v1",
+ },
+ },
+ ),
+ /**
+ * Result of the underlying `generateObject` (AI SDK) call, or `undefined` if streaming mode
+ */
+ generateObjectResult: ret,
+ };
+}
+
+/**
+ * Calls an LLM with StreamTools, using the `streamObject` of the AI SDK.
+ *
+ * This is the streaming version.
+ */
+export async function streamOperations<T extends StreamTool<any>[]>(
+ streamTools: T,
+ opts: LLMRequestOptions & {
+ _streamObjectOptions?: Partial<
+ Parameters<typeof streamObject<any>>[0]
+ >;
+ },
+) {
+ const { _streamObjectOptions, ...rest } = opts;
+
+ if (
+ _streamObjectOptions &&
+ ("output" in _streamObjectOptions || "schema" in _streamObjectOptions)
+ ) {
+ throw new Error("Cannot provide output or schema in _streamObjectOptions");
+ }
+
+ const schema = jsonSchema(createStreamToolsArraySchema(streamTools));
+
+ const options = {
+ // non-overridable options for streamObject
+ output: "object" as const,
+ schema,
+ // configurable options for streamObject
+
+ // - optional, with defaults
+ // mistral somehow needs "auto", while groq/llama needs "tool"
+ // google needs "auto" because https://github.com/vercel/ai/issues/6959
+ // TODO: further research this and / or make configurable
+ // for now stick to "tool" by default as this has been tested mostly
+ mode:
+ rest.model.provider === "mistral.chat" ||
+ rest.model.provider === "google.generative-ai"
+ ? "auto"
+ : "tool",
+ // - mandatory ones:
+ ...rest,
+
+ // extra options for streamObject
+ ...((opts._streamObjectOptions ?? {}) as any),
+ };
+
+ const ret = streamObject<{ operations: any }>(options);
+
+ // Transform the partial object stream to a data stream format
+ const stream = partialObjectStreamToDataStream(ret.fullStream);
+
+ return {
+ dataStreamResponse: new Response(
+ stream.pipeThrough(new TextEncoderStream()),
+ {
+ status: 200,
+ statusText: "OK",
+ headers: {
+ contentType: "text/plain; charset=utf-8",
+ dataStreamVersion: "v1",
+ },
+ },
+ ),
+ /**
+ * Result of the underlying `streamObject` (AI SDK) call, or `undefined` if non-streaming mode
+ */
+ streamObjectResult: ret,
+ };
+}
+
+export function createAISDKLLMRequestExecutor(opts: {
+ /**
+ * The language model to use for the LLM call (AI SDK)
+ *
+ * (when invoking `callLLM` via the `AIExtension` this will default to the
+ * model set in the `AIExtension` options)
+ *
+ * Note: perhaps we want to remove this
+ */
+ model: LanguageModelV1;
+
+ /**
+ * Whether to stream the LLM response or not
+ *
+ * When streaming, we use the AI SDK `streamObject` function,
+ * otherwise, we use the AI SDK `generateObject` function.
+ *
+ * @default true
+ */
+ stream?: boolean;
+
+ /**
+ * The maximum number of retries for the LLM call
+ *
+ * @default 2
+ */
+ maxRetries?: number;
+
+ /**
+ * Additional options to pass to the AI SDK `generateObject` function
+ * (only used when `stream` is `false`)
+ */
+ _generateObjectOptions?: Partial<Parameters<typeof generateObject<any>>[0]>;
+ /**
+ * Additional options to pass to the AI SDK `streamObject` function
+ * (only used when `stream` is `true`)
+ */
+ _streamObjectOptions?: Partial<Parameters<typeof streamObject<any>>[0]>;
+}) {
+ const {
+ model,
+ stream,
+ maxRetries,
+ _generateObjectOptions,
+ _streamObjectOptions,
+ } = opts;
+ return async (opts: ExecuteLLMRequestOptions) => {
+ const { messages, streamTools, onStart } = opts;
+
+ // TODO: add support for streamText / generateText and tool calls
+
+ let response: // | Awaited<ReturnType<typeof generateOperations<any>>>
+ Awaited<ReturnType<typeof streamOperations<any>>>;
+
+ if (stream) {
+ response = await streamOperations(streamTools, {
+ messages,
+ model,
+ maxRetries,
+ ...(_streamObjectOptions as any),
+ });
+ } else {
+ response = (await generateOperations(streamTools, {
+ messages,
+ model,
+ maxRetries,
+ ...(_generateObjectOptions as any),
+ })) as any;
+ }
+
+ const parsedResponse = await dataStreamResponseToOperationsResult(
+ response.dataStreamResponse,
+ streamTools,
+ onStart,
+ );
+ return new LLMResponse(messages, parsedResponse, streamTools);
+ };
+}
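As the updated tests and test editors in this diff show, options such as `model`, `stream` and `maxRetries` now travel through an `executor` rather than being passed to `doLLMRequest` / `createAIExtension` directly. A hedged sketch of the client-side wiring, assuming a Groq model as in the existing minimal example (the export path and model id are assumptions):

```ts
import { createGroq } from "@ai-sdk/groq";
// exact export path assumed; adjust to wherever createAISDKLLMRequestExecutor
// is re-exported from in your version of @blocknote/xl-ai
import {
  createAIExtension,
  createAISDKLLMRequestExecutor,
} from "@blocknote/xl-ai";

const groq = createGroq({ apiKey: "YOUR_GROQ_API_KEY" });

export const aiExtension = createAIExtension({
  executor: createAISDKLLMRequestExecutor({
    model: groq("llama-3.3-70b-versatile"), // illustrative model id
    stream: true,
    maxRetries: 0,
  }),
});
```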
diff --git a/packages/xl-ai/src/streamTool/vercelAiSdk/util/dataStreamResponseToOperationsResult.ts b/packages/xl-ai/src/streamTool/vercelAiSdk/util/dataStreamResponseToOperationsResult.ts
new file mode 100644
index 0000000000..fa75e30fee
--- /dev/null
+++ b/packages/xl-ai/src/streamTool/vercelAiSdk/util/dataStreamResponseToOperationsResult.ts
@@ -0,0 +1,50 @@
+import { OperationsResult } from "../../../api/LLMResponse.js";
+import {
+ createAsyncIterableStream,
+ createAsyncIterableStreamFromAsyncIterable,
+} from "../../../util/stream.js";
+import { filterNewOrUpdatedOperations } from "../../filterNewOrUpdatedOperations.js";
+import { preprocessOperationsStreaming } from "../../preprocess.js";
+import { StreamTool, StreamToolCall } from "../../streamTool.js";
+import {
+ dataStreamToTextStream,
+ textStreamToPartialObjectStream,
+} from "./partialObjectStreamUtil.js";
+
+export async function dataStreamResponseToOperationsResult<
+ T extends StreamTool<any>[],
+>(
+ response: Response,
+ streamTools: T,
+ onStart: () => void = () => {
+ // noop
+ },
+): Promise<OperationsResult<T>> {
+ const ret = dataStreamToTextStream(response.body!).pipeThrough(
+ textStreamToPartialObjectStream<{ operations: StreamToolCall<T>[] }>(),
+ );
+
+ // Note: we can probably clean this up by switching to streams instead of async iterables
+ return createAsyncIterableStreamFromAsyncIterable(
+ preprocessOperationsStreaming(
+ filterNewOrUpdatedOperations(
+ streamOnStartCallback(createAsyncIterableStream(ret), onStart),
+ ),
+ streamTools,
+ ),
+ );
+}
+
+async function* streamOnStartCallback<T>(
+ stream: AsyncIterable<T>,
+ onStart: () => void,
+): AsyncIterable<T> {
+ let first = true;
+ for await (const chunk of stream) {
+ if (first) {
+ onStart();
+ first = false;
+ }
+ yield chunk;
+ }
+}
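This helper is what enables the "manual execution" flow: given any data-stream `Response` (for example one fetched from your own endpoint), it yields validated operations that can then be applied to the editor. A sketch under assumed names (the endpoint URL and `streamTools` are placeholders, not part of this diff):

```ts
import { dataStreamResponseToOperationsResult } from "@blocknote/xl-ai";

export async function logRemoteOperations(streamTools: any[]) {
  // Any Response whose body is in the AI SDK data-stream format will do.
  const response = await fetch("/api/ai-operations");

  const operations = await dataStreamResponseToOperationsResult(
    response,
    streamTools,
    () => console.log("first chunk received"),
  );

  // The result is an async-iterable stream of (possibly partial) tool calls.
  for await (const { operation, isPossiblyPartial } of operations) {
    console.log(operation, isPossiblyPartial);
  }
}
```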
diff --git a/packages/xl-ai/src/streamTool/vercelAiSdk/util/partialObjectStreamUtil.ts b/packages/xl-ai/src/streamTool/vercelAiSdk/util/partialObjectStreamUtil.ts
new file mode 100644
index 0000000000..0d53e83645
--- /dev/null
+++ b/packages/xl-ai/src/streamTool/vercelAiSdk/util/partialObjectStreamUtil.ts
@@ -0,0 +1,167 @@
+import { getErrorMessage } from "@ai-sdk/provider-utils";
+import {
+ formatDataStreamPart,
+ isDeepEqualData,
+ parsePartialJson,
+ processDataStream,
+} from "@ai-sdk/ui-utils";
+import { DeepPartial, ObjectStreamPart } from "ai";
+import {
+ AsyncIterableStream,
+ createAsyncIterableStream,
+} from "../../../util/stream.js";
+
+// adapted from https://github.com/vercel/ai/blob/5d4610634f119dc394d36adcba200a06f850209e/packages/ai/core/generate-object/stream-object.ts#L1041C7-L1066C1
+// change made to throw errors (with the original they're silently ignored)
+export function partialObjectStreamThrowError_UNUSED<PARTIAL>(
+ stream: ReadableStream<ObjectStreamPart<PARTIAL>>,
+): AsyncIterableStream<PARTIAL> {
+ return createAsyncIterableStream(
+ stream.pipeThrough(
+ new TransformStream<ObjectStreamPart<PARTIAL>, PARTIAL>({
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "object":
+ controller.enqueue(chunk.object);
+ break;
+
+ case "text-delta":
+ case "finish":
+ break;
+ case "error":
+ controller.error(chunk.error);
+ break;
+ default: {
+ const _exhaustiveCheck: never = chunk;
+ throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);
+ }
+ }
+ },
+ }),
+ ),
+ );
+}
+
+// from https://github.com/vercel/ai/blob/5d4610634f119dc394d36adcba200a06f850209e/packages/ai/core/generate-object/stream-object.ts#L1041C7-L1066C1
+export function partialObjectStream_UNUSED<PARTIAL>(
+ stream: ReadableStream<ObjectStreamPart<PARTIAL>>,
+): AsyncIterableStream<PARTIAL> {
+ return createAsyncIterableStream(
+ stream.pipeThrough(
+ new TransformStream<ObjectStreamPart<PARTIAL>, PARTIAL>({
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "object":
+ controller.enqueue(chunk.object);
+ break;
+ case "text-delta":
+ case "finish":
+ break;
+ case "error":
+ break;
+ default: {
+ const _exhaustiveCheck: never = chunk;
+ throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);
+ }
+ }
+ },
+ }),
+ ),
+ );
+}
+
+// based on https://github.com/vercel/ai/blob/d383c37072a91dfd0cebac13893dea044d9f88fa/packages/react/src/use-object.ts#L185
+export function textStreamToPartialObjectStream<RESULT>() {
+ let accumulatedText = "";
+ let latestObject: DeepPartial<RESULT> | undefined = undefined;
+ return new TransformStream<string, DeepPartial<RESULT>>({
+ transform(chunk, controller) {
+ accumulatedText += chunk;
+ const { value } = parsePartialJson(accumulatedText);
+ const currentObject = value as DeepPartial<RESULT>;
+
+ if (!isDeepEqualData(latestObject, currentObject)) {
+ latestObject = currentObject;
+
+ controller.enqueue(currentObject);
+ }
+ },
+ });
+}
+
+export function dataStreamToTextStream(stream: ReadableStream) {
+ let errored = false;
+ const textStream = new ReadableStream({
+ start(controller) {
+ processDataStream({
+ stream,
+ onTextPart: (chunk) => {
+ controller.enqueue(chunk);
+ },
+ onErrorPart: (chunk) => {
+ errored = true;
+ controller.error(chunk);
+ // console.log("error", chunk);
+ },
+ }).then(
+ () => {
+ if (!errored) {
+ controller.close();
+ }
+ },
+ (error) => {
+ controller.error(error);
+ },
+ );
+ },
+ });
+ return textStream;
+}
+
+/**
+ * Transforms a partial object stream to a data stream format.
+ * This is needed to pass errors through to the client in a clean way.
+ *
+ * @param stream - The partial object stream to transform
+ * @returns A ReadableStream that emits data stream formatted chunks
+ *
+ * @see https://github.com/vercel/ai/issues/5027#issuecomment-2701011869
+ * @see https://github.com/vercel/ai/issues/5115
+ */
+export function partialObjectStreamToDataStream<PARTIAL>(
+ stream: ReadableStream<ObjectStreamPart<PARTIAL>>,
+): ReadableStream<string> {
+ return stream.pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "text-delta":
+ controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
+ break;
+ case "object":
+ case "finish":
+ break;
+ case "error":
+ controller.enqueue(
+ formatDataStreamPart("error", getErrorMessage(chunk.error)),
+ );
+ break;
+ default: {
+ const _exhaustiveCheck: never = chunk;
+ throw new Error(`Unsupported chunk type: ${_exhaustiveCheck}`);
+ }
+ }
+ },
+ }),
+ );
+}
+
+export function objectToDataStream(object: any) {
+ const stream = new ReadableStream({
+ start(controller) {
+ controller.enqueue(formatDataStreamPart("text", JSON.stringify(object)));
+ controller.close();
+ },
+ });
+ return stream;
+}
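On the server side, `partialObjectStreamToDataStream` (or `objectToDataStream` for non-streaming calls) turns an AI SDK result into the same data-stream format the client utilities above consume. A sketch of a hypothetical endpoint handler (the model, schema handling and framework glue are assumptions, not prescribed by this diff):

```ts
import { openai } from "@ai-sdk/openai";
import { jsonSchema, streamObject } from "ai";
import { partialObjectStreamToDataStream } from "@blocknote/xl-ai";

export async function handleAIRequest(body: { messages: any[]; schema: any }) {
  const result = streamObject({
    model: openai("gpt-4o-mini"), // illustrative model id
    messages: body.messages,
    output: "object",
    schema: jsonSchema(body.schema),
  });

  // Errors from the object stream are forwarded as data-stream error parts,
  // so the client can surface them instead of silently dropping them.
  const dataStream = partialObjectStreamToDataStream(result.fullStream);

  return new Response(dataStream.pipeThrough(new TextEncoderStream()), {
    headers: { "Content-Type": "text/plain; charset=utf-8" },
  });
}
```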
diff --git a/packages/xl-ai/src/testUtil/cases/editors/blockFormatting.ts b/packages/xl-ai/src/testUtil/cases/editors/blockFormatting.ts
index 90ec7580f3..01bf9d44ff 100644
--- a/packages/xl-ai/src/testUtil/cases/editors/blockFormatting.ts
+++ b/packages/xl-ai/src/testUtil/cases/editors/blockFormatting.ts
@@ -24,7 +24,7 @@ export function getEditorWithBlockFormatting() {
schema,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any, // disable
}),
],
});
diff --git a/packages/xl-ai/src/testUtil/cases/editors/emptyEditor.ts b/packages/xl-ai/src/testUtil/cases/editors/emptyEditor.ts
index 144cb722e3..92bd606871 100644
--- a/packages/xl-ai/src/testUtil/cases/editors/emptyEditor.ts
+++ b/packages/xl-ai/src/testUtil/cases/editors/emptyEditor.ts
@@ -12,7 +12,7 @@ export function getEmptyEditor() {
trailingBlock: false,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any, // disable
}),
],
});
diff --git a/packages/xl-ai/src/testUtil/cases/editors/formattingAndMentions.ts b/packages/xl-ai/src/testUtil/cases/editors/formattingAndMentions.ts
index 77478cce3f..dc22e839d7 100644
--- a/packages/xl-ai/src/testUtil/cases/editors/formattingAndMentions.ts
+++ b/packages/xl-ai/src/testUtil/cases/editors/formattingAndMentions.ts
@@ -74,7 +74,7 @@ export function getEditorWithFormattingAndMentions() {
schema,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any, // disable
}),
],
});
diff --git a/packages/xl-ai/src/testUtil/cases/editors/simpleEditor.ts b/packages/xl-ai/src/testUtil/cases/editors/simpleEditor.ts
index e3bea4dab1..8776c340d4 100644
--- a/packages/xl-ai/src/testUtil/cases/editors/simpleEditor.ts
+++ b/packages/xl-ai/src/testUtil/cases/editors/simpleEditor.ts
@@ -18,7 +18,7 @@ export function getSimpleEditor() {
schema,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any,
}),
],
});
@@ -46,7 +46,7 @@ export function getSimpleEditorWithCursorBetweenBlocks() {
schema,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any, // disable
}),
],
});
diff --git a/packages/xl-ai/src/testUtil/cases/editors/tables.ts b/packages/xl-ai/src/testUtil/cases/editors/tables.ts
index f67dcbb810..cf4f5d853a 100644
--- a/packages/xl-ai/src/testUtil/cases/editors/tables.ts
+++ b/packages/xl-ai/src/testUtil/cases/editors/tables.ts
@@ -40,7 +40,7 @@ export function getEditorWithTables() {
trailingBlock: false,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any, // disable
}),
],
});
diff --git a/packages/xl-ai/src/testUtil/cases/updateOperationTestCases.ts b/packages/xl-ai/src/testUtil/cases/updateOperationTestCases.ts
index 0d7f0f4dcb..9292c27ac6 100644
--- a/packages/xl-ai/src/testUtil/cases/updateOperationTestCases.ts
+++ b/packages/xl-ai/src/testUtil/cases/updateOperationTestCases.ts
@@ -685,7 +685,7 @@ export const updateOperationTestCases: DocumentOperationTestCase[] = [
schema,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any, // disable
}),
],
});
@@ -742,7 +742,7 @@ export const updateOperationTestCases: DocumentOperationTestCase[] = [
schema,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any, // disable
}),
],
});
@@ -781,7 +781,7 @@ export const updateOperationTestCases: DocumentOperationTestCase[] = [
schema,
extensions: [
createAIExtension({
- model: undefined as any,
+ executor: undefined as any, // disable
}),
],
});
diff --git a/packages/xl-ai/src/util/stream.ts b/packages/xl-ai/src/util/stream.ts
index 03ac85c234..ee130c52b1 100644
--- a/packages/xl-ai/src/util/stream.ts
+++ b/packages/xl-ai/src/util/stream.ts
@@ -2,7 +2,7 @@
* Converts an AsyncIterable to a ReadableStream
*/
export function asyncIterableToStream<T>(
- iterable: AsyncIterable<T>
+ iterable: AsyncIterable<T>,
): ReadableStream<T> {
return new ReadableStream({
async start(controller) {
@@ -29,11 +29,11 @@ export type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
* Creates an AsyncIterableStream from a ReadableStream
*/
export function createAsyncIterableStream<T>(
- source: ReadableStream<T>
+ source: ReadableStream<T>,
): AsyncIterableStream<T> {
if (source.locked) {
throw new Error(
- "Stream (source) is already locked and cannot be iterated."
+ "Stream (source) is already locked and cannot be iterated.",
);
}
@@ -60,7 +60,7 @@ export function createAsyncIterableStream<T>(
* Creates an AsyncIterableStream from an AsyncGenerator
*/
export function createAsyncIterableStreamFromAsyncIterable<T>(
- source: AsyncIterable<T>
+ source: AsyncIterable<T>,
): AsyncIterableStream<T> {
return createAsyncIterableStream(asyncIterableToStream(source));
}
diff --git a/playground/src/examples.gen.tsx b/playground/src/examples.gen.tsx
index 0cd32ceb3f..7c0f24710c 100644
--- a/playground/src/examples.gen.tsx
+++ b/playground/src/examples.gen.tsx
@@ -1651,6 +1651,61 @@
"slug": "ai"
},
"readme": "This example combines the AI extension with the ghost writer example to show how to use the AI extension in a collaborative environment.\n\n**Relevant Docs:**\n\n- [Editor Setup](/docs/getting-started/editor-setup)\n- [Changing the Formatting Toolbar](/docs/react/components/formatting-toolbar#changing-the-formatting-toolbar)\n- [Changing Slash Menu Items](/docs/react/components/suggestion-menus#changing-slash-menu-items)\n- [Getting Stared with BlockNote AI](/docs/features/ai/setup)"
+ },
+ {
+ "projectSlug": "manual-execution",
+ "fullSlug": "ai/manual-execution",
+ "pathFromRoot": "examples/09-ai/05-manual-execution",
+ "config": {
+ "playground": true,
+ "docs": false,
+ "author": "yousefed",
+ "tags": [
+ "AI",
+ "llm"
+ ],
+ "dependencies": {
+ "@blocknote/xl-ai": "latest",
+ "@mantine/core": "^7.17.3",
+ "ai": "^4.3.15",
+ "@ai-sdk/groq": "^1.2.9",
+ "y-partykit": "^0.0.25",
+ "yjs": "^13.6.27",
+ "zustand": "^5.0.3"
+ } as any
+ },
+ "title": "AI manual execution",
+ "group": {
+ "pathFromRoot": "examples/09-ai",
+ "slug": "ai"
+ },
+ "readme": "Instead of calling AI models directly, this example shows how you can use an existing stream of responses and apply them to the editor"
+ },
+ {
+ "projectSlug": "server-execution",
+ "fullSlug": "ai/server-execution",
+ "pathFromRoot": "examples/09-ai/06-server-execution",
+ "config": {
+ "playground": true,
+ "docs": true,
+ "author": "yousefed",
+ "tags": [
+ "AI",
+ "llm"
+ ],
+ "dependencies": {
+ "@blocknote/xl-ai": "latest",
+ "@mantine/core": "^7.17.3",
+ "ai": "^4.3.15",
+ "zustand": "^5.0.3"
+ } as any
+ },
+ "title": "AI Integration with server LLM execution",
+ "group": {
+ "pathFromRoot": "examples/09-ai",
+ "slug": "ai"
+ },
+ "readme": "This example shows how to setup to add AI integration while handling the LLM calls (in this case, using the Vercel AI SDK) on your server, using a custom executor"
}
]
},
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 9657b2a136..ef1729579c 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -225,8 +225,8 @@ importers:
specifier: ^0.6.3
version: 0.6.4(react@19.1.0)(yjs@13.6.27)
ai:
- specifier: ^4.1.0
- version: 4.3.15(react@19.1.0)(zod@3.25.76)
+ specifier: ^4.3.15
+ version: 4.3.19(react@19.1.0)(zod@3.25.76)
babel-plugin-react-compiler:
specifier: 19.1.0-rc.2
version: 19.1.0-rc.2
@@ -3303,6 +3303,113 @@ importers:
specifier: ^5.3.4
version: 5.4.15(@types/node@22.15.2)(lightningcss@1.30.1)(terser@5.43.1)
+ examples/09-ai/05-manual-execution:
+ dependencies:
+ '@ai-sdk/groq':
+ specifier: ^1.2.9
+ version: 1.2.9(zod@3.25.76)
+ '@blocknote/ariakit':
+ specifier: latest
+ version: link:../../../packages/ariakit
+ '@blocknote/core':
+ specifier: latest
+ version: link:../../../packages/core
+ '@blocknote/mantine':
+ specifier: latest
+ version: link:../../../packages/mantine
+ '@blocknote/react':
+ specifier: latest
+ version: link:../../../packages/react
+ '@blocknote/shadcn':
+ specifier: latest
+ version: link:../../../packages/shadcn
+ '@blocknote/xl-ai':
+ specifier: latest
+ version: link:../../../packages/xl-ai
+ '@mantine/core':
+ specifier: ^7.17.3
+ version: 7.17.3(@mantine/hooks@7.17.3(react@19.1.0))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ ai:
+ specifier: ^4.3.15
+ version: 4.3.19(react@19.1.0)(zod@3.25.76)
+ react:
+ specifier: ^19.1.0
+ version: 19.1.0
+ react-dom:
+ specifier: ^19.1.0
+ version: 19.1.0(react@19.1.0)
+ y-partykit:
+ specifier: ^0.0.25
+ version: 0.0.25
+ yjs:
+ specifier: ^13.6.27
+ version: 13.6.27
+ zustand:
+ specifier: ^5.0.3
+ version: 5.0.3(@types/react@19.1.8)(immer@10.1.1)(react@19.1.0)(use-sync-external-store@1.5.0(react@19.1.0))
+ devDependencies:
+ '@types/react':
+ specifier: ^19.1.0
+ version: 19.1.8
+ '@types/react-dom':
+ specifier: ^19.1.0
+ version: 19.1.6(@types/react@19.1.8)
+ '@vitejs/plugin-react':
+ specifier: ^4.3.1
+ version: 4.4.1(vite@5.4.15(@types/node@22.15.2)(lightningcss@1.30.1)(terser@5.43.1))
+ vite:
+ specifier: ^5.3.4
+ version: 5.4.15(@types/node@22.15.2)(lightningcss@1.30.1)(terser@5.43.1)
+
+ examples/09-ai/06-server-execution:
+ dependencies:
+ '@blocknote/ariakit':
+ specifier: latest
+ version: link:../../../packages/ariakit
+ '@blocknote/core':
+ specifier: latest
+ version: link:../../../packages/core
+ '@blocknote/mantine':
+ specifier: latest
+ version: link:../../../packages/mantine
+ '@blocknote/react':
+ specifier: latest
+ version: link:../../../packages/react
+ '@blocknote/shadcn':
+ specifier: latest
+ version: link:../../../packages/shadcn
+ '@blocknote/xl-ai':
+ specifier: latest
+ version: link:../../../packages/xl-ai
+ '@mantine/core':
+ specifier: ^7.17.3
+ version: 7.17.3(@mantine/hooks@7.17.3(react@19.1.0))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ ai:
+ specifier: ^4.3.15
+ version: 4.3.19(react@19.1.0)(zod@3.25.76)
+ react:
+ specifier: ^19.1.0
+ version: 19.1.0
+ react-dom:
+ specifier: ^19.1.0
+ version: 19.1.0(react@19.1.0)
+ zustand:
+ specifier: ^5.0.3
+ version: 5.0.3(@types/react@19.1.8)(immer@10.1.1)(react@19.1.0)(use-sync-external-store@1.5.0(react@19.1.0))
+ devDependencies:
+ '@types/react':
+ specifier: ^19.1.0
+ version: 19.1.8
+ '@types/react-dom':
+ specifier: ^19.1.0
+ version: 19.1.6(@types/react@19.1.8)
+ '@vitejs/plugin-react':
+ specifier: ^4.3.1
+ version: 4.4.1(vite@5.4.15(@types/node@22.15.2)(lightningcss@1.30.1)(terser@5.43.1))
+ vite:
+ specifier: ^5.3.4
+ version: 5.4.15(@types/node@22.15.2)(lightningcss@1.30.1)(terser@5.43.1)
+
examples/vanilla-js/react-vanilla-custom-blocks:
dependencies:
'@blocknote/ariakit':
@@ -4012,6 +4119,12 @@ importers:
packages/xl-ai:
dependencies:
+ '@ai-sdk/provider-utils':
+ specifier: ^2.2.8
+ version: 2.2.8(zod@3.25.76)
+ '@ai-sdk/ui-utils':
+ specifier: ^1.2.11
+ version: 1.2.11(zod@3.25.76)
'@blocknote/core':
specifier: 0.35.0
version: link:../core
@@ -4031,8 +4144,8 @@ importers:
specifier: ^2.12.0
version: 2.12.0(@tiptap/pm@2.12.0)
ai:
- specifier: ^4.3.15
- version: 4.3.15(react@19.1.0)(zod@3.25.76)
+ specifier: ^4.3.19
+ version: 4.3.19(react@19.1.0)(zod@3.25.76)
lodash.isequal:
specifier: ^4.5.0
version: 4.5.0
@@ -4166,9 +4279,18 @@ importers:
packages/xl-ai-server:
dependencies:
+ '@ai-sdk/openai':
+ specifier: ^1.3.22
+ version: 1.3.22(zod@3.25.76)
+ '@blocknote/xl-ai':
+ specifier: workspace:*
+ version: link:../xl-ai
'@hono/node-server':
specifier: ^1.13.7
version: 1.14.0(hono@4.7.5)
+ ai:
+ specifier: ^4
+ version: 4.3.19(react@19.1.0)(zod@3.25.76)
hono:
specifier: ^4.6.12
version: 4.7.5
@@ -9810,6 +9932,16 @@ packages:
react:
optional: true
+ ai@4.3.19:
+ resolution: {integrity: sha512-dIE2bfNpqHN3r6IINp9znguYdhIOheKW2LDigAMrgt/upT3B8eBGPSCblENvaZGoq+hxaN9fSMzjWpbqloP+7Q==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ react: ^18 || ^19 || ^19.0.0-rc
+ zod: ^3.23.8
+ peerDependenciesMeta:
+ react:
+ optional: true
+
ajv-formats@2.1.1:
resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==}
peerDependencies:
@@ -21074,6 +21206,18 @@ snapshots:
optionalDependencies:
react: 19.1.0
+ ai@4.3.19(react@19.1.0)(zod@3.25.76):
+ dependencies:
+ '@ai-sdk/provider': 1.1.3
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ '@ai-sdk/react': 1.2.12(react@19.1.0)(zod@3.25.76)
+ '@ai-sdk/ui-utils': 1.2.11(zod@3.25.76)
+ '@opentelemetry/api': 1.9.0
+ jsondiffpatch: 0.6.0
+ zod: 3.25.76
+ optionalDependencies:
+ react: 19.1.0
+
ajv-formats@2.1.1(ajv@8.17.1):
optionalDependencies:
ajv: 8.17.1
@@ -26691,7 +26835,7 @@ snapshots:
dependencies:
dequal: 2.0.3
react: 19.1.0
- use-sync-external-store: 1.4.0(react@19.1.0)
+ use-sync-external-store: 1.5.0(react@19.1.0)
symbol-tree@3.2.4: {}