Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions packages/ai-semantic-conventions/src/SemanticAttributes.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ export const SpanAttributes = {
LLM_REQUEST_TOP_P: "gen_ai.request.top_p",
LLM_PROMPTS: "gen_ai.prompt",
LLM_COMPLETIONS: "gen_ai.completion",
LLM_INPUT_MESSAGES: "gen_ai.input.messages",
LLM_OUTPUT_MESSAGES: "gen_ai.output.messages",
LLM_RESPONSE_MODEL: "gen_ai.response.model",
LLM_USAGE_PROMPT_TOKENS: "gen_ai.usage.prompt_tokens",
LLM_USAGE_COMPLETION_TOKENS: "gen_ai.usage.completion_tokens",
Expand Down
2 changes: 1 addition & 1 deletion packages/instrumentation-anthropic/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "^2.0.1",
"@opentelemetry/instrumentation": "^0.203.0",
"@opentelemetry/semantic-conventions": "^1.36.0",
"@opentelemetry/semantic-conventions": "^1.37.0",
"@traceloop/ai-semantic-conventions": "workspace:*",
"tslib": "^2.8.1"
},
Expand Down
2 changes: 1 addition & 1 deletion packages/instrumentation-bedrock/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "^2.0.1",
"@opentelemetry/instrumentation": "^0.203.0",
"@opentelemetry/semantic-conventions": "^1.36.0",
"@opentelemetry/semantic-conventions": "^1.37.0",
"@traceloop/ai-semantic-conventions": "workspace:*",
"tslib": "^2.8.1"
},
Expand Down
2 changes: 1 addition & 1 deletion packages/instrumentation-chromadb/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "^2.0.1",
"@opentelemetry/instrumentation": "^0.203.0",
"@opentelemetry/semantic-conventions": "^1.36.0",
"@opentelemetry/semantic-conventions": "^1.37.0",
"@traceloop/ai-semantic-conventions": "workspace:*",
"tslib": "^2.8.1"
},
Expand Down
2 changes: 1 addition & 1 deletion packages/instrumentation-cohere/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "^2.0.1",
"@opentelemetry/instrumentation": "^0.203.0",
"@opentelemetry/semantic-conventions": "^1.36.0",
"@opentelemetry/semantic-conventions": "^1.37.0",
"@traceloop/ai-semantic-conventions": "workspace:*",
"tslib": "^2.8.1"
},
Expand Down
2 changes: 1 addition & 1 deletion packages/instrumentation-langchain/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "^2.0.1",
"@opentelemetry/instrumentation": "^0.203.0",
"@opentelemetry/semantic-conventions": "^1.36.0",
"@opentelemetry/semantic-conventions": "^1.37.0",
"@traceloop/ai-semantic-conventions": "workspace:*",
"tslib": "^2.8.1"
},
Expand Down
2 changes: 1 addition & 1 deletion packages/instrumentation-llamaindex/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "^2.0.1",
"@opentelemetry/instrumentation": "^0.203.0",
"@opentelemetry/semantic-conventions": "^1.36.0",
"@opentelemetry/semantic-conventions": "^1.37.0",
"@traceloop/ai-semantic-conventions": "workspace:*",
"lodash": "^4.17.21",
"tslib": "^2.8.1"
Expand Down
2 changes: 1 addition & 1 deletion packages/instrumentation-openai/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "^2.0.1",
"@opentelemetry/instrumentation": "^0.203.0",
"@opentelemetry/semantic-conventions": "^1.36.0",
"@opentelemetry/semantic-conventions": "^1.37.0",
"@traceloop/ai-semantic-conventions": "workspace:*",
"js-tiktoken": "^1.0.20",
"tslib": "^2.8.1"
Expand Down
91 changes: 91 additions & 0 deletions packages/instrumentation-openai/test/instrumentation.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,47 @@ import {
InMemorySpanExporter,
SimpleSpanProcessor,
} from "@opentelemetry/sdk-trace-node";
// Minimal transformation function to test LLM_INPUT_MESSAGES and LLM_OUTPUT_MESSAGES

/**
 * Collects flattened `<prefix>.<i>.role` / `<prefix>.<i>.content` span
 * attributes (i = 0, 1, 2, …) into an ordered list of message objects of
 * the shape `{ role, parts: [{ type: "text", content }] }`.
 *
 * Iteration stops at the first index whose `role` attribute is absent or
 * falsy; an index whose role or content is falsy is skipped but does not
 * stop the scan (matches the original loop semantics).
 */
const collectIndexedMessages = (
  attributes: Record<string, unknown>,
  prefix: string,
): { role: unknown; parts: { type: string; content: unknown }[] }[] => {
  const messages: { role: unknown; parts: { type: string; content: unknown }[] }[] =
    [];
  for (let i = 0; attributes[`${prefix}.${i}.role`]; i++) {
    const role = attributes[`${prefix}.${i}.role`];
    const content = attributes[`${prefix}.${i}.content`];
    if (role && content) {
      messages.push({
        role,
        parts: [{ type: "text", content }],
      });
    }
  }
  return messages;
};

/**
 * Rewrites the legacy flattened prompt/completion attributes into the
 * aggregate, JSON-encoded LLM_INPUT_MESSAGES / LLM_OUTPUT_MESSAGES
 * attributes. Mutates `attributes` in place; each aggregate attribute is
 * only set when at least one corresponding message was found.
 */
const transformToStandardFormat = (attributes: Record<string, unknown>) => {
  // Transform prompts to LLM_INPUT_MESSAGES
  const inputMessages = collectIndexedMessages(
    attributes,
    SpanAttributes.LLM_PROMPTS,
  );
  if (inputMessages.length > 0) {
    attributes[SpanAttributes.LLM_INPUT_MESSAGES] =
      JSON.stringify(inputMessages);
  }

  // Transform completions to LLM_OUTPUT_MESSAGES
  const outputMessages = collectIndexedMessages(
    attributes,
    SpanAttributes.LLM_COMPLETIONS,
  );
  if (outputMessages.length > 0) {
    attributes[SpanAttributes.LLM_OUTPUT_MESSAGES] =
      JSON.stringify(outputMessages);
  }
};

import type * as OpenAIModule from "openai";
import { toFile } from "openai";
Expand Down Expand Up @@ -878,4 +919,54 @@ describe("Test OpenAI instrumentation", async function () {
4160,
);
});

it("should set LLM_INPUT_MESSAGES and LLM_OUTPUT_MESSAGES attributes for chat completions", async () => {
const result = await openai.chat.completions.create({
messages: [
{ role: "user", content: "Tell me a joke about OpenTelemetry" },
],
model: "gpt-3.5-turbo",
});

const spans = memoryExporter.getFinishedSpans();
const completionSpan = spans.find((span) => span.name === "openai.chat");

assert.ok(result);
assert.ok(completionSpan);

// Apply transformations to create LLM_INPUT_MESSAGES and LLM_OUTPUT_MESSAGES
transformToStandardFormat(completionSpan.attributes);

// Verify LLM_INPUT_MESSAGES attribute exists and is valid JSON
assert.ok(completionSpan.attributes[SpanAttributes.LLM_INPUT_MESSAGES]);
const inputMessages = JSON.parse(
completionSpan.attributes[SpanAttributes.LLM_INPUT_MESSAGES] as string,
);
assert.ok(Array.isArray(inputMessages));
assert.strictEqual(inputMessages.length, 1);

// Check user message structure
assert.strictEqual(inputMessages[0].role, "user");
assert.ok(Array.isArray(inputMessages[0].parts));
assert.strictEqual(inputMessages[0].parts[0].type, "text");
assert.strictEqual(
inputMessages[0].parts[0].content,
"Tell me a joke about OpenTelemetry",
);

// Verify LLM_OUTPUT_MESSAGES attribute exists and is valid JSON
assert.ok(completionSpan.attributes[SpanAttributes.LLM_OUTPUT_MESSAGES]);
const outputMessages = JSON.parse(
completionSpan.attributes[SpanAttributes.LLM_OUTPUT_MESSAGES] as string,
);
assert.ok(Array.isArray(outputMessages));
assert.strictEqual(outputMessages.length, 1);

// Check assistant response structure
assert.strictEqual(outputMessages[0].role, "assistant");
assert.ok(Array.isArray(outputMessages[0].parts));
assert.strictEqual(outputMessages[0].parts[0].type, "text");
assert.ok(outputMessages[0].parts[0].content);
assert.ok(typeof outputMessages[0].parts[0].content === "string");
});
});
Loading
Loading