|
| 1 | +/* eslint-disable max-lines */ |
1 | 2 | /**
|
2 | 3 | * AI SDK Telemetry Attributes
|
3 | 4 | * Based on https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data
|
@@ -269,6 +270,15 @@ export const AI_MODEL_PROVIDER_ATTRIBUTE = 'ai.model.provider';
|
269 | 270 | */
|
270 | 271 | export const AI_REQUEST_HEADERS_ATTRIBUTE = 'ai.request.headers';
|
271 | 272 |
|
/**
 * Basic LLM span information
 * Multiple spans
 *
 * Provider specific metadata returned with the generation response.
 * The value holds provider-keyed metadata (see the `ProviderMetadata` shape
 * declared later in this file).
 * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information
 */
export const AI_RESPONSE_PROVIDER_METADATA_ATTRIBUTE = 'ai.response.providerMetadata';

272 | 282 | /**
|
273 | 283 | * Basic LLM span information
|
274 | 284 | * Multiple spans
|
@@ -792,3 +802,225 @@ export const AI_TOOL_CALL_SPAN_ATTRIBUTES = {
|
792 | 802 | AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
|
793 | 803 | AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE,
|
794 | 804 | } as const;
|
| 805 | + |
| 806 | +// ============================================================================= |
| 807 | +// PROVIDER METADATA |
| 808 | +// ============================================================================= |
| 809 | + |
| 810 | +/** |
| 811 | + * OpenAI Provider Metadata |
| 812 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/openai |
| 813 | + * @see https://github.com/vercel/ai/blob/65e042afde6aad4da9d7a62526ece839eb34f9a5/packages/openai/src/openai-chat-language-model.ts#L397-L416 |
| 814 | + * @see https://github.com/vercel/ai/blob/65e042afde6aad4da9d7a62526ece839eb34f9a5/packages/openai/src/responses/openai-responses-language-model.ts#L377C7-L384 |
| 815 | + */ |
| 816 | +interface OpenAiProviderMetadata { |
| 817 | + /** |
| 818 | + * The number of predicted output tokens that were accepted. |
| 819 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/openai#predicted-outputs |
| 820 | + */ |
| 821 | + acceptedPredictionTokens?: number; |
| 822 | + |
| 823 | + /** |
| 824 | + * The number of predicted output tokens that were rejected. |
| 825 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/openai#predicted-outputs |
| 826 | + */ |
| 827 | + rejectedPredictionTokens?: number; |
| 828 | + |
| 829 | + /** |
| 830 | + * The number of reasoning tokens that the model generated. |
| 831 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/openai#responses-models |
| 832 | + */ |
| 833 | + reasoningTokens?: number; |
| 834 | + |
| 835 | + /** |
| 836 | + * The number of prompt tokens that were a cache hit. |
| 837 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/openai#responses-models |
| 838 | + */ |
| 839 | + cachedPromptTokens?: number; |
| 840 | + |
| 841 | + /** |
| 842 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/openai#responses-models |
| 843 | + * |
| 844 | + * The ID of the response. Can be used to continue a conversation. |
| 845 | + */ |
| 846 | + responseId?: string; |
| 847 | +} |
| 848 | + |
| 849 | +/** |
| 850 | + * Anthropic Provider Metadata |
| 851 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/anthropic |
| 852 | + * @see https://github.com/vercel/ai/blob/65e042afde6aad4da9d7a62526ece839eb34f9a5/packages/anthropic/src/anthropic-messages-language-model.ts#L346-L352 |
| 853 | + */ |
| 854 | +interface AnthropicProviderMetadata { |
| 855 | + /** |
| 856 | + * The number of tokens that were used to create the cache. |
| 857 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/anthropic#cache-control |
| 858 | + */ |
| 859 | + cacheCreationInputTokens?: number; |
| 860 | + |
| 861 | + /** |
| 862 | + * The number of tokens that were read from the cache. |
| 863 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/anthropic#cache-control |
| 864 | + */ |
| 865 | + cacheReadInputTokens?: number; |
| 866 | +} |
| 867 | + |
| 868 | +/** |
| 869 | + * Amazon Bedrock Provider Metadata |
| 870 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/amazon-bedrock |
| 871 | + * @see https://github.com/vercel/ai/blob/65e042afde6aad4da9d7a62526ece839eb34f9a5/packages/amazon-bedrock/src/bedrock-chat-language-model.ts#L263-L280 |
| 872 | + */ |
| 873 | +interface AmazonBedrockProviderMetadata { |
| 874 | + /** |
| 875 | + * @see https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ConverseTrace.html |
| 876 | + */ |
| 877 | + trace?: { |
| 878 | + /** |
| 879 | + * The guardrail trace object. |
| 880 | + * @see https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_GuardrailTraceAssessment.html |
| 881 | + * |
| 882 | + * This was purposely left as unknown as it's a complex object. This can be typed in the future |
| 883 | + * if the SDK decides to support bedrock in a more advanced way. |
| 884 | + */ |
| 885 | + guardrail?: unknown; |
| 886 | + /** |
| 887 | + * The request's prompt router. |
| 888 | + * @see https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_PromptRouterTrace.html |
| 889 | + */ |
| 890 | + promptRouter?: { |
| 891 | + /** |
| 892 | + * The ID of the invoked model. |
| 893 | + */ |
| 894 | + invokedModelId?: string; |
| 895 | + }; |
| 896 | + }; |
| 897 | + usage?: { |
| 898 | + /** |
| 899 | + * The number of tokens that were read from the cache. |
| 900 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/amazon-bedrock#cache-points |
| 901 | + */ |
| 902 | + cacheReadInputTokens?: number; |
| 903 | + |
| 904 | + /** |
| 905 | + * The number of tokens that were written to the cache. |
| 906 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/amazon-bedrock#cache-points |
| 907 | + */ |
| 908 | + cacheWriteInputTokens?: number; |
| 909 | + }; |
| 910 | +} |
| 911 | + |
| 912 | +/** |
| 913 | + * Google Generative AI Provider Metadata |
| 914 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai |
| 915 | + */ |
| 916 | +export interface GoogleGenerativeAIProviderMetadata { |
| 917 | + /** |
| 918 | + * @see https://github.com/vercel/ai/blob/65e042afde6aad4da9d7a62526ece839eb34f9a5/packages/google/src/google-generative-ai-prompt.ts#L28-L30 |
| 919 | + */ |
| 920 | + groundingMetadata: null | { |
| 921 | + /** |
| 922 | + * Array of search queries used to retrieve information |
| 923 | + * @example ["What's the weather in Chicago this weekend?"] |
| 924 | + * |
| 925 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai#search-grounding |
| 926 | + */ |
| 927 | + webSearchQueries: string[] | null; |
| 928 | + /** |
| 929 | + * Contains the main search result content used as an entry point |
| 930 | + * The `renderedContent` field contains the formatted content |
| 931 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai#search-grounding |
| 932 | + */ |
| 933 | + searchEntryPoint?: { |
| 934 | + renderedContent: string; |
| 935 | + } | null; |
| 936 | + /** |
| 937 | + * Contains details about how specific response parts are supported by search results |
| 938 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai#search-grounding |
| 939 | + */ |
| 940 | + groundingSupports: Array<{ |
| 941 | + /** |
| 942 | + * Information about the grounded text segment. |
| 943 | + */ |
| 944 | + segment: { |
| 945 | + /** |
| 946 | + * The start index of the text segment. |
| 947 | + */ |
| 948 | + startIndex?: number | null; |
| 949 | + /** |
| 950 | + * The end index of the text segment. |
| 951 | + */ |
| 952 | + endIndex?: number | null; |
| 953 | + /** |
| 954 | + * The actual text segment. |
| 955 | + */ |
| 956 | + text?: string | null; |
| 957 | + }; |
| 958 | + /** |
| 959 | + * References to supporting search result chunks. |
| 960 | + */ |
| 961 | + groundingChunkIndices?: number[] | null; |
| 962 | + /** |
| 963 | + * Confidence scores (0-1) for each supporting chunk. |
| 964 | + */ |
| 965 | + confidenceScores?: number[] | null; |
| 966 | + }> | null; |
| 967 | + }; |
| 968 | + /** |
| 969 | + * @see https://github.com/vercel/ai/blob/65e042afde6aad4da9d7a62526ece839eb34f9a5/packages/google/src/google-generative-ai-language-model.ts#L620-L627 |
| 970 | + * @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters |
| 971 | + */ |
| 972 | + safetyRatings?: null | unknown; |
| 973 | +} |
| 974 | + |
| 975 | +/** |
| 976 | + * DeepSeek Provider Metadata |
| 977 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/deepseek |
| 978 | + */ |
| 979 | +interface DeepSeekProviderMetadata { |
| 980 | + /** |
| 981 | + * The number of tokens that were cache hits. |
| 982 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/deepseek#cache-token-usage |
| 983 | + */ |
| 984 | + promptCacheHitTokens?: number; |
| 985 | + |
| 986 | + /** |
| 987 | + * The number of tokens that were cache misses. |
| 988 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/deepseek#cache-token-usage |
| 989 | + */ |
| 990 | + promptCacheMissTokens?: number; |
| 991 | +} |
| 992 | + |
| 993 | +/** |
| 994 | + * Perplexity Provider Metadata |
| 995 | + * @see https://ai-sdk.dev/providers/ai-sdk-providers/perplexity |
| 996 | + */ |
| 997 | +interface PerplexityProviderMetadata { |
| 998 | + /** |
| 999 | + * Object containing citationTokens and numSearchQueries metrics |
| 1000 | + */ |
| 1001 | + usage?: { |
| 1002 | + citationTokens?: number; |
| 1003 | + numSearchQueries?: number; |
| 1004 | + }; |
| 1005 | + /** |
| 1006 | + * Array of image URLs when return_images is enabled. |
| 1007 | + * |
| 1008 | + * You can enable image responses by setting return_images: true in the provider options. |
| 1009 | + * This feature is only available to Perplexity Tier-2 users and above. |
| 1010 | + */ |
| 1011 | + images?: Array<{ |
| 1012 | + imageUrl?: string; |
| 1013 | + originUrl?: string; |
| 1014 | + height?: number; |
| 1015 | + width?: number; |
| 1016 | + }>; |
| 1017 | +} |
| 1018 | + |
| 1019 | +export interface ProviderMetadata { |
| 1020 | + openai?: OpenAiProviderMetadata; |
| 1021 | + anthropic?: AnthropicProviderMetadata; |
| 1022 | + bedrock?: AmazonBedrockProviderMetadata; |
| 1023 | + google?: GoogleGenerativeAIProviderMetadata; |
| 1024 | + deepseek?: DeepSeekProviderMetadata; |
| 1025 | + perplexity?: PerplexityProviderMetadata; |
| 1026 | +} |
0 commit comments