Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
166 changes: 165 additions & 1 deletion packages/core/src/tracing/ai/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,30 @@
*/
import { captureException } from '../../exports';
import { getClient } from '../../currentScopes';
import type { Span } from '../../types-hoist/span';
import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes';
import type { Span, SpanAttributeValue } from '../../types-hoist/span';
import { isThenable } from '../../utils/is';
import {
GEN_AI_CONVERSATION_ID_ATTRIBUTE,
GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE,
GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE,
GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE,
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE,
GEN_AI_REQUEST_STREAM_ATTRIBUTE,
GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
GEN_AI_REQUEST_TOP_K_ATTRIBUTE,
GEN_AI_REQUEST_TOP_P_ATTRIBUTE,
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
GEN_AI_RESPONSE_ID_ATTRIBUTE,
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_STREAMING_ATTRIBUTE,
GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
GEN_AI_SYSTEM_ATTRIBUTE,
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
Expand Down Expand Up @@ -63,6 +78,155 @@
return currentPath ? `${currentPath}.${prop}` : prop;
}

/**
 * Resolve the model name for a gen_ai span.
 *
 * `params.model` covers OpenAI/Anthropic request params; Google GenAI chat
 * instances expose the model on the calling context as `model` or
 * `modelVersion` instead.
 *
 * @param params - Request parameters object, if the call had one.
 * @param context - The `this` value of the instrumented method (Google GenAI).
 * @returns The model identifier, or `'unknown'` when none can be found.
 */
export function extractModel(params: Record<string, unknown> | undefined, context?: unknown): string {
  const paramModel = params?.model;
  if (typeof paramModel === 'string') {
    return paramModel;
  }

  // Google GenAI chat instances store the model on the context object
  if (typeof context === 'object' && context !== null) {
    const { model, modelVersion } = context as Record<string, unknown>;
    if (typeof model === 'string') {
      return model;
    }
    if (typeof modelVersion === 'string') {
      return modelVersion;
    }
  }

  return 'unknown';
}

/**
 * Copy `source[key]` into `attributes[attribute]` when the key is present.
 *
 * Skips explicitly-`undefined` values: `key in source` is true for
 * `{ temperature: undefined }`, but `undefined` is not a valid
 * `SpanAttributeValue` and would pollute the attributes record.
 *
 * @param attributes - Target span-attribute record (mutated in place).
 * @param source - Object to read the value from.
 * @param key - Property name to look up on `source`.
 * @param attribute - Span attribute name to write.
 */
function extractIfPresent(
  attributes: Record<string, SpanAttributeValue>,
  source: Record<string, unknown>,
  key: string,
  attribute: string,
): void {
  if (key in source && source[key] !== undefined) {
    attributes[attribute] = source[key] as SpanAttributeValue;
  }
}

/**
 * Extract available tools from request parameters.
 * Handles OpenAI (params.tools + web_search_options), Anthropic (params.tools),
 * and Google GenAI (config.tools[].functionDeclarations).
 *
 * @returns JSON-serialized tool list, or `undefined` when there are none.
 */
function extractTools(params: Record<string, unknown>, config: Record<string, unknown>): string | undefined {
  // Google GenAI: tool entries wrap their definitions in `functionDeclarations`.
  // When any entry has that key, serialize the flattened declarations (or
  // nothing at all if they are empty) and ignore the OpenAI/Anthropic path.
  const configTools = 'tools' in config && Array.isArray(config.tools) ? config.tools : undefined;
  const isGenAiShape = (tool: unknown): boolean =>
    !!tool && typeof tool === 'object' && 'functionDeclarations' in (tool as Record<string, unknown>);

  if (configTools?.some(isGenAiShape)) {
    const declarations: unknown[] = [];
    for (const tool of configTools as Array<{ functionDeclarations?: unknown[] }>) {
      declarations.push(...(tool.functionDeclarations ?? []));
    }
    return declarations.length > 0 ? JSON.stringify(declarations) : undefined;
  }

  // OpenAI / Anthropic: tools live at the top level. OpenAI additionally
  // treats `web_search_options` as a synthetic tool entry appended at the end.
  const availableTools: unknown[] = Array.isArray(params.tools) ? [...params.tools] : [];
  if (params.web_search_options && typeof params.web_search_options === 'object') {
    availableTools.push({ type: 'web_search_options', ...(params.web_search_options as Record<string, unknown>) });
  }

  return availableTools.length > 0 ? JSON.stringify(availableTools) : undefined;
}

/**
 * Extract conversation ID from request parameters.
 * Supports OpenAI Conversations API and previous_response_id chaining;
 * the explicit `conversation` field wins when both are present.
 */
function extractConversationId(params: Record<string, unknown>): string | undefined {
  const { conversation, previous_response_id: previousResponseId } = params;
  if (typeof conversation === 'string') {
    return conversation;
  }
  return typeof previousResponseId === 'string' ? previousResponseId : undefined;
}

/**
 * Extract request attributes from AI method arguments.
 * Shared across all AI provider integrations (OpenAI, Anthropic, Google GenAI).
 *
 * @param system - gen_ai system identifier (e.g. 'openai', 'anthropic').
 * @param origin - Sentry span origin (e.g. 'auto.ai.openai').
 * @param operationName - gen_ai operation name (e.g. 'chat').
 * @param args - Raw arguments passed to the instrumented SDK method.
 * @param context - The `this` value of the call, used for model lookup.
 * @returns Span attributes describing the request.
 */
export function extractRequestAttributes(
  system: string,
  origin: string,
  operationName: string,
  args: unknown[],
  context?: unknown,
): Record<string, SpanAttributeValue> {
  const [firstArg] = args;
  const params = typeof firstArg === 'object' && firstArg !== null ? (firstArg as Record<string, unknown>) : undefined;

  const attributes: Record<string, SpanAttributeValue> = {
    [GEN_AI_SYSTEM_ATTRIBUTE]: system,
    [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationName,
    [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: origin,
    [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: extractModel(params, context),
  };

  if (!params) {
    return attributes;
  }

  // Google GenAI nests generation params under config; OpenAI/Anthropic are flat
  const config =
    typeof params.config === 'object' && params.config !== null ? (params.config as Record<string, unknown>) : params;

  // Generation parameters — the snake_case key (OpenAI/Anthropic) is read
  // first and the camelCase key (Google GenAI) second, so the camelCase
  // spelling wins if both are somehow present.
  const generationParams: Array<[string, string]> = [
    ['temperature', GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE],
    ['top_p', GEN_AI_REQUEST_TOP_P_ATTRIBUTE],
    ['topP', GEN_AI_REQUEST_TOP_P_ATTRIBUTE],
    ['top_k', GEN_AI_REQUEST_TOP_K_ATTRIBUTE],
    ['topK', GEN_AI_REQUEST_TOP_K_ATTRIBUTE],
    ['frequency_penalty', GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE],
    ['frequencyPenalty', GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE],
    ['presence_penalty', GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE],
    ['presencePenalty', GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE],
    ['max_tokens', GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE],
    ['maxOutputTokens', GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE],
  ];
  for (const [key, attribute] of generationParams) {
    extractIfPresent(attributes, config, key, attribute);
  }

  // These are read from the top-level params object, not from config
  extractIfPresent(attributes, params, 'stream', GEN_AI_REQUEST_STREAM_ATTRIBUTE);
  extractIfPresent(attributes, params, 'encoding_format', GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE);
  extractIfPresent(attributes, params, 'dimensions', GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE);

  // Tools
  const tools = extractTools(params, config);
  if (tools) {
    attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] = tools;
  }

  // Conversation ID (OpenAI)
  const conversationId = extractConversationId(params);
  if (conversationId) {
    attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE] = conversationId;
  }

  return attributes;
}

/**
* Set token usage attributes
* @param span - The span to add attributes to
Expand Down Expand Up @@ -306,4 +470,4 @@
return typeof value === 'function' ? value.bind(source) : value;
},
}) as Promise<R>;
}

Check failure on line 473 in packages/core/src/tracing/ai/utils.ts

View workflow job for this annotation

GitHub Actions / Lint

eslint(max-lines)

File has too many lines (320).
61 changes: 11 additions & 50 deletions packages/core/src/tracing/anthropic-ai/index.ts
Original file line number Diff line number Diff line change
@@ -1,28 +1,19 @@
import { captureException } from '../../exports';
import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes';
import { SPAN_STATUS_ERROR } from '../../tracing';
import { startSpan, startSpanManual } from '../../tracing/trace';
import type { Span, SpanAttributeValue } from '../../types-hoist/span';
import {
GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_PROMPT_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE,
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
GEN_AI_REQUEST_STREAM_ATTRIBUTE,
GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
GEN_AI_REQUEST_TOP_K_ATTRIBUTE,
GEN_AI_REQUEST_TOP_P_ATTRIBUTE,
GEN_AI_RESPONSE_ID_ATTRIBUTE,
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
GEN_AI_SYSTEM_ATTRIBUTE,
} from '../ai/gen-ai-attributes';
import type { InstrumentedMethodEntry } from '../ai/utils';
import {
buildMethodPath,
extractRequestAttributes,
resolveAIRecordingOptions,
setTokenUsageAttributes,
wrapPromiseWithMethods,
Expand All @@ -32,42 +23,6 @@ import { instrumentAsyncIterableStream, instrumentMessageStream } from './stream
import type { AnthropicAiOptions, AnthropicAiResponse, AnthropicAiStreamingEvent, ContentBlock } from './types';
import { handleResponseError, messagesFromParams, setMessagesAttribute } from './utils';

/**
 * Extract gen_ai span attributes from Anthropic SDK method arguments.
 *
 * @param args - Raw arguments passed to the instrumented SDK method.
 * @param methodPath - Dot-separated SDK method path (e.g. 'messages.create').
 * @param operationName - gen_ai operation name used in the attributes.
 * @returns Span attributes describing the request.
 */
function extractRequestAttributes(args: unknown[], methodPath: string, operationName: string): Record<string, unknown> {
  const attributes: Record<string, unknown> = {
    [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
    [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationName,
    [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
  };

  if (args.length > 0 && typeof args[0] === 'object' && args[0] !== null) {
    const params = args[0] as Record<string, unknown>;
    // Serialize the tool definitions only when the request declares any
    if (params.tools && Array.isArray(params.tools)) {
      attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] = JSON.stringify(params.tools);
    }

    attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = params.model ?? 'unknown';
    // Copy optional generation parameters verbatim when supplied
    if ('temperature' in params) attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE] = params.temperature;
    if ('top_p' in params) attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE] = params.top_p;
    if ('stream' in params) attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] = params.stream;
    if ('top_k' in params) attributes[GEN_AI_REQUEST_TOP_K_ATTRIBUTE] = params.top_k;
    if ('frequency_penalty' in params)
      attributes[GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE] = params.frequency_penalty;
    if ('max_tokens' in params) attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE] = params.max_tokens;
  } else {
    if (methodPath === 'models.retrieve' || methodPath === 'models.get') {
      // models.retrieve(model-id) and models.get(model-id) take the model ID
      // as a positional (non-object) first argument
      attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = args[0];
    } else {
      attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = 'unknown';
    }
  }

  return attributes;
}

/**
* Add private request attributes to spans.
* This is only recorded if recordInputs is true.
Expand Down Expand Up @@ -183,7 +138,7 @@ function handleStreamingRequest<T extends unknown[], R>(
target: (...args: T) => R | Promise<R>,
context: unknown,
args: T,
requestAttributes: Record<string, unknown>,
requestAttributes: Record<string, SpanAttributeValue>,
operationName: string,
methodPath: string,
params: Record<string, unknown> | undefined,
Expand All @@ -195,7 +150,7 @@ function handleStreamingRequest<T extends unknown[], R>(
const spanConfig = {
name: `${operationName} ${model}`,
op: `gen_ai.${operationName}`,
attributes: requestAttributes as Record<string, SpanAttributeValue>,
attributes: requestAttributes,
};

// messages.stream() always returns a sync MessageStream, even with stream: true param
Expand Down Expand Up @@ -254,7 +209,13 @@ function instrumentMethod<T extends unknown[], R>(
return new Proxy(originalMethod, {
apply(target, thisArg, args: T): R | Promise<R> {
const operationName = instrumentedMethod.operation || 'unknown';
const requestAttributes = extractRequestAttributes(args, methodPath, operationName);
const requestAttributes = extractRequestAttributes('anthropic', 'auto.ai.anthropic', operationName, args);

// Anthropic models.retrieve/models.get take model ID as positional string arg
if ((methodPath === 'models.retrieve' || methodPath === 'models.get') && typeof args[0] === 'string') {
requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = args[0];
}

const model = requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] ?? 'unknown';

const params = typeof args[0] === 'object' ? (args[0] as Record<string, unknown>) : undefined;
Expand Down Expand Up @@ -283,7 +244,7 @@ function instrumentMethod<T extends unknown[], R>(
{
name: `${operationName} ${model}`,
op: `gen_ai.${operationName}`,
attributes: requestAttributes as Record<string, SpanAttributeValue>,
attributes: requestAttributes,
},
span => {
originalResult = target.apply(context, args) as Promise<R>;
Expand Down
Loading
Loading