Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions sentry_sdk/integrations/langchain.py
Original file line number Diff line number Diff line change
Expand Up @@ -975,7 +975,7 @@ def new_invoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
origin=LangchainIntegration.origin,
) as span:
if run_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, run_name)
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, run_name)

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
Expand Down Expand Up @@ -1035,7 +1035,7 @@ def new_stream(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
span.__enter__()

if run_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, run_name)
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, run_name)

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
Expand Down
169 changes: 169 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -1126,6 +1126,175 @@ def nonstreaming_chat_completions_model_response():
)


@pytest.fixture
def streaming_chat_completions_model_responses():
    """Return a generator function yielding two streamed chat-completion turns.

    Turn 1 ("chatcmpl-turn-1") streams an assistant tool call to
    ``get_word_length`` (name chunk, then arguments chunk), a content chunk,
    a ``function_call`` finish chunk, and a final usage-only chunk.
    Turn 2 ("chatcmpl-turn-2") streams the assistant's final answer, a
    ``stop`` finish chunk, and a usage-only chunk.
    """

    def inner():
        # Alias the long module path once; every chunk below uses these types.
        chunk_mod = openai.types.chat.chat_completion_chunk

        def chunk(turn_id, choices, usage=None):
            # Shared boilerplate for every ChatCompletionChunk. ``usage`` is
            # only passed through when present so the field stays unset on
            # intermediate chunks, matching a real streaming response.
            kwargs = {
                "id": turn_id,
                "object": "chat.completion.chunk",
                "created": 10000000,
                "model": "gpt-3.5-turbo",
                "choices": choices,
            }
            if usage is not None:
                kwargs["usage"] = usage
            return chunk_mod.ChatCompletionChunk(**kwargs)

        def choice(delta, finish_reason=None):
            # All fixture choices sit at index 0.
            return chunk_mod.Choice(
                index=0, delta=delta, finish_reason=finish_reason
            )

        # --- Turn 1: assistant calls the get_word_length tool ---
        name_call = chunk_mod.ChoiceDeltaToolCall(
            index=0,
            id="call_BbeyNhCKa6kYLYzrD40NGm3b",
            type="function",
            function=chunk_mod.ChoiceDeltaToolCallFunction(
                name="get_word_length",
                arguments="",
            ),
        )
        args_call = chunk_mod.ChoiceDeltaToolCall(
            index=0,
            function=chunk_mod.ChoiceDeltaToolCallFunction(
                arguments='{"word": "eudca"}',
            ),
        )
        yield [
            chunk(
                "chatcmpl-turn-1",
                [choice(chunk_mod.ChoiceDelta(role="assistant"))],
            ),
            chunk(
                "chatcmpl-turn-1",
                [choice(chunk_mod.ChoiceDelta(tool_calls=[name_call]))],
            ),
            chunk(
                "chatcmpl-turn-1",
                [choice(chunk_mod.ChoiceDelta(tool_calls=[args_call]))],
            ),
            chunk(
                "chatcmpl-turn-1",
                [choice(chunk_mod.ChoiceDelta(content="5"))],
            ),
            chunk(
                "chatcmpl-turn-1",
                [choice(chunk_mod.ChoiceDelta(), finish_reason="function_call")],
            ),
            # Final usage-only chunk: empty choices, token accounting attached.
            chunk(
                "chatcmpl-turn-1",
                [],
                usage=chunk_mod.CompletionUsage(
                    prompt_tokens=142,
                    completion_tokens=50,
                    total_tokens=192,
                ),
            ),
        ]

        # --- Turn 2: assistant streams the final natural-language answer ---
        yield [
            chunk(
                "chatcmpl-turn-2",
                [choice(chunk_mod.ChoiceDelta(role="assistant"))],
            ),
            chunk(
                "chatcmpl-turn-2",
                [
                    choice(
                        chunk_mod.ChoiceDelta(
                            content="The word eudca has 5 letters."
                        )
                    )
                ],
            ),
            chunk(
                "chatcmpl-turn-2",
                [choice(chunk_mod.ChoiceDelta(), finish_reason="stop")],
            ),
            chunk(
                "chatcmpl-turn-2",
                [],
                usage=chunk_mod.CompletionUsage(
                    prompt_tokens=89,
                    completion_tokens=28,
                    total_tokens=117,
                ),
            ),
        ]

    return inner


@pytest.fixture
def responses_tool_call_model_responses():
def inner(
Expand Down
Loading
Loading