Commit 0f36282

CR Changes

1 parent 6dd6483 commit 0f36282

4 files changed, +86 -223 lines changed

packages/instrumentation-langchain/package.json

Lines changed: 2 additions & 1 deletion
@@ -22,7 +22,7 @@
   "author": "Traceloop",
   "license": "Apache-2.0",
   "engines": {
-    "node": ">=14"
+    "node": ">=20"
   },
   "files": [
     "dist/**/*.js",
@@ -46,6 +46,7 @@
     }
   },
   "dependencies": {
+    "@langchain/core": ">=1.0.0 <2.0.0",
     "@opentelemetry/api": "^1.9.0",
     "@opentelemetry/core": "^2.0.1",
     "@opentelemetry/instrumentation": "^0.203.0",

packages/instrumentation-langchain/src/callback_handler.ts

Lines changed: 80 additions & 63 deletions
@@ -213,42 +213,57 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
       span.setAttribute(ATTR_GEN_AI_RESPONSE_ID, responseId);
     }

-    // Map raw finish reason to OTel standard value; unknown reasons pass through unchanged
-    const rawFinishReason = this.extractFinishReason(output);
-    const mappedFinishReason = rawFinishReason
-      ? (langchainFinishReasonMap[rawFinishReason] ?? rawFinishReason)
-      : null;
+    // Collect finish reasons from ALL candidates across ALL generation groups.
+    // LLMResult.generations is Generation[][] where outer = prompt batch, inner = n candidates.
+    // ATTR_GEN_AI_RESPONSE_FINISH_REASONS should contain one entry per candidate (OTel spec).
+    const allFinishReasons: string[] = [];
+    if (output.generations) {
+      for (const group of output.generations) {
+        if (group) {
+          for (const gen of group) {
+            const raw =
+              gen?.generationInfo?.finish_reason ||
+              gen?.generationInfo?.stop_reason ||
+              gen?.generationInfo?.done_reason ||
+              null;
+            if (raw) {
+              allFinishReasons.push(
+                langchainFinishReasonMap[raw] ?? raw,
+              );
+            }
+          }
+        }
+      }
+    }

     // Set finish reasons on span (metadata — NOT gated by traceContent)
-    if (mappedFinishReason) {
-      span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [
-        mappedFinishReason,
-      ]);
+    if (allFinishReasons.length > 0) {
+      span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, allFinishReasons);
     }

     if (
       this.traceContent &&
       output.generations &&
       output.generations.length > 0
     ) {
-      const outputMessages = output.generations.map((generation) => {
-        const text =
-          generation && generation.length > 0 ? generation[0].text : "";
-        // Extract per-generation finish reason
-        const genRaw =
-          generation?.[0]?.generationInfo?.finish_reason ||
-          generation?.[0]?.generationInfo?.stop_reason ||
-          generation?.[0]?.generationInfo?.done_reason ||
-          null;
-        const genFinishReason = genRaw
-          ? (langchainFinishReasonMap[genRaw] ?? genRaw)
-          : (mappedFinishReason ?? "");
-        return {
-          role: "assistant",
-          parts: [{ type: "text", content: text }],
-          finish_reason: genFinishReason,
-        };
-      });
+      // flatMap over all candidates in all groups — one output message per candidate
+      const outputMessages = output.generations.flatMap((group) =>
+        (group ?? []).map((gen) => {
+          const raw =
+            gen?.generationInfo?.finish_reason ||
+            gen?.generationInfo?.stop_reason ||
+            gen?.generationInfo?.done_reason ||
+            null;
+          const genFinishReason = raw
+            ? (langchainFinishReasonMap[raw] ?? raw)
+            : (allFinishReasons[0] ?? "");
+          return {
+            role: "assistant",
+            parts: [{ type: "text", content: gen?.text ?? "" }],
+            finish_reason: genFinishReason,
+          };
+        }),
+      );
       span.setAttribute(
         ATTR_GEN_AI_OUTPUT_MESSAGES,
         JSON.stringify(outputMessages),
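Note (illustrative, not part of this commit): the hunk above switches from reporting only the first generation's finish reason to one entry per candidate, matching the OTel expectation for gen_ai.response.finish_reasons. A standalone sketch of that collection logic, with hypothetical sample data and a pass-through standing in for langchainFinishReasonMap:

type Gen = { text?: string; generationInfo?: Record<string, string | undefined> };

// Mirrors the new nested loop: outer array = prompt batch, inner array = n candidates.
function collectFinishReasons(generations: (Gen[] | undefined)[]): string[] {
  const reasons: string[] = [];
  for (const group of generations) {
    for (const gen of group ?? []) {
      // Providers surface the reason under different keys.
      const raw =
        gen.generationInfo?.finish_reason ??
        gen.generationInfo?.stop_reason ??
        gen.generationInfo?.done_reason;
      if (raw) reasons.push(raw); // the handler additionally maps via langchainFinishReasonMap
    }
  }
  return reasons;
}

// Two candidates (n = 2) for a single prompt -> ["stop", "length"],
// so the span attribute now carries both instead of just the first.
console.log(
  collectFinishReasons([
    [
      { text: "Paris", generationInfo: { finish_reason: "stop" } },
      { text: "Paris, the capital of Fr", generationInfo: { finish_reason: "length" } },
    ],
  ]),
);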
@@ -264,9 +279,11 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
     if (usage.output_tokens || usage.output_tokens === 0) {
       span.setAttribute(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens);
     }
-    const totalTokens =
-      (usage.input_tokens || 0) + (usage.output_tokens || 0);
-    if (totalTokens > 0) {
+    const hasUsage =
+      usage.input_tokens != null || usage.output_tokens != null;
+    if (hasUsage) {
+      const totalTokens =
+        (usage.input_tokens || 0) + (usage.output_tokens || 0);
       span.setAttribute(
         SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS,
         totalTokens,
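Note (illustrative, not part of this commit): the new hasUsage guard changes which responses get a total-tokens attribute. A small sketch of the boundary cases:

// Returns the value the handler would now record as total tokens,
// or undefined when the provider reported no usage at all.
function totalTokensToReport(usage: {
  input_tokens?: number | null;
  output_tokens?: number | null;
}): number | undefined {
  const hasUsage = usage.input_tokens != null || usage.output_tokens != null;
  if (!hasUsage) return undefined; // attribute stays unset
  return (usage.input_tokens || 0) + (usage.output_tokens || 0);
}

console.log(totalTokensToReport({})); // undefined
console.log(totalTokensToReport({ input_tokens: 0, output_tokens: 0 })); // 0 (previously skipped by the old `totalTokens > 0` guard)
console.log(totalTokensToReport({ input_tokens: 12, output_tokens: 34 })); // 46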
@@ -331,23 +348,34 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
     _extra?: Record<string, unknown>,
   ): Promise<void> {
     const chainName = chain.id?.[chain.id.length - 1] || "unknown";
-    const agentName = runName || chainName;
-
-    const span = this.tracer.startSpan(
-      `${GEN_AI_OPERATION_NAME_VALUE_INVOKE_AGENT} ${agentName}`,
-      {
-        kind: SpanKind.INTERNAL,
-      },
-    );
+    const displayName = runName || chainName;
+
+    // Detect whether this chain is an agent executor vs a regular chain.
+    // Both pass runType: undefined in LangChain 1.x, so we use the class name.
+    // Only agent executors get invoke_agent; regular chains use "workflow" (custom,
+    // no OTel well-known value exists for generic chain execution).
+    const isAgent = this.isAgentChain(chainName);
+    const operationName = isAgent
+      ? GEN_AI_OPERATION_NAME_VALUE_INVOKE_AGENT
+      : "workflow";
+
+    const span = this.tracer.startSpan(`${operationName} ${displayName}`, {
+      kind: SpanKind.INTERNAL,
+    });

-    span.setAttributes({
-      [ATTR_GEN_AI_OPERATION_NAME]: GEN_AI_OPERATION_NAME_VALUE_INVOKE_AGENT,
+    const attributes: Record<string, string> = {
+      [ATTR_GEN_AI_OPERATION_NAME]: operationName,
       [ATTR_GEN_AI_PROVIDER_NAME]: "langchain",
-      [ATTR_GEN_AI_AGENT_NAME]: agentName,
       // Backward compatibility
       "traceloop.span.kind": "workflow",
-      "traceloop.workflow.name": agentName,
-    });
+      "traceloop.workflow.name": displayName,
+    };
+
+    if (isAgent) {
+      attributes[ATTR_GEN_AI_AGENT_NAME] = displayName;
+    }
+
+    span.setAttributes(attributes);

     if (this.traceContent) {
       span.setAttributes({
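Note (illustrative, not part of this commit): under the new branching, only agent-like chains get the invoke_agent operation and gen_ai.agent.name, while every other chain gets a "workflow" span. A self-contained sketch of the resulting span plan (spanPlanFor is a hypothetical helper; the literal attribute keys assume the usual OTel semconv string values behind the ATTR_* constants):

// Reproduces the branching above outside the handler, for illustration only.
function spanPlanFor(chainName: string, runName?: string) {
  const displayName = runName || chainName;
  const lower = chainName.toLowerCase();
  const isAgent =
    lower.includes("agent") || lower.includes("executor") || lower === "agentexecutor";
  const operationName = isAgent ? "invoke_agent" : "workflow";
  return {
    spanName: `${operationName} ${displayName}`,
    attributes: {
      "gen_ai.operation.name": operationName,
      ...(isAgent ? { "gen_ai.agent.name": displayName } : {}),
      // Backward-compatible Traceloop attributes are kept in both cases.
      "traceloop.span.kind": "workflow",
      "traceloop.workflow.name": displayName,
    },
  };
}

console.log(spanPlanFor("AgentExecutor").spanName); // "invoke_agent AgentExecutor"
console.log(spanPlanFor("RunnableSequence", "my_pipeline").spanName); // "workflow my_pipeline"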
@@ -505,26 +533,6 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
     return null;
   }

-  private extractFinishReason(output: LLMResult): string | null {
-    // Try to extract finish reason from LangChain's LLMResult
-    // LangChain exposes it in generationInfo or llmOutput
-    if (output.generations && output.generations.length > 0) {
-      const firstGen = output.generations[0];
-      if (firstGen && firstGen.length > 0) {
-        const genInfo = firstGen[0].generationInfo;
-        if (genInfo) {
-          // Different providers use different field names
-          const reason =
-            genInfo.finish_reason || genInfo.stop_reason || genInfo.done_reason;
-          if (reason && typeof reason === "string") {
-            return reason;
-          }
-        }
-      }
-    }
-    return null;
-  }
-
   private detectVendor(llm: Serialized): string {
     const className = llm.id?.[llm.id.length - 1] || "";

@@ -648,6 +656,15 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
     return "langchain";
   }

+  private isAgentChain(chainName: string): boolean {
+    const lower = chainName.toLowerCase();
+    return (
+      lower.includes("agent") ||
+      lower.includes("executor") ||
+      lower === "agentexecutor"
+    );
+  }
+
   private mapMessageTypeToRole(messageType: string): string {
     // Map LangChain message types to standard OpenTelemetry roles
     switch (messageType) {

packages/instrumentation-langchain/test/instrumentation.test.ts

Lines changed: 1 addition & 156 deletions
@@ -30,14 +30,11 @@ import {
   AgentExecutor,
 } from "@langchain/classic/agents";
 import { Calculator } from "@langchain/community/tools/calculator";
-import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
-import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from "@langchain/openai";
-import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
+import { ChatOpenAI } from "@langchain/openai";
 import { StringOutputParser } from "@langchain/core/output_parsers";
 import { WikipediaQueryRun } from "@langchain/community/tools/wikipedia_query_run";

 import { LangChainInstrumentation } from "../src/instrumentation";
-import { SpanAttributes } from "@traceloop/ai-semantic-conventions";
 import {
   ATTR_GEN_AI_INPUT_MESSAGES,
   ATTR_GEN_AI_OUTPUT_MESSAGES,
@@ -46,12 +43,10 @@ import {
   ATTR_GEN_AI_REQUEST_MODEL,
   ATTR_GEN_AI_USAGE_INPUT_TOKENS,
   ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
-  ATTR_GEN_AI_RESPONSE_FINISH_REASONS,
   ATTR_GEN_AI_AGENT_NAME,
   GEN_AI_OPERATION_NAME_VALUE_CHAT,
   GEN_AI_OPERATION_NAME_VALUE_INVOKE_AGENT,
   GEN_AI_OPERATION_NAME_VALUE_EXECUTE_TOOL,
-  GEN_AI_PROVIDER_NAME_VALUE_OPENAI,
   GEN_AI_PROVIDER_NAME_VALUE_AWS_BEDROCK,
 } from "@opentelemetry/semantic-conventions/incubating";
 import { BedrockInstrumentation } from "@traceloop/instrumentation-bedrock";
@@ -213,156 +208,6 @@ describe("Test Langchain instrumentation", async function () {
     );
   }).timeout(60000);

-  it.skip(
-    "should set attributes in span for chain instrumentation",
-    async () => {
-      const slowerModel = new OpenAI({
-        modelName: "gpt-3.5-turbo-instruct",
-        temperature: 0.0,
-      });
-      const SYSTEM_TEMPLATE = `Use the following pieces of context to answer the question at the end.
-If you don't know the answer, just say that you don't know, don't try to make up an answer.
-----------------
-{context}`;
-      const prompt = PromptTemplate.fromTemplate(SYSTEM_TEMPLATE);
-      const text = "sample text";
-      const textSplitter = new RecursiveCharacterTextSplitter({
-        chunkSize: 1000,
-      });
-      const docs = await textSplitter.createDocuments([text]);
-      const vectorStore = await HNSWLib.fromDocuments(
-        docs,
-        new OpenAIEmbeddings(),
-      );
-      const chain = new langchainChainsModule.RetrievalQAChain({
-        combineDocumentsChain: langchainChainsModule.loadQAStuffChain(
-          slowerModel,
-          { prompt },
-        ),
-        retriever: vectorStore.asRetriever(2),
-        returnSourceDocuments: true,
-      });
-      const answer = await chain.call({
-        query: "What did the author do growing up?",
-        k: 8,
-      });
-      const spans = memoryExporter.getFinishedSpans();
-
-      const llmChainSpan = spans.find((span) => span.name === "LLMChain.task");
-      const stuffDocumentsChainSpan = spans.find(
-        (span) => span.name === "StuffDocumentsChain.task",
-      );
-      const retrievalQASpan = spans.find(
-        (span) => span.name === "retrieval_qa.workflow",
-      );
-      const retrievalQAChainSpan = spans.find(
-        (span) => span.name === "RetrievalQAChain.task",
-      );
-
-      assert.ok(answer);
-      assert.ok(llmChainSpan);
-      assert.ok(stuffDocumentsChainSpan);
-      assert.ok(retrievalQASpan);
-      assert.ok(retrievalQAChainSpan);
-      assert.strictEqual(
-        llmChainSpan.attributes["traceloop.span.kind"],
-        "task",
-      );
-      assert.strictEqual(
-        stuffDocumentsChainSpan.attributes["traceloop.span.kind"],
-        "task",
-      );
-      assert.strictEqual(
-        retrievalQASpan.attributes["traceloop.span.kind"],
-        "workflow",
-      );
-      assert.strictEqual(
-        retrievalQAChainSpan.attributes["traceloop.span.kind"],
-        "task",
-      );
-      assert.ok(retrievalQAChainSpan.attributes["traceloop.entity.input"]);
-      assert.ok(retrievalQAChainSpan.attributes["traceloop.entity.output"]);
-      assert.strictEqual(
-        JSON.parse(
-          retrievalQAChainSpan.attributes["traceloop.entity.input"].toString(),
-        ).kwargs.query,
-        "What did the author do growing up?",
-      );
-      assert.deepEqual(
-        JSON.parse(
-          retrievalQAChainSpan.attributes["traceloop.entity.output"].toString(),
-        ),
-        answer,
-      );
-    },
-  ).timeout(300000);
-
-  it.skip(
-    "should set attributes in span for retrieval qa instrumentation",
-    async () => {
-      const llm = new ChatOpenAI({});
-      const text = "sample text";
-      const textSplitter = new RecursiveCharacterTextSplitter({
-        chunkSize: 1000,
-      });
-      const docs = await textSplitter.createDocuments([text]);
-      const vectorStore = await HNSWLib.fromDocuments(
-        docs,
-        new OpenAIEmbeddings(),
-      );
-      const vectorStoreRetriever = vectorStore.asRetriever();
-      const chain = langchainChainsModule.RetrievalQAChain.fromLLM(
-        llm,
-        vectorStoreRetriever,
-      );
-      const answer = await chain.invoke({
-        query: "What did the president say about Justice Breyer?",
-      });
-
-      const spans = memoryExporter.getFinishedSpans();
-      const stuffDocumentsChainSpan = spans.find(
-        (span) => span.name === "langchain.task.StuffDocumentsChain",
-      );
-      const llmChainSpan = spans.find(
-        (span) => span.name === "langchain.task.LLMChain",
-      );
-      const retrievalQASpan = spans.find(
-        (span) => span.name === "retrieval_qa.workflow",
-      );
-
-      assert.ok(answer);
-      assert.ok(llmChainSpan);
-      assert.ok(stuffDocumentsChainSpan);
-      assert.ok(retrievalQASpan);
-      assert.strictEqual(
-        llmChainSpan.attributes["traceloop.span.kind"],
-        "task",
-      );
-      assert.strictEqual(
-        stuffDocumentsChainSpan.attributes["traceloop.span.kind"],
-        "task",
-      );
-      assert.strictEqual(
-        retrievalQASpan.attributes["traceloop.span.kind"],
-        "workflow",
-      );
-      assert.ok(retrievalQASpan.attributes["traceloop.entity.input"]);
-      assert.ok(retrievalQASpan.attributes["traceloop.entity.output"]);
-      assert.strictEqual(
-        JSON.parse(
-          retrievalQASpan.attributes["traceloop.entity.input"].toString(),
-        ).args[0].query,
-        "What did the president say about Justice Breyer?",
-      );
-      assert.deepEqual(
-        JSON.parse(
-          retrievalQASpan.attributes["traceloop.entity.output"].toString(),
-        ),
-        answer,
-      );
-    },
-  ).timeout(300000);
-
   it("should set correct attributes in span for LCEL", async () => {
     const wikipediaQuery = new WikipediaQueryRun({
       topKResults: 3,
