Skip to content

Commit cc63573

Browse files
giulio-leone and Copilot committed
fix(python/google): preserve thought_signature in Gemini function call parts
When using Gemini models with thinking enabled, the API returns a thought_signature on function_call Part objects that must be included in subsequent requests. Without this, follow-up turns with tool results fail with HTTP 400: 'Function call is missing a thought_signature'.

Changes:
- Google AI: Capture thought_signature from response parts into FunctionCallContent.metadata during response parsing (both non-streaming and streaming paths)
- Google AI: Restore thought_signature when formatting assistant messages back to Gemini Part objects
- Vertex AI: Same capture and restore logic for the Vertex AI connector
- Tests: Added 4 new tests covering the thought_signature round-trip for both Google AI and Vertex AI

Fixes #13480

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
1 parent 429dd1c commit cc63573

File tree

6 files changed

+139
-12
lines changed

6 files changed

+139
-12
lines changed

python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -303,13 +303,17 @@ def _create_chat_message_content(
303303
if part.text:
304304
items.append(TextContent(text=part.text, inner_content=response, metadata=response_metadata))
305305
elif part.function_call:
306+
fc_metadata: dict[str, Any] = {}
307+
if part.thought_signature:
308+
fc_metadata["thought_signature"] = part.thought_signature
306309
items.append(
307310
FunctionCallContent(
308311
id=f"{part.function_call.name}_{idx!s}",
309312
name=format_gemini_function_name_to_kernel_function_fully_qualified_name(
310313
part.function_call.name # type: ignore[arg-type]
311314
),
312315
arguments={k: v for k, v in part.function_call.args.items()}, # type: ignore
316+
metadata=fc_metadata if fc_metadata else None,
313317
)
314318
)
315319

@@ -360,13 +364,17 @@ def _create_streaming_chat_message_content(
360364
)
361365
)
362366
elif part.function_call:
367+
fc_metadata: dict[str, Any] = {}
368+
if part.thought_signature:
369+
fc_metadata["thought_signature"] = part.thought_signature
363370
items.append(
364371
FunctionCallContent(
365372
id=f"{part.function_call.name}_{idx!s}",
366373
name=format_gemini_function_name_to_kernel_function_fully_qualified_name(
367374
part.function_call.name # type: ignore[arg-type]
368375
),
369376
arguments={k: v for k, v in part.function_call.args.items()}, # type: ignore
377+
metadata=fc_metadata if fc_metadata else None,
370378
)
371379
)
372380

python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -91,12 +91,26 @@ def format_assistant_message(message: ChatMessageContent) -> list[Part]:
9191
if item.text:
9292
parts.append(Part.from_text(text=item.text))
9393
elif isinstance(item, FunctionCallContent):
94-
parts.append(
95-
Part.from_function_call(
96-
name=item.name, # type: ignore[arg-type]
97-
args=json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments, # type: ignore[arg-type]
94+
thought_signature = item.metadata.get("thought_signature") if item.metadata else None
95+
if thought_signature:
96+
parts.append(
97+
Part(
98+
function_call={
99+
"name": item.name,
100+
"args": json.loads(item.arguments)
101+
if isinstance(item.arguments, str)
102+
else item.arguments,
103+
},
104+
thought_signature=thought_signature,
105+
)
106+
)
107+
else:
108+
parts.append(
109+
Part.from_function_call(
110+
name=item.name, # type: ignore[arg-type]
111+
args=json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments, # type: ignore[arg-type]
112+
)
98113
)
99-
)
100114
elif isinstance(item, ImageContent):
101115
parts.append(_create_image_part(item))
102116
else:

python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
import json
44
import logging
5-
from typing import TYPE_CHECKING
5+
from typing import TYPE_CHECKING, Any
66

77
from google.cloud.aiplatform_v1beta1.types.content import Candidate
88
from vertexai.generative_models import FunctionDeclaration, Part, Tool, ToolConfig
@@ -89,13 +89,17 @@ def format_assistant_message(message: ChatMessageContent) -> list[Part]:
8989
if item.text:
9090
parts.append(Part.from_text(item.text))
9191
elif isinstance(item, FunctionCallContent):
92+
part_dict: dict[str, Any] = {
93+
"function_call": {
94+
"name": item.name,
95+
"args": json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments,
96+
}
97+
}
98+
thought_signature = item.metadata.get("thought_signature") if item.metadata else None
99+
if thought_signature:
100+
part_dict["thought_signature"] = thought_signature
92101
parts.append(
93-
Part.from_dict({
94-
"function_call": {
95-
"name": item.name,
96-
"args": json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments,
97-
}
98-
})
102+
Part.from_dict(part_dict)
99103
)
100104
elif isinstance(item, ImageContent):
101105
parts.append(_create_image_part(item))

python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -252,13 +252,18 @@ def _create_chat_message_content(self, response: GenerationResponse, candidate:
252252
if "text" in part_dict:
253253
items.append(TextContent(text=part.text, inner_content=response, metadata=response_metadata))
254254
elif "function_call" in part_dict:
255+
fc_metadata: dict[str, Any] = {}
256+
thought_sig = part_dict.get("thought_signature")
257+
if thought_sig:
258+
fc_metadata["thought_signature"] = thought_sig
255259
items.append(
256260
FunctionCallContent(
257261
id=f"{part.function_call.name}_{idx!s}",
258262
name=format_gemini_function_name_to_kernel_function_fully_qualified_name(
259263
part.function_call.name
260264
),
261265
arguments={k: v for k, v in part.function_call.args.items()},
266+
metadata=fc_metadata if fc_metadata else None,
262267
)
263268
)
264269

@@ -309,13 +314,18 @@ def _create_streaming_chat_message_content(
309314
)
310315
)
311316
elif "function_call" in part_dict:
317+
fc_metadata_s: dict[str, Any] = {}
318+
thought_sig_s = part_dict.get("thought_signature")
319+
if thought_sig_s:
320+
fc_metadata_s["thought_signature"] = thought_sig_s
312321
items.append(
313322
FunctionCallContent(
314323
id=f"{part.function_call.name}_{idx!s}",
315324
name=format_gemini_function_name_to_kernel_function_fully_qualified_name(
316325
part.function_call.name
317326
),
318327
arguments={k: v for k, v in part.function_call.args.items()},
328+
metadata=fc_metadata_s if fc_metadata_s else None,
319329
)
320330
)
321331

python/tests/unit/connectors/ai/google/google_ai/services/test_google_ai_utils.py

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,3 +113,47 @@ def test_format_assistant_message_with_unsupported_items() -> None:
113113

114114
with pytest.raises(ServiceInvalidRequestError):
115115
format_assistant_message(assistant_message)
116+
117+
118+
def test_format_assistant_message_with_thought_signature() -> None:
119+
"""Test that thought_signature is preserved in function call parts."""
120+
import base64
121+
122+
thought_sig = base64.b64encode(b"test_thought_signature_data")
123+
assistant_message = ChatMessageContent(
124+
role=AuthorRole.ASSISTANT,
125+
items=[
126+
FunctionCallContent(
127+
name="test_function",
128+
arguments={"arg1": "value1"},
129+
metadata={"thought_signature": thought_sig},
130+
),
131+
],
132+
)
133+
134+
formatted = format_assistant_message(assistant_message)
135+
assert len(formatted) == 1
136+
assert isinstance(formatted[0], Part)
137+
assert formatted[0].function_call.name == "test_function"
138+
assert formatted[0].function_call.args == {"arg1": "value1"}
139+
assert formatted[0].thought_signature == thought_sig
140+
141+
142+
def test_format_assistant_message_without_thought_signature() -> None:
143+
"""Test that function calls without thought_signature still work."""
144+
assistant_message = ChatMessageContent(
145+
role=AuthorRole.ASSISTANT,
146+
items=[
147+
FunctionCallContent(
148+
name="test_function",
149+
arguments={"arg1": "value1"},
150+
),
151+
],
152+
)
153+
154+
formatted = format_assistant_message(assistant_message)
155+
assert len(formatted) == 1
156+
assert isinstance(formatted[0], Part)
157+
assert formatted[0].function_call.name == "test_function"
158+
assert formatted[0].function_call.args == {"arg1": "value1"}
159+
assert not formatted[0].thought_signature

python/tests/unit/connectors/ai/google/vertex_ai/services/test_vertex_ai_utils.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,3 +110,50 @@ def test_format_assistant_message_with_unsupported_items() -> None:
110110

111111
with pytest.raises(ServiceInvalidRequestError):
112112
format_assistant_message(assistant_message)
113+
114+
115+
def test_format_assistant_message_with_thought_signature() -> None:
116+
"""Test that thought_signature is preserved in function call parts for Vertex AI."""
117+
import base64
118+
119+
thought_sig = base64.b64encode(b"test_thought_signature_data").decode("utf-8")
120+
assistant_message = ChatMessageContent(
121+
role=AuthorRole.ASSISTANT,
122+
items=[
123+
FunctionCallContent(
124+
name="test_function",
125+
arguments={"arg1": "value1"},
126+
metadata={"thought_signature": thought_sig},
127+
),
128+
],
129+
)
130+
131+
formatted = format_assistant_message(assistant_message)
132+
assert len(formatted) == 1
133+
assert isinstance(formatted[0], Part)
134+
assert formatted[0].function_call.name == "test_function"
135+
assert formatted[0].function_call.args == {"arg1": "value1"}
136+
part_dict = formatted[0].to_dict()
137+
assert "thought_signature" in part_dict
138+
assert part_dict["thought_signature"] == thought_sig
139+
140+
141+
def test_format_assistant_message_without_thought_signature() -> None:
142+
"""Test that function calls without thought_signature still work for Vertex AI."""
143+
assistant_message = ChatMessageContent(
144+
role=AuthorRole.ASSISTANT,
145+
items=[
146+
FunctionCallContent(
147+
name="test_function",
148+
arguments={"arg1": "value1"},
149+
),
150+
],
151+
)
152+
153+
formatted = format_assistant_message(assistant_message)
154+
assert len(formatted) == 1
155+
assert isinstance(formatted[0], Part)
156+
assert formatted[0].function_call.name == "test_function"
157+
assert formatted[0].function_call.args == {"arg1": "value1"}
158+
part_dict = formatted[0].to_dict()
159+
assert "thought_signature" not in part_dict

0 commit comments

Comments (0)