Skip to content

Commit cc83889

Browse files
giulio-leone and Copilot
authored
Python: fix(python/google): preserve thought_signature in Gemini function call parts (#13609)
### Motivation and Context Fixes #13480 When using Gemini models (2.5 Pro / 3 Pro) with thinking enabled, the API returns a `thought_signature` on function call `Part` objects. Per [Google's Thought Signatures documentation](https://ai.google.dev/gemini-api/docs/thought-signatures), this signature **must** be included in subsequent requests. Without it, follow-up turns with tool results fail with: ``` HTTP 400: Function call is missing a thought_signature in functionCall parts ``` The .NET connector was already fixed in #13418. This PR applies the equivalent fix to the Python connector. ### Description **Response parsing** (capture `thought_signature`): - `google_ai_chat_completion.py`: When creating `FunctionCallContent` from response parts, capture `part.thought_signature` into `FunctionCallContent.metadata` (both non-streaming and streaming paths) - `vertex_ai_chat_completion.py`: Same capture via `part_dict.get("thought_signature")` for Vertex AI **Request building** (restore `thought_signature`): - `google_ai/services/utils.py`: In `format_assistant_message()`, when a `FunctionCallContent` has `thought_signature` in its metadata, construct the `Part` directly with the `thought_signature` parameter instead of using `Part.from_function_call()` - `vertex_ai/services/utils.py`: In `format_assistant_message()`, include `thought_signature` in the `Part.from_dict()` dictionary when present **Backward compatible**: When `thought_signature` is not present (thinking disabled or older models), behavior is identical to before. 
### Test Coverage Added 4 new tests: - `test_format_assistant_message_with_thought_signature` (Google AI) — verifies round-trip preservation - `test_format_assistant_message_without_thought_signature` (Google AI) — verifies backward compatibility - `test_format_assistant_message_with_thought_signature` (Vertex AI) — verifies round-trip preservation - `test_format_assistant_message_without_thought_signature` (Vertex AI) — verifies backward compatibility All 110 existing Google connector tests continue to pass. ### Contribution Checklist - [x] The code builds clean without any errors or warnings - [x] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) - [x] All unit tests pass, and I have added new tests - [x] I didn't break anyone 😄 --------- Co-authored-by: giulio-leone <giulio.leone@users.noreply.github.com> Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
1 parent b3cf64a commit cc83889

File tree

11 files changed

+480
-16
lines changed

11 files changed

+480
-16
lines changed

python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -303,13 +303,18 @@ def _create_chat_message_content(
303303
if part.text:
304304
items.append(TextContent(text=part.text, inner_content=response, metadata=response_metadata))
305305
elif part.function_call:
306+
fc_metadata: dict[str, Any] = {}
307+
thought_sig = getattr(part, "thought_signature", None)
308+
if thought_sig:
309+
fc_metadata["thought_signature"] = thought_sig
306310
items.append(
307311
FunctionCallContent(
308312
id=f"{part.function_call.name}_{idx!s}",
309313
name=format_gemini_function_name_to_kernel_function_fully_qualified_name(
310314
part.function_call.name # type: ignore[arg-type]
311315
),
312316
arguments={k: v for k, v in part.function_call.args.items()}, # type: ignore
317+
metadata=fc_metadata if fc_metadata else None,
313318
)
314319
)
315320

@@ -360,13 +365,18 @@ def _create_streaming_chat_message_content(
360365
)
361366
)
362367
elif part.function_call:
368+
fc_metadata: dict[str, Any] = {}
369+
thought_sig = getattr(part, "thought_signature", None)
370+
if thought_sig:
371+
fc_metadata["thought_signature"] = thought_sig
363372
items.append(
364373
FunctionCallContent(
365374
id=f"{part.function_call.name}_{idx!s}",
366375
name=format_gemini_function_name_to_kernel_function_fully_qualified_name(
367376
part.function_call.name # type: ignore[arg-type]
368377
),
369378
arguments={k: v for k, v in part.function_call.args.items()}, # type: ignore
379+
metadata=fc_metadata if fc_metadata else None,
370380
)
371381
)
372382

python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -91,12 +91,24 @@ def format_assistant_message(message: ChatMessageContent) -> list[Part]:
9191
if item.text:
9292
parts.append(Part.from_text(text=item.text))
9393
elif isinstance(item, FunctionCallContent):
94-
parts.append(
95-
Part.from_function_call(
96-
name=item.name, # type: ignore[arg-type]
97-
args=json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments, # type: ignore[arg-type]
94+
thought_signature = item.metadata.get("thought_signature") if item.metadata else None
95+
if thought_signature:
96+
parts.append(
97+
Part(
98+
function_call={
99+
"name": item.name, # type: ignore[arg-type]
100+
"args": json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments,
101+
},
102+
thought_signature=thought_signature,
103+
)
104+
)
105+
else:
106+
parts.append(
107+
Part.from_function_call(
108+
name=item.name, # type: ignore[arg-type]
109+
args=json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments, # type: ignore[arg-type]
110+
)
98111
)
99-
)
100112
elif isinstance(item, ImageContent):
101113
parts.append(_create_image_part(item))
102114
else:

python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
import json
44
import logging
5-
from typing import TYPE_CHECKING
5+
from typing import TYPE_CHECKING, Any
66

77
from google.cloud.aiplatform_v1beta1.types.content import Candidate
88
from vertexai.generative_models import FunctionDeclaration, Part, Tool, ToolConfig
@@ -89,14 +89,16 @@ def format_assistant_message(message: ChatMessageContent) -> list[Part]:
8989
if item.text:
9090
parts.append(Part.from_text(item.text))
9191
elif isinstance(item, FunctionCallContent):
92-
parts.append(
93-
Part.from_dict({
94-
"function_call": {
95-
"name": item.name,
96-
"args": json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments,
97-
}
98-
})
99-
)
92+
part_dict: dict[str, Any] = {
93+
"function_call": {
94+
"name": item.name, # type: ignore[arg-type]
95+
"args": json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments,
96+
}
97+
}
98+
thought_signature = item.metadata.get("thought_signature") if item.metadata else None
99+
if thought_signature:
100+
part_dict["thought_signature"] = thought_signature
101+
parts.append(Part.from_dict(part_dict))
100102
elif isinstance(item, ImageContent):
101103
parts.append(_create_image_part(item))
102104
else:

python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -252,13 +252,18 @@ def _create_chat_message_content(self, response: GenerationResponse, candidate:
252252
if "text" in part_dict:
253253
items.append(TextContent(text=part.text, inner_content=response, metadata=response_metadata))
254254
elif "function_call" in part_dict:
255+
fc_metadata: dict[str, Any] = {}
256+
thought_sig = part_dict.get("thought_signature")
257+
if thought_sig:
258+
fc_metadata["thought_signature"] = thought_sig
255259
items.append(
256260
FunctionCallContent(
257261
id=f"{part.function_call.name}_{idx!s}",
258262
name=format_gemini_function_name_to_kernel_function_fully_qualified_name(
259263
part.function_call.name
260264
),
261265
arguments={k: v for k, v in part.function_call.args.items()},
266+
metadata=fc_metadata if fc_metadata else None,
262267
)
263268
)
264269

@@ -309,13 +314,18 @@ def _create_streaming_chat_message_content(
309314
)
310315
)
311316
elif "function_call" in part_dict:
317+
fc_metadata_s: dict[str, Any] = {}
318+
thought_sig_s = part_dict.get("thought_signature")
319+
if thought_sig_s:
320+
fc_metadata_s["thought_signature"] = thought_sig_s
312321
items.append(
313322
FunctionCallContent(
314323
id=f"{part.function_call.name}_{idx!s}",
315324
name=format_gemini_function_name_to_kernel_function_fully_qualified_name(
316325
part.function_call.name
317326
),
318327
arguments={k: v for k, v in part.function_call.args.items()},
328+
metadata=fc_metadata_s if fc_metadata_s else None,
319329
)
320330
)
321331

python/tests/unit/connectors/ai/google/google_ai/services/test_google_ai_chat_completion.py

Lines changed: 194 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -440,3 +440,197 @@ def test_google_ai_chat_completion_parse_chat_history_correctly(google_ai_unit_t
440440
assert parsed_chat_history[0].parts[0].text == "test_user_message"
441441
assert parsed_chat_history[1].role == "model"
442442
assert parsed_chat_history[1].parts[0].text == "test_assistant_message"
443+
444+
445+
# region deserialization (Part → FunctionCallContent round-trip)
446+
447+
448+
def test_create_chat_message_content_with_thought_signature(google_ai_unit_test_env) -> None:
449+
"""Test that thought_signature from a Part is deserialized into FunctionCallContent.metadata."""
450+
from google.genai.types import (
451+
Candidate,
452+
Content,
453+
GenerateContentResponse,
454+
GenerateContentResponseUsageMetadata,
455+
Part,
456+
)
457+
from google.genai.types import (
458+
FinishReason as GFinishReason,
459+
)
460+
461+
from semantic_kernel.contents.function_call_content import FunctionCallContent
462+
463+
thought_sig_value = b"test-thought-sig-bytes"
464+
part = Part.from_function_call(name="test_function", args={"key": "value"})
465+
part.thought_signature = thought_sig_value
466+
467+
candidate = Candidate()
468+
candidate.index = 0
469+
candidate.content = Content(role="user", parts=[part])
470+
candidate.finish_reason = GFinishReason.STOP
471+
472+
response = GenerateContentResponse()
473+
response.candidates = [candidate]
474+
response.usage_metadata = GenerateContentResponseUsageMetadata(
475+
prompt_token_count=0, cached_content_token_count=0, candidates_token_count=0, total_token_count=0
476+
)
477+
478+
completion = GoogleAIChatCompletion()
479+
result = completion._create_chat_message_content(response, candidate)
480+
481+
fc_items = [item for item in result.items if isinstance(item, FunctionCallContent)]
482+
assert len(fc_items) == 1
483+
assert fc_items[0].metadata is not None
484+
assert fc_items[0].metadata["thought_signature"] == thought_sig_value
485+
486+
487+
def test_create_chat_message_content_without_thought_signature(google_ai_unit_test_env) -> None:
488+
"""Test that FunctionCallContent works when Part has no thought_signature."""
489+
from google.genai.types import (
490+
Candidate,
491+
Content,
492+
GenerateContentResponse,
493+
GenerateContentResponseUsageMetadata,
494+
Part,
495+
)
496+
from google.genai.types import (
497+
FinishReason as GFinishReason,
498+
)
499+
500+
from semantic_kernel.contents.function_call_content import FunctionCallContent
501+
502+
part = Part.from_function_call(name="test_function", args={"key": "value"})
503+
504+
candidate = Candidate()
505+
candidate.index = 0
506+
candidate.content = Content(role="user", parts=[part])
507+
candidate.finish_reason = GFinishReason.STOP
508+
509+
response = GenerateContentResponse()
510+
response.candidates = [candidate]
511+
response.usage_metadata = GenerateContentResponseUsageMetadata(
512+
prompt_token_count=0, cached_content_token_count=0, candidates_token_count=0, total_token_count=0
513+
)
514+
515+
completion = GoogleAIChatCompletion()
516+
result = completion._create_chat_message_content(response, candidate)
517+
518+
fc_items = [item for item in result.items if isinstance(item, FunctionCallContent)]
519+
assert len(fc_items) == 1
520+
assert "thought_signature" not in fc_items[0].metadata
521+
522+
523+
def test_create_streaming_chat_message_content_with_thought_signature(google_ai_unit_test_env) -> None:
524+
"""Test that thought_signature from a Part is deserialized in streaming path."""
525+
from google.genai.types import (
526+
Candidate,
527+
Content,
528+
GenerateContentResponse,
529+
GenerateContentResponseUsageMetadata,
530+
Part,
531+
)
532+
from google.genai.types import (
533+
FinishReason as GFinishReason,
534+
)
535+
536+
from semantic_kernel.contents.function_call_content import FunctionCallContent
537+
538+
thought_sig_value = b"streaming-thought-sig"
539+
part = Part.from_function_call(name="stream_func", args={"a": "b"})
540+
part.thought_signature = thought_sig_value
541+
542+
candidate = Candidate()
543+
candidate.index = 0
544+
candidate.content = Content(role="user", parts=[part])
545+
candidate.finish_reason = GFinishReason.STOP
546+
547+
chunk = GenerateContentResponse()
548+
chunk.candidates = [candidate]
549+
chunk.usage_metadata = GenerateContentResponseUsageMetadata(
550+
prompt_token_count=0, cached_content_token_count=0, candidates_token_count=0, total_token_count=0
551+
)
552+
553+
completion = GoogleAIChatCompletion()
554+
result = completion._create_streaming_chat_message_content(chunk, candidate)
555+
556+
fc_items = [item for item in result.items if isinstance(item, FunctionCallContent)]
557+
assert len(fc_items) == 1
558+
assert fc_items[0].metadata is not None
559+
assert fc_items[0].metadata["thought_signature"] == thought_sig_value
560+
561+
562+
def test_create_streaming_chat_message_content_without_thought_signature(google_ai_unit_test_env) -> None:
563+
"""Test that streaming FunctionCallContent works when Part lacks thought_signature."""
564+
from google.genai.types import (
565+
Candidate,
566+
Content,
567+
GenerateContentResponse,
568+
GenerateContentResponseUsageMetadata,
569+
Part,
570+
)
571+
from google.genai.types import (
572+
FinishReason as GFinishReason,
573+
)
574+
575+
from semantic_kernel.contents.function_call_content import FunctionCallContent
576+
577+
part = Part.from_function_call(name="stream_func", args={"a": "b"})
578+
579+
candidate = Candidate()
580+
candidate.index = 0
581+
candidate.content = Content(role="user", parts=[part])
582+
candidate.finish_reason = GFinishReason.STOP
583+
584+
chunk = GenerateContentResponse()
585+
chunk.candidates = [candidate]
586+
chunk.usage_metadata = GenerateContentResponseUsageMetadata(
587+
prompt_token_count=0, cached_content_token_count=0, candidates_token_count=0, total_token_count=0
588+
)
589+
590+
completion = GoogleAIChatCompletion()
591+
result = completion._create_streaming_chat_message_content(chunk, candidate)
592+
593+
fc_items = [item for item in result.items if isinstance(item, FunctionCallContent)]
594+
assert len(fc_items) == 1
595+
assert "thought_signature" not in fc_items[0].metadata
596+
597+
598+
def test_create_chat_message_content_getattr_guard_on_missing_attribute(google_ai_unit_test_env) -> None:
599+
"""Test that getattr guard handles SDK versions where thought_signature doesn't exist on Part."""
600+
from unittest.mock import MagicMock
601+
602+
from google.genai.types import (
603+
GenerateContentResponse,
604+
GenerateContentResponseUsageMetadata,
605+
)
606+
607+
from semantic_kernel.contents.function_call_content import FunctionCallContent
608+
609+
# Create a mock Part that lacks 'thought_signature' attribute entirely
610+
mock_part = MagicMock()
611+
mock_part.text = None
612+
mock_part.function_call.name = "test_func"
613+
mock_part.function_call.args = {"x": "y"}
614+
del mock_part.thought_signature # simulate older SDK without the field
615+
616+
# Use a fully-mocked candidate to avoid Content pydantic validation
617+
mock_candidate = MagicMock()
618+
mock_candidate.index = 0
619+
mock_candidate.content.parts = [mock_part]
620+
mock_candidate.finish_reason = 1 # STOP
621+
622+
response = GenerateContentResponse()
623+
response.candidates = [mock_candidate]
624+
response.usage_metadata = GenerateContentResponseUsageMetadata(
625+
prompt_token_count=0, cached_content_token_count=0, candidates_token_count=0, total_token_count=0
626+
)
627+
628+
completion = GoogleAIChatCompletion()
629+
result = completion._create_chat_message_content(response, mock_candidate)
630+
631+
fc_items = [item for item in result.items if isinstance(item, FunctionCallContent)]
632+
assert len(fc_items) == 1
633+
assert "thought_signature" not in fc_items[0].metadata
634+
635+
636+
# endregion deserialization

python/tests/unit/connectors/ai/google/google_ai/services/test_google_ai_utils.py

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,3 +113,47 @@ def test_format_assistant_message_with_unsupported_items() -> None:
113113

114114
with pytest.raises(ServiceInvalidRequestError):
115115
format_assistant_message(assistant_message)
116+
117+
118+
def test_format_assistant_message_with_thought_signature() -> None:
119+
"""Test that thought_signature is preserved in function call parts."""
120+
import base64
121+
122+
thought_sig = base64.b64encode(b"test_thought_signature_data")
123+
assistant_message = ChatMessageContent(
124+
role=AuthorRole.ASSISTANT,
125+
items=[
126+
FunctionCallContent(
127+
name="test_function",
128+
arguments={"arg1": "value1"},
129+
metadata={"thought_signature": thought_sig},
130+
),
131+
],
132+
)
133+
134+
formatted = format_assistant_message(assistant_message)
135+
assert len(formatted) == 1
136+
assert isinstance(formatted[0], Part)
137+
assert formatted[0].function_call.name == "test_function"
138+
assert formatted[0].function_call.args == {"arg1": "value1"}
139+
assert formatted[0].thought_signature == thought_sig
140+
141+
142+
def test_format_assistant_message_without_thought_signature() -> None:
143+
"""Test that function calls without thought_signature still work."""
144+
assistant_message = ChatMessageContent(
145+
role=AuthorRole.ASSISTANT,
146+
items=[
147+
FunctionCallContent(
148+
name="test_function",
149+
arguments={"arg1": "value1"},
150+
),
151+
],
152+
)
153+
154+
formatted = format_assistant_message(assistant_message)
155+
assert len(formatted) == 1
156+
assert isinstance(formatted[0], Part)
157+
assert formatted[0].function_call.name == "test_function"
158+
assert formatted[0].function_call.args == {"arg1": "value1"}
159+
assert not getattr(formatted[0], "thought_signature", None)

0 commit comments

Comments (0)