Skip to content
Open
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion examples/Animated_Story_Video_Generation_gemini.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -278,7 +278,7 @@
" 'api_version': 'v1alpha'\n",
"})\n",
"# Create a client for text generation using Gemini.\n",
"MODEL = \"gemini-3.1-flash-lite-preview\"\n",
"MODEL = \"gemini-3-flash-preview\"\n",
"# Create a client for image generation using Imagen.\n",
"IMAGE_MODEL_ID = \"imagen-3.0-generate-002\"\n"
]
Expand Down
6 changes: 3 additions & 3 deletions examples/Anomaly_detection_with_embeddings.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/137.7 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r",
"\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━\u001b[0m \u001b[32m133.1/137.7 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r",
"\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/137.7 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\n",
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why are you adding \n in the output?

"\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━\u001b[0m \u001b[32m133.1/137.7 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m137.7/137.7 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h"
]
Expand Down Expand Up @@ -1276,7 +1276,7 @@
"\n",
"\n",
"def create_embeddings(df):\n",
" MODEL_ID = \"text-embedding-004\" # @param [\"embedding-001\",\"text-embedding-004\"] {allow-input: true}\n",
" MODEL_ID = \"text-embedding-001\" # @param [\"text-embedding-001\", \"text-embedding-004\"] {allow-input: true}\n",
" model = f\"models/{MODEL_ID}\"\n",
" embed_fn = make_embed_text_fn(model)\n",
"\n",
Expand Down
34 changes: 17 additions & 17 deletions examples/Book_illustration.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,7 @@
"outputs": [],
"source": [
"IMAGE_MODEL_ID = \"gemini-2.5-flash-image\" # @param [\"gemini-2.5-flash-image\", \"gemini-3-pro-image-preview\"] {\"allow-input\":true, isTemplate: true}\n",
"GEMINI_MODEL_ID = \"gemini-2.5-flash\" # @param [\"gemini-2.5-flash-lite\", \"gemini-2.5-flash\", \"gemini-2.5-pro\", \"gemini-3.1-flash-lite-preview\", \"gemini-3-flash-preview\", \"gemini-3.1-pro-preview\"] {\"allow-input\":true, isTemplate: true}"
"GEMINI_MODEL_ID = \"gemini-3-flash-preview\" # @param [\"gemini-2.5-flash-lite\", \"gemini-2.5-flash\", \"gemini-2.5-pro\", \"gemini-3.1-flash-lite-preview\", \"gemini-3-flash-preview\", \"gemini-3.1-pro-preview\"] {\"allow-input\":true, isTemplate: true}"
]
},
{
Expand Down Expand Up @@ -390,15 +390,15 @@
"\n",
"if style==\"\":\n",
" response = chat.send_message(\"\"\"\n",
" Can you define a art style that would fit the story?\n",
" Just give us the prompt for the art syle that will added to the furture prompts.\n",
" Can you define an art style that would fit the story?\n",
" Please provide the prompt for the art style that will be added to future prompts.\n",
" \"\"\")\n",
" style = json.loads(response.text)[0][\"prompt\"]\n",
"else:\n",
" chat.send_message(f\"\"\"\n",
" The art style will be:\"{style}\".\n",
" The art style will be: \"{style}\".\n",
" Keep that in mind when generating future prompts.\n",
" Keep quiet for now, instructions will follow.\n",
" Please wait; instructions will follow.\n",
" \"\"\")\n",
"\n",
"display(Markdown(f\"### Style:\"))\n",
Expand All @@ -413,7 +413,7 @@
"id": "vHjcGgtoadjB"
},
"source": [
"Let's also define some more instructions which will act as \"system instructions\" or a negative prompt to tell the model what you do not want to see (text on the images)."
"Define some additional instructions that will act as \"system instructions\" or a negative prompt to tell the model what you do not want to see (text on the images)."
]
},
{
Expand Down Expand Up @@ -503,16 +503,16 @@
"id": "Smw57Slmhh7v"
},
"source": [
"Now that you have the prompts, you just need to loop on all the characters and have Gemini 2.5 Image generate an image for them. This model uses the same API as the text generation models.\n",
"Now that you have the prompts, loop over all the characters and have Gemini 2.5 Image generate an image for each. This model uses the same API as the text generation models.\n",
"\n",
"Like before, for the sake of consistency, we are going to use chat mode, but within a different instance.\n",
"Like before, for consistency, use chat mode, but within a different instance.\n",
"\n",
"For an extensive explanation on the Gemini 2.5 Image model and its options, check the [getting started with Gemini 2.5 Image](../quickstarts/Get_Started_Nano_Banana.ipynb) notebook. But here's a quick overview of what being used here:\n",
"* `prompt` is the prompt passed down to Gemini 2.5 Image. You're not just sending what Gemini has generate to describe the chacaters but also our style and our system instructions.\n",
"* `response_modalities=['Image']` because we only want images\n",
"* `aspect_ratio=\"9:16\"` because we want portraits images\n",
"For an extensive explanation on the Gemini 2.5 Image model and its options, check the [getting started with Gemini 2.5 Image](../quickstarts/Get_Started_Nano_Banana.ipynb) notebook. Here's a quick overview of what is used here:\n",
"* `prompt` is the prompt passed down to Gemini 2.5 Image. You're not just sending what Gemini has generated to describe the characters but also the style and the system instructions.\n",
"* `response_modalities=['Image']` because only images are requested\n",
"* `aspect_ratio=\"9:16\"` because portraits are desired\n",
"\n",
"Note that we could have used system instructions but the model currently ignores them so we decided to pass them as message."
"Note that system instructions could have been used, but the model currently ignores them, so the instructions are passed as a message."
]
},
{
Expand Down Expand Up @@ -638,7 +638,7 @@
"\n",
"image_chat.send_message(f\"\"\"\n",
" You are going to generate portrait images to illustrate The Wind in the Willows from Kenneth Grahame.\n",
" The style we want you to follow is: {style}\n",
" The style you should follow is: {style}\n",
" Also follow those rules: {system_instructions}\n",
"\"\"\")\n",
"\n",
Expand Down Expand Up @@ -815,7 +815,7 @@
}
],
"source": [
"image_chat.send_message(\"Starting from now, we're going to illustrate the book's chapters. Don't forget to refer to your previous illustrations of the characters to keep the characters consistency, but feel free to change their position.\")\n",
"image_chat.send_message(\"Starting from now, you are going to illustrate the book's chapters. Don't forget to refer to your previous illustrations of the characters to keep the characters consistent, but feel free to change their position.\")\n",
"\n",
"for chapter in chapters:\n",
" display(Markdown(f\"### {chapter['name']}\"))\n",
Expand Down Expand Up @@ -1246,7 +1246,7 @@
"\n",
"image_chat.send_message(f\"\"\"\n",
" You are going to generate portrait images to illustrate The Wind in the Willows from Kenneth Grahame.\n",
" The style we want you to follow is: {style}\n",
" The style you should follow is: {style}\n",
" Also follow those rules: {system_instructions}\n",
"\"\"\")\n",
"\n",
Expand Down Expand Up @@ -1423,7 +1423,7 @@
}
],
"source": [
"image_chat.send_message(\"Starting from now, we're going to illustrate the book's chapters. Don't forget to refer to your previous illustrations of the characters to keep the characters consistency, but feel free to change their position.\")\n",
"image_chat.send_message(\"Starting from now, you are going to illustrate the book's chapters. Don't forget to refer to your previous illustrations of the characters to keep the characters consistent, but feel free to change their position.\")\n",
"\n",
"for chapter in chapters:\n",
" display(Markdown(f\"### {chapter['name']}\"))\n",
Expand Down
4 changes: 2 additions & 2 deletions examples/Browser_as_a_tool.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -155,8 +155,8 @@
"\n",
"client = genai.Client(api_key=GOOGLE_API_KEY)\n",
"\n",
"LIVE_MODEL = 'gemini-2.5-flash-native-audio-preview-09-2025' # @param ['gemini-2.0-flash-live-001', 'gemini-live-2.5-flash-preview', 'gemini-2.5-flash-native-audio-preview-09-2025'] {allow-input: true, isTemplate: true}\n",
"MODEL = 'gemini-2.5-flash' # @param ['gemini-2.5-flash'] {allow-input: true, isTemplate: true}"
"LIVE_MODEL = 'gemini-live-2.5-flash-preview' # @param ['gemini-live-2.5-flash-preview', 'gemini-2.0-flash-live-001'] {allow-input: true, isTemplate: true}\n",
"MODEL = 'gemini-3-flash-preview' # @param ['gemini-3-flash-preview', 'gemini-3-pro-preview'] {allow-input: true, isTemplate: true}"
]
},
{
Expand Down
2 changes: 1 addition & 1 deletion examples/Datasets.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@
}
],
"source": [
"models_to_eval = ['gemini-2.5-flash', 'gemini-2.5-pro']\n",
"models_to_eval = ['gemini-3-flash-preview', 'gemini-3-pro-preview']\n",
"\n",
"batches = []\n",
"for model in models_to_eval:\n",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@
"client = genai.Client(api_key=api_key)\n",
"\n",
"# Define the model you are going to use\n",
"model_id = \"gemini-2.5-flash\" # or \"gemini-2.5-flash-lite\", \"gemini-2.5-flash\", \"gemini-2.5-pro\", \"gemini-3.1-flash-lite-preview\", \"gemini-3.1-pro-preview\""
"model_id = \"gemini-3-flash-preview\" # @param [\"gemini-3-flash-preview\", \"gemini-3-pro-preview\", \"gemini-3.1-flash-lite-preview\", \"gemini-3.1-pro-preview\"] {\"allow-input\":true, isTemplate: true}"
]
},
{
Expand Down
2 changes: 1 addition & 1 deletion examples/Search_Wikipedia_using_ReAct.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -854,7 +854,7 @@
}
],
"source": [
"gemini_ReAct_chat = ReAct(model='gemini-2.0-flash', ReAct_prompt='model_instructions.txt')\n",
"gemini_ReAct_chat = ReAct(model='gemini-3-flash-preview', ReAct_prompt='model_instructions.txt')\n",
"# Note: try different combinations of generation_config parameters for varied results\n",
"gemini_ReAct_chat(\"What are the total of ages of the main trio from the new Percy Jackson and the Olympians TV series in real life?\", temperature=0.2)"
]
Expand Down
2 changes: 1 addition & 1 deletion examples/Spatial_understanding_3d.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -2227,7 +2227,7 @@
}
],
"source": [
"PRO_MODEL_ID ='gemini-2.5-pro'\n",
"PRO_MODEL_ID ='gemini-3-flash-preview'\n",
"\n",
"# Load and resize the image.\n",
"img_0 = Image.open(\"music_0.jpg\")\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/Tag_and_caption_images.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -230,7 +230,7 @@
"from PIL import Image as PILImage\n",
"import time\n",
"\n",
"MODEL_ID='gemini-2.0-flash' # @param [\"gemini-2.5-flash-lite\", \"gemini-2.5-flash\", \"gemini-2.5-pro\", \"gemini-3.1-flash-lite-preview\", \"gemini-3.1-pro-preview\"] {\"allow-input\":true, isTemplate: true}\n",
"MODEL_ID='gemini-3-flash-preview' # @param [\"gemini-2.5-flash-lite\", \"gemini-2.5-flash\", \"gemini-2.5-pro\", \"gemini-3.1-flash-lite-preview\", \"gemini-3.1-pro-preview\"] {\"allow-input\":true, isTemplate: true}\n",
"\n",
"# a helper function for calling\n",
"\n",
Expand Down
3 changes: 2 additions & 1 deletion examples/Voice_memos.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,8 @@
"\n",
"prompt = \"Draft my next blog post based on my thoughts in this audio file and these two previous blog posts I wrote.\"\n",
"\n",
"MODEL_ID =\"gemini-2.5-flash\" # @param [\"gemini-2.5-flash\", \"gemini-2.5-pro\"] {\"allow-input\":true, isTemplate: true}\n",
"MODEL_ID =\"gemini-3-flash-preview\" # @param [\"gemini-3-flash-preview\", \"gemini-3-pro-preview\"] {\"allow-input\":true, isTemplate: true}\n",
"\n",
"\n",
"response = client.models.generate_content(\n",
" model=MODEL_ID,\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/fastrtc_ui.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ async def start_up(self):
),
)
async with client.aio.live.connect(
model="gemini-2.5-flash-lite", config=config
model="gemini-3-flash-preview", config=config
) as session:
async for audio in session.start_stream(
stream=self.stream(), mime_type="audio/pcm"
Expand Down
Loading
Loading