
Commit

Merge branch 'sdk-updates' of https://github.com/GoogleCloudPlatform/generative-ai into sdk-updates
holtskinner committed Jan 22, 2025
2 parents 25cde02 + d0c4754 commit 946e89d
Showing 12 changed files with 617 additions and 274 deletions.
401 changes: 217 additions & 184 deletions conversation/chat-app/package-lock.json

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions conversation/chat-app/package.json
@@ -16,7 +16,7 @@
"@sveltejs/adapter-static": "^3.0.1",
"@sveltejs/kit": "^2.9.0",
"autoprefixer": "^10.4.14",
"postcss": "^8.4.31",
"postcss": "^8.5.1",
"postcss-load-config": "^6.0.0",
"prettier": "^3.0.0",
"prettier-plugin-svelte": "^3.0.0",
@@ -26,7 +26,7 @@
"tailwindcss": "^3.3.2",
"tslib": "^2.4.1",
"typescript": "^5.0.0",
"vite": "^6.0.0"
"vite": "^6.0.11"
},
"type": "module",
"dependencies": {
22 changes: 14 additions & 8 deletions gemini/getting-started/intro_gemini_2_0_flash.ipynb
@@ -242,6 +242,7 @@
" Retrieval,\n",
" SafetySetting,\n",
" Tool,\n",
" ToolCodeExecution,\n",
" VertexAISearch,\n",
")"
]
@@ -275,7 +276,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {
"id": "UCgUOv4nSWhc"
},
@@ -303,7 +304,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {
"id": "zpIPG_YhSjaw"
},
@@ -363,7 +364,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"metadata": {
"id": "-coEslfWPrxo"
},
@@ -1228,14 +1229,19 @@
" Args:\n",
" location: The city and state, e.g. San Francisco, CA\n",
" \"\"\"\n",
" import random\n",
"\n",
" return random.choice([\"sunny\", \"raining\", \"snowing\", \"fog\"])\n",
" weather_map: dict[str, str] = {\n",
" \"Boston, MA\": \"snowing\",\n",
" \"San Francisco, CA\": \"foggy\",\n",
" \"Seattle, WA\": \"raining\",\n",
" \"Austin, TX\": \"hot\",\n",
" \"Chicago, IL\": \"windy\",\n",
" }\n",
" return weather_map.get(location, \"unknown\")\n",
"\n",
"\n",
"response = client.models.generate_content(\n",
" model=MODEL_ID,\n",
" contents=\"What is the weather like in Boston?\",\n",
" contents=\"What is the weather like in Austin?\",\n",
" config=GenerateContentConfig(\n",
" tools=[get_current_weather],\n",
" temperature=0,\n",
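The replacement above swaps the random stub for a deterministic lookup, which makes the function calling demo reproducible and explains why the prompt changes from Boston to Austin, a key in the new table. A quick illustration of the new behavior (the helper from the hunk above is repeated so the snippet stands alone): listed cities return their canned condition, and anything else falls back to "unknown".

```python
# Deterministic weather lookup, as introduced in the hunk above.
def get_current_weather(location: str) -> str:
    """Example method. Returns the current weather.

    Args:
        location: The city and state, e.g. San Francisco, CA
    """
    weather_map: dict[str, str] = {
        "Boston, MA": "snowing",
        "San Francisco, CA": "foggy",
        "Seattle, WA": "raining",
        "Austin, TX": "hot",
        "Chicago, IL": "windy",
    }
    return weather_map.get(location, "unknown")


print(get_current_weather("Austin, TX"))     # -> hot
print(get_current_weather("Paris, France"))  # -> unknown (not in the table)
```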
@@ -1314,7 +1320,7 @@
},
"outputs": [],
"source": [
"code_execution_tool = Tool(code_execution={})\n",
"code_execution_tool = Tool(code_execution=ToolCodeExecution())\n",
"\n",
"response = client.models.generate_content(\n",
" model=MODEL_ID,\n",
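With the explicit `ToolCodeExecution` type now imported (see the first hunk of this notebook), the code execution tool is constructed as `Tool(code_execution=ToolCodeExecution())` rather than `Tool(code_execution={})`. A minimal sketch of the updated pattern, assuming the notebook's `client` and `MODEL_ID` are already configured and that the types come from `google.genai.types`; the response-part fields used below (`executable_code`, `code_execution_result`) follow the google-genai SDK's naming, but treat them as an assumption and check the SDK reference:

```python
# Sketch only: assumes `client` (a google-genai Client) and MODEL_ID are set up
# as earlier in the notebook, and that these types live in google.genai.types.
from google.genai.types import GenerateContentConfig, Tool, ToolCodeExecution

code_execution_tool = Tool(code_execution=ToolCodeExecution())

response = client.models.generate_content(
    model=MODEL_ID,
    contents="Compute the sum of the first 50 prime numbers.",  # example prompt
    config=GenerateContentConfig(tools=[code_execution_tool]),
)

# Generated code and its execution result come back as separate parts
# alongside any text (field names assumed per google.genai.types.Part).
for part in response.candidates[0].content.parts:
    if part.executable_code:
        print("Generated code:\n", part.executable_code.code)
    if part.code_execution_result:
        print("Execution result:\n", part.code_execution_result.output)
    if part.text:
        print(part.text)
```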
189 changes: 189 additions & 0 deletions gemini/multimodal-live-api/intro_multimodal_live_api.ipynb
@@ -104,6 +104,8 @@
"- Text-to-text generation\n",
"- Text-to-audio generation\n",
"- Text-to-audio conversation\n",
"- Function calling\n",
"- Code execution\n",
"\n",
"See the [Multimodal Live API](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/multimodal-live) page for more details."
]
@@ -590,6 +592,193 @@
"await main()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "f214d0c3bee0"
},
"source": [
"### **Example 4**: Function calling\n",
"\n",
"You can use function calling to create a description of a function, then pass that description to the model in a request. The response from the model includes the name of a function that matches the description and the arguments to call it with.\n",
"\n",
"**Notes**:\n",
"\n",
"- All functions must be declared at the start of the session by sending tool definitions as part of the `setup` message.\n",
"- Currently only one tool is supported in the API."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "8a7595aee24a"
},
"outputs": [],
"source": [
"# Set model generation_config\n",
"CONFIG = {\"response_modalities\": [\"TEXT\"]}\n",
"\n",
"# Define function declarations\n",
"TOOLS = {\n",
" \"function_declarations\": {\n",
" \"name\": \"get_current_weather\",\n",
" \"description\": \"Get the current weather in the given location\",\n",
" \"parameters\": {\n",
" \"type\": \"OBJECT\",\n",
" \"properties\": {\"location\": {\"type\": \"STRING\"}},\n",
" },\n",
" }\n",
"}\n",
"\n",
"headers = {\n",
" \"Content-Type\": \"application/json\",\n",
" \"Authorization\": f\"Bearer {bearer_token[0]}\",\n",
"}\n",
"\n",
"# Connect to the server\n",
"async with connect(SERVICE_URL, additional_headers=headers) as ws:\n",
" # Setup the session\n",
" await ws.send(\n",
" json.dumps(\n",
" {\n",
" \"setup\": {\n",
" \"model\": MODEL,\n",
" \"generation_config\": CONFIG,\n",
" \"tools\": TOOLS,\n",
" }\n",
" }\n",
" )\n",
" )\n",
"\n",
" # Receive setup response\n",
" raw_response = await ws.recv(decode=False)\n",
" setup_response = json.loads(raw_response.decode())\n",
"\n",
" # Send text message\n",
" text_input = \"Get the current weather in Santa Clara, San Jose and Mountain View\"\n",
" display(Markdown(f\"**Input:** {text_input}\"))\n",
"\n",
" msg = {\n",
" \"client_content\": {\n",
" \"turns\": [{\"role\": \"user\", \"parts\": [{\"text\": text_input}]}],\n",
" \"turn_complete\": True,\n",
" }\n",
" }\n",
"\n",
" await ws.send(json.dumps(msg))\n",
"\n",
" responses = []\n",
"\n",
" # Receive chucks of server response\n",
" async for raw_response in ws:\n",
" response = json.loads(raw_response.decode(\"UTF-8\"))\n",
"\n",
" if (tool_call := response.get(\"toolCall\")) is not None:\n",
" for function_call in tool_call[\"functionCalls\"]:\n",
" responses.append(f\"FunctionCall: {str(function_call)}\\n\")\n",
"\n",
" if (server_content := response.get(\"serverContent\")) is not None:\n",
" if server_content.get(\"turnComplete\", True):\n",
" break\n",
"\n",
" # Print the server response\n",
" display(Markdown(\"**Response >** {}\".format(\"\\n\".join(responses))))"
]
},
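The cell above only prints the `FunctionCall` parts it receives; it never answers them, so the model cannot finish its turn. In a real session the client executes the function locally and streams the result back. A minimal sketch of that extra step, meant to run inside the same `async with connect(...)` block; the `tool_response` / `function_responses` message shape and the per-call `id` field are assumptions based on the Live API's bidirectional protocol, so check the API reference before relying on them:

```python
# Sketch only: send function results back so the model can complete its answer.
# Assumes `ws`, `json`, and the last received `tool_call` from the loop above;
# the message shape (tool_response / function_responses / id) is an assumption.
function_responses = []
for function_call in tool_call["functionCalls"]:
    location = function_call.get("args", {}).get("location", "unknown")
    function_responses.append(
        {
            "id": function_call.get("id"),
            "name": function_call["name"],
            "response": {"result": f"The weather in {location} is sunny."},  # stand-in result
        }
    )

await ws.send(json.dumps({"tool_response": {"function_responses": function_responses}}))

# Then keep reading messages, as in the loop above, until a serverContent
# message with turnComplete arrives; it will contain the model's final text.
```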
{
"cell_type": "markdown",
"metadata": {
"id": "ad6b585deadb"
},
"source": [
"### **Example 5**: Code execution\n",
"\n",
"You can use code execution capability to generate and execute Python code directly within the API.\n",
"\n",
"In this example, you initialize the code execution tool by passing `code_execution` in the `Tools` configuration, and register this tool with the model at the start of the session by sending tool definitions as part of the `setup` message."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "acbbd8c0155e"
},
"outputs": [],
"source": [
"# Set model generation_config\n",
"CONFIG = {\"response_modalities\": [\"TEXT\"]}\n",
"\n",
"# Set code execution\n",
"TOOLS = {\"code_execution\": {}}\n",
"\n",
"headers = {\n",
" \"Content-Type\": \"application/json\",\n",
" \"Authorization\": f\"Bearer {bearer_token[0]}\",\n",
"}\n",
"\n",
"# Connect to the server\n",
"async with connect(SERVICE_URL, additional_headers=headers) as ws:\n",
" # Setup the session\n",
" await ws.send(\n",
" json.dumps(\n",
" {\n",
" \"setup\": {\n",
" \"model\": MODEL,\n",
" \"generation_config\": CONFIG,\n",
" \"tools\": TOOLS,\n",
" }\n",
" }\n",
" )\n",
" )\n",
"\n",
" # Receive setup response\n",
" raw_response = await ws.recv(decode=False)\n",
" setup_response = json.loads(raw_response.decode())\n",
"\n",
" # Send text message\n",
" text_input = \"Write code to calculate the 15th fibonacci number then find the nearest palindrome to it\"\n",
" display(Markdown(f\"**Input:** {text_input}\"))\n",
"\n",
" msg = {\n",
" \"client_content\": {\n",
" \"turns\": [{\"role\": \"user\", \"parts\": [{\"text\": text_input}]}],\n",
" \"turn_complete\": True,\n",
" }\n",
" }\n",
"\n",
" await ws.send(json.dumps(msg))\n",
"\n",
" responses = []\n",
"\n",
" # Receive chucks of server response\n",
" async for raw_response in ws:\n",
" response = json.loads(raw_response.decode(\"UTF-8\"))\n",
"\n",
" if (server_content := response.get(\"serverContent\")) is not None:\n",
" model_turn = server_content.get(\"modelTurn\")\n",
" if (parts := model_turn.get(\"parts\")) is not None:\n",
" if parts[0].get(\"text\"):\n",
" responses.append(parts[0][\"text\"])\n",
" for part in parts:\n",
" if (executable_code := part.get(\"executableCode\")) is not None:\n",
" display(\n",
" Markdown(\n",
" f\"\"\"**Executable code:**\n",
"```py\n",
"{executable_code.get(\"code\")}\n",
"```\n",
" \"\"\"\n",
" )\n",
" )\n",
" if server_content.get(\"turnComplete\", False):\n",
" break\n",
"\n",
" # Print the server response\n",
" display(Markdown(f\"**Response >** {''.join(responses)}\"))"
]
},
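The part handler above surfaces `text` and `executableCode`, but the server also streams the result of running the generated code. A small sketch of handling that part as well, intended to sit inside the same per-part loop; the `codeExecutionResult`, `outcome`, and `output` field names are assumptions based on the code-execution part schema:

```python
# Sketch only: extend the per-part handling in the loop above to also show the
# result of executing the generated code. Assumes `part` and `responses` from
# the loop above; field names are an assumption.
if (code_result := part.get("codeExecutionResult")) is not None:
    responses.append(
        f"\n**Code execution result** (outcome: {code_result.get('outcome')}):\n"
        f"{code_result.get('output')}\n"
    )
```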
{
"cell_type": "markdown",
"metadata": {