
Commit dfafd5f

feat: gemini code samples updates (GoogleCloudPlatform#11579)
* chore: minor code readability improvements
* feat: update region tags and imports
* move region tags & imports to inside a function
* feat: Add readme notes for Generative AI sample developers
* chore: simplify README.md
* 🦉 Updates from OwlBot post-processor (see https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md)

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent 768f82b commit dfafd5f

18 files changed: 134 additions, 90 deletions

generative_ai/README.md

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
# Generative AI on Google Cloud

Product Page: https://cloud.google.com/ai/generative-ai?hl=en
Code samples: https://cloud.google.com/docs/samples?text=Generative%20AI

## Developer Notes

When developing code samples for Generative AI products, a scripting-style format is recommended. It is important to wrap the code sample, including its region tags, within a function definition.

This change is motivated by the desire to provide a code format that integrates effortlessly with popular data science community tools such as Colab, Jupyter notebooks, and the IPython shell.

Example:

```python
def create_hello_world_file(filename):
    # <region tag: starts here>
    import os

    # TODO(developer): Update and uncomment below code
    # filename = "/tmp/test.txt"

    if os.path.isfile(filename):
        print(f"Overwriting content in file(name: {filename})!")

    with open(filename, "w") as fp:
        fp.write("Hello world!")
    # <region tag: ends here>
```

On the Google Cloud documentation page, the code sample is shown as below:

```python
import os

# TODO(developer): Update and uncomment below code
# filename = "/tmp/test.txt"

if os.path.isfile(filename):
    print(f"Overwriting content in file(name: {filename})!")

with open(filename, "w") as fp:
    fp.write("Hello world!")
```

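For reference, here is a minimal sketch of the wrapped layout applied to a Gemini text sample, in the style of the files updated in this commit (the region tag name and prompt are illustrative, not part of this change):

```python
def generate_text(project_id: str, location: str) -> str:
    # [START example_region_tag]  (illustrative tag name)
    import vertexai
    from vertexai.generative_models import GenerativeModel

    # TODO(developer): Update and un-comment below lines
    # project_id = "PROJECT_ID"
    # location = "us-central1"
    vertexai.init(project=project_id, location=location)

    model = GenerativeModel(model_name="gemini-1.0-pro-002")
    response = model.generate_content("Why is the sky blue?")
    print(response.text)
    # [END example_region_tag]
    return response.text
```

Because the imports and the `vertexai.init` call live inside the function, everything between the region tags can be pasted into a notebook cell and run directly.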
generative_ai/function_calling.py

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ def generate_function_call(prompt: str, project_id: str, location: str) -> tuple
     vertexai.init(project=project_id, location=location)
 
     # Initialize Gemini model
-    model = GenerativeModel("gemini-1.0-pro-001")
+    model = GenerativeModel(model_name="gemini-1.0-pro-001")
 
     # Specify a function declaration and parameters for an API request
     get_current_weather_func = FunctionDeclaration(

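For context, a minimal sketch of how a declared function is wired into the model as a tool. Only `FunctionDeclaration`, `Tool`, and the model name appear in the hunk above; the parameter schema, prompt, and project values below are illustrative:

```python
import vertexai
from vertexai.generative_models import FunctionDeclaration, GenerativeModel, Tool

vertexai.init(project="PROJECT_ID", location="us-central1")

# Describe a function the model may ask the caller to invoke
get_current_weather_func = FunctionDeclaration(
    name="get_current_weather",
    description="Get the current weather in a given location",
    parameters={
        "type": "object",
        "properties": {"location": {"type": "string", "description": "City name"}},
    },
)
weather_tool = Tool(function_declarations=[get_current_weather_func])

model = GenerativeModel(model_name="gemini-1.0-pro-001", tools=[weather_tool])
response = model.generate_content("What is the weather like in Boston?")

# The model responds with a structured function call rather than free text
print(response.candidates[0].content.parts[0].function_call)
```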
generative_ai/function_calling_chat.py

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ def generate_function_call_chat(project_id: str, location: str) -> tuple:
 
     # Initialize Gemini model
     model = GenerativeModel(
-        "gemini-1.0-pro-001",
+        model_name="gemini-1.0-pro-001",
         generation_config=GenerationConfig(temperature=0),
         tools=[retail_tool],
     )

generative_ai/gemini_all_modalities.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ def analyze_all_modalities(project_id: str) -> str:
 
     vertexai.init(project=project_id, location="us-central1")
 
-    model = GenerativeModel("gemini-1.5-pro-preview-0409")
+    model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")
 
     video_file_uri = (
         "gs://cloud-samples-data/generative-ai/video/behind_the_scenes_pixel.mp4"

generative_ai/gemini_audio.py

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@ def summarize_audio(project_id: str) -> str:
 
     vertexai.init(project=project_id, location="us-central1")
 
-    model = GenerativeModel("gemini-1.5-pro-preview-0409")
+    model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")
 
     prompt = """
     Please provide a summary for the audio.
@@ -55,7 +55,7 @@ def transcript_audio(project_id: str) -> str:
 
     vertexai.init(project=project_id, location="us-central1")
 
-    model = GenerativeModel("gemini-1.5-pro-preview-0409")
+    model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")
 
     prompt = """
     Can you transcribe this interview, in the format of timecode, speaker, caption.

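For context, both audio samples go on to pass the prompt together with an audio `Part` to `generate_content`. A minimal sketch of that pattern, with a placeholder Cloud Storage URI that is not taken from this commit:

```python
import vertexai
from vertexai.generative_models import GenerativeModel, Part

vertexai.init(project="PROJECT_ID", location="us-central1")
model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")

prompt = """
Please provide a summary for the audio.
"""

# Reference the audio file by a Cloud Storage URI (placeholder path)
audio_part = Part.from_uri("gs://BUCKET/path/to/audio.mp3", mime_type="audio/mpeg")

response = model.generate_content([audio_part, prompt])
print(response.text)
```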
generative_ai/gemini_chat_example.py

Lines changed: 4 additions & 2 deletions
@@ -16,14 +16,15 @@
 def chat_text_example(project_id: str, location: str) -> str:
     # [START generativeaionvertexai_gemini_multiturn_chat]
     import vertexai
+
     from vertexai.generative_models import GenerativeModel, ChatSession
 
     # TODO(developer): Update and un-comment below lines
     # project_id = "PROJECT_ID"
     # location = "us-central1"
     vertexai.init(project=project_id, location=location)
 
-    model = GenerativeModel("gemini-1.0-pro")
+    model = GenerativeModel(model_name="gemini-1.0-pro-002")
     chat = model.start_chat()
 
     def get_chat_response(chat: ChatSession, prompt: str) -> str:
@@ -45,13 +46,14 @@ def get_chat_response(chat: ChatSession, prompt: str) -> str:
 def chat_stream_example(project_id: str, location: str) -> str:
     # [START generativeaionvertexai_gemini_multiturn_chat_stream]
     import vertexai
+
     from vertexai.generative_models import GenerativeModel, ChatSession
 
     # TODO(developer): Update and un-comment below lines
     # project_id = "PROJECT_ID"
     # location = "us-central1"
     vertexai.init(project=project_id, location=location)
-    model = GenerativeModel("gemini-1.0-pro")
+    model = GenerativeModel(model_name="gemini-1.0-pro-002")
     chat = model.start_chat()
 
     def get_chat_response(chat: ChatSession, prompt: str) -> str:

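Both samples build on `ChatSession`; the hunks above only show the setup. A minimal sketch of the usage pattern around `start_chat` (the prompts and the helper body are illustrative):

```python
import vertexai
from vertexai.generative_models import ChatSession, GenerativeModel

vertexai.init(project="PROJECT_ID", location="us-central1")

model = GenerativeModel(model_name="gemini-1.0-pro-002")
chat = model.start_chat()


def get_chat_response(chat: ChatSession, prompt: str) -> str:
    # The session object keeps the multi-turn history between calls
    response = chat.send_message(prompt)
    return response.text


print(get_chat_response(chat, "Hello."))
print(get_chat_response(chat, "What are all the colors in a rainbow?"))
```

The streaming variant works the same way, except `chat.send_message(prompt, stream=True)` returns an iterable of chunks whose `text` fields are concatenated.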
generative_ai/gemini_count_token_example.py

Lines changed: 6 additions & 8 deletions
@@ -13,17 +13,17 @@
 # limitations under the License.
 
 
-# [START generativeaionvertexai_gemini_token_count]
-import vertexai
-from vertexai.generative_models import GenerativeModel
+def generate_text(project_id: str, location: str) -> str:
+    # [START generativeaionvertexai_gemini_token_count]
+    import vertexai
 
+    from vertexai.generative_models import GenerativeModel
 
-def generate_text(project_id: str, location: str) -> str:
     # Initialize Vertex AI
     vertexai.init(project=project_id, location=location)
 
     # Load the model
-    model = GenerativeModel("gemini-1.0-pro")
+    model = GenerativeModel(model_name="gemini-1.0-pro-002")
 
     # prompt tokens count
     print(model.count_tokens("why is sky blue?"))
@@ -33,7 +33,5 @@ def generate_text(project_id: str, location: str) -> str:
 
     # response tokens count
     print(response._raw_response.usage_metadata)
+    # [END generativeaionvertexai_gemini_token_count]
     return response.text
-
-
-# [END generativeaionvertexai_gemini_token_count]

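For context, a minimal sketch of what the token-count calls report. The prompt and the `_raw_response.usage_metadata` access come from the diff above; the `total_tokens` and `total_billable_characters` field names are assumptions based on the public `CountTokensResponse` message:

```python
import vertexai
from vertexai.generative_models import GenerativeModel

vertexai.init(project="PROJECT_ID", location="us-central1")
model = GenerativeModel(model_name="gemini-1.0-pro-002")

# Count tokens before sending the prompt
count = model.count_tokens("why is sky blue?")
print(count.total_tokens)
print(count.total_billable_characters)

# After a request, the raw response carries prompt and candidate token counts
response = model.generate_content("why is sky blue?")
print(response._raw_response.usage_metadata)
```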
generative_ai/gemini_grounding_example.py

Lines changed: 14 additions & 17 deletions
@@ -12,29 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# [START generativeaionvertexai_gemini_grounding_with_web]
-# [START generativeaionvertexai_gemini_grounding_with_vais]
 
-import vertexai
-from vertexai.preview.generative_models import (
-    GenerationConfig,
-    GenerationResponse,
-    GenerativeModel,
-    grounding,
-    Tool,
-)
-
-# [END generativeaionvertexai_gemini_grounding_with_vais]
+from vertexai.preview.generative_models import GenerationResponse
 
 
 def generate_text_with_grounding_web(
     project_id: str, location: str
 ) -> GenerationResponse:
+    # [START generativeaionvertexai_gemini_grounding_with_web]
+    import vertexai
+    from vertexai.preview.generative_models import grounding
+    from vertexai.generative_models import GenerationConfig, GenerativeModel, Tool
+
     # Initialize Vertex AI
     vertexai.init(project=project_id, location=location)
 
     # Load the model
-    model = GenerativeModel(model_name="gemini-1.0-pro")
+    model = GenerativeModel(model_name="gemini-1.0-pro-002")
 
     # Use Google Search for grounding
     tool = Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval())
@@ -54,17 +48,20 @@ def generate_text_with_grounding_web(
     return response
 
 
-# [START generativeaionvertexai_gemini_grounding_with_vais]
-
-
 def generate_text_with_grounding_vertex_ai_search(
     project_id: str, location: str, data_store_path: str
 ) -> GenerationResponse:
+    # [START generativeaionvertexai_gemini_grounding_with_vais]
+    import vertexai
+
+    from vertexai.preview.generative_models import grounding
+    from vertexai.generative_models import GenerationConfig, GenerativeModel, Tool
+
     # Initialize Vertex AI
     vertexai.init(project=project_id, location=location)
 
     # Load the model
-    model = GenerativeModel(model_name="gemini-1.0-pro")
+    model = GenerativeModel(model_name="gemini-1.0-pro-002")
 
     # Use Vertex AI Search data store
     # Format: projects/{project_id}/locations/{location}/collections/default_collection/dataStores/{data_store_id}

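For context, a minimal sketch of how the web-grounding tool from the first hunk is used end to end. The imports and tool construction match the diff; the prompt and project values are illustrative:

```python
import vertexai
from vertexai.generative_models import GenerationConfig, GenerativeModel, Tool
from vertexai.preview.generative_models import grounding

vertexai.init(project="PROJECT_ID", location="us-central1")
model = GenerativeModel(model_name="gemini-1.0-pro-002")

# Ground the response in Google Search results
tool = Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval())

response = model.generate_content(
    "When is the next total solar eclipse in the US?",
    tools=[tool],
    generation_config=GenerationConfig(temperature=0.0),
)
print(response.text)
```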
generative_ai/gemini_guide_example.py

Lines changed: 8 additions & 8 deletions
@@ -12,20 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# [START generativeaionvertexai_gemini_get_started]
-# TODO(developer): Vertex AI SDK - uncomment below & run
-# pip3 install --upgrade --user google-cloud-aiplatform
-# gcloud auth application-default login
 
-import vertexai
-from vertexai.generative_models import GenerativeModel, Part
+def generate_text(project_id: str, location: str) -> str:
+    # [START generativeaionvertexai_gemini_get_started]
+    # TODO(developer): Vertex AI SDK - uncomment below & run
+    # pip3 install --upgrade --user google-cloud-aiplatform
+    # gcloud auth application-default login
 
+    import vertexai
+    from vertexai.generative_models import GenerativeModel, Part
 
-def generate_text(project_id: str, location: str) -> str:
     # Initialize Vertex AI
     vertexai.init(project=project_id, location=location)
     # Load the model
-    multimodal_model = GenerativeModel("gemini-1.0-pro-vision")
+    multimodal_model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")
     # Query the model
     response = multimodal_model.generate_content(
         [

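The hunk ends as the sample begins building a `generate_content` request from a list, with `Part` already imported. A sketch of how an image `Part` plus a text prompt can be passed in that form (the Cloud Storage URI is a placeholder, not taken from this commit):

```python
import vertexai
from vertexai.generative_models import GenerativeModel, Part

vertexai.init(project="PROJECT_ID", location="us-central1")
multimodal_model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")

# Combine an image Part and a text question in one request
response = multimodal_model.generate_content(
    [
        Part.from_uri("gs://BUCKET/path/to/image.jpg", mime_type="image/jpeg"),
        "What is shown in this image?",
    ]
)
print(response.text)
```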
generative_ai/gemini_multi_image_example.py

Lines changed: 6 additions & 6 deletions
@@ -12,19 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import vertexai
-
 
 def generate_text_multimodal(project_id: str, location: str) -> str:
-    # Initialize Vertex AI
-    vertexai.init(project=project_id, location=location)
-
     # [START generativeaionvertexai_gemini_single_turn_multi_image]
     import http.client
     import typing
     import urllib.request
+    import vertexai
+
     from vertexai.generative_models import GenerativeModel, Image
 
+    # Initialize Vertex AI
+    vertexai.init(project=project_id, location=location)
+
     # create helper function
     def load_image_from_url(image_url: str) -> Image:
         with urllib.request.urlopen(image_url) as response:
@@ -44,7 +44,7 @@ def load_image_from_url(image_url: str) -> Image:
     )
 
     # Pass multimodal prompt
-    model = GenerativeModel("gemini-1.0-pro-vision")
+    model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")
     response = model.generate_content(
         [
             landmark1,

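The first hunk cuts off inside `load_image_from_url`. For context, a sketch of the helper pattern the sample relies on, completed from the imports shown above (the example URL in the comment is a placeholder):

```python
import http.client
import typing
import urllib.request

from vertexai.generative_models import Image


def load_image_from_url(image_url: str) -> Image:
    # Download the image bytes and wrap them in the SDK's Image type
    with urllib.request.urlopen(image_url) as response:
        response = typing.cast(http.client.HTTPResponse, response)
        image_bytes = response.read()
    return Image.from_bytes(image_bytes)


# e.g. landmark1 = load_image_from_url("https://example.com/landmark1.jpg")
```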