@@ -115,7 +115,7 @@ def setUp(self):

     def test_hello(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")

         self.responses["generate_content"].append(simple_response("world!"))

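For reference, the end-user pattern this test pins down looks roughly like the sketch below. It assumes the public google-generativeai package and a real API key; the test itself stubs the client, so nothing here is called for real.

    import os

    import google.generativeai as genai

    # Assumed setup: a live call needs a key; the unit tests mock the transport.
    genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

    model = genai.GenerativeModel(model_name="gemini-1.5-flash")
    response = model.generate_content("Hello")
    print(response.text)  # the stubbed test would yield "world!"
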
@@ -138,7 +138,7 @@ def test_hello(self):
     )
     def test_image(self, content):
         # Generate text from image
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")

         cat = "It's a cat"
         self.responses["generate_content"].append(simple_response(cat))
@@ -172,7 +172,7 @@ def test_image(self, content):
     )
     def test_generation_config_overwrite(self, config1, config2):
         # Generation config
-        model = generative_models.GenerativeModel("gemini-pro", generation_config=config1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1)

         self.responses["generate_content"] = [
             simple_response(" world!"),
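The overwrite semantics under test: a per-call generation_config replaces the one given at construction. A minimal sketch against the public API, with key setup as in the earlier sketch:

    import google.generativeai as genai

    model = genai.GenerativeModel(
        "gemini-1.5-flash",
        generation_config=genai.GenerationConfig(temperature=0.0),
    )
    # The per-request config wins over the constructor default.
    response = model.generate_content(
        "Hello", generation_config=genai.GenerationConfig(temperature=1.0)
    )
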
@@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2):
     )
     def test_safety_overwrite(self, safe1, safe2):
         # Safety
-        model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1)

         self.responses["generate_content"] = [
             simple_response(" world!"),
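The same overwrite pattern applies to safety settings. In the sketch below the string shorthand for category and threshold is an assumption about the SDK's permissive parsing; enum values from google.generativeai.types should work equally:

    import google.generativeai as genai

    model = genai.GenerativeModel(
        "gemini-1.5-flash",
        safety_settings={"HARASSMENT": "BLOCK_ONLY_HIGH"},
    )
    # Per-call settings overwrite the model-level ones.
    response = model.generate_content(
        "Hello", safety_settings={"HARASSMENT": "BLOCK_NONE"}
    )
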
@@ -253,7 +253,7 @@ def test_stream_basic(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]

-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)

         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
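With stream=True the response is an iterable of partial chunks, which is what the three-chunk fixture above simulates. A minimal sketch, assuming a configured key:

    import google.generativeai as genai

    model = genai.GenerativeModel("gemini-1.5-flash")
    response = model.generate_content("Hello", stream=True)
    for chunk in response:  # e.g. "first", " second", " third" in the fixture
        print(chunk.text, end="")
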
@@ -267,7 +267,7 @@ def test_stream_lookahead(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]

-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)

         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]

-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)

         self.assertEqual(
@@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]

-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)

         self.assertEqual(
@@ -389,7 +389,7 @@ def add(a: int, b: int) -> int:

     def test_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         self.responses["generate_content"] = [
@@ -423,7 +423,7 @@ def test_chat(self):
     def test_chat_roles(self):
         self.responses["generate_content"] = [simple_response("hello!")]

-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         response = chat.send_message("hello?")
         history = chat.history
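The chat tests exercise the stateful flow below; chat.history accumulates alternating user/model turns. Sketch, assuming a configured key:

    import google.generativeai as genai

    model = genai.GenerativeModel("gemini-1.5-flash")
    chat = model.start_chat()
    reply = chat.send_message("hello?")
    print(reply.text)
    print(chat.history)  # alternating user/model Content entries
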
@@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]

-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = model.generate_content("Hello", tools=[tools], tool_config=tool_config)

         req = self.observed_requests[0]
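A hedged sketch of the tools/tool_config surface, using a plain Python function as the tool. The nested-dict form of tool_config mirrors the parametrized cases of this test and is assumed to be one accepted shape, not the only one:

    import google.generativeai as genai

    def add(a: int, b: int) -> int:
        """Returns the sum of two integers."""
        return a + b

    model = genai.GenerativeModel("gemini-1.5-flash", tools=[add])
    response = model.generate_content(
        "What is 2 + 3?",
        tool_config={"function_calling_config": {"mode": "ANY"}},
    )
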
@@ -811,7 +811,9 @@ def test_tool_config(self, tool_config, expected_tool_config):
     )
     def test_system_instruction(self, instruction, expected_instr):
         self.responses["generate_content"] = [simple_response("echo echo")]
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction)
+        model = generative_models.GenerativeModel(
+            "gemini-1.5-flash", system_instruction=instruction
+        )

         _ = model.generate_content("test")

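system_instruction is fixed at construction and sent with every request, which is what the assertion on the observed request checks. Minimal sketch:

    import google.generativeai as genai

    model = genai.GenerativeModel(
        "gemini-1.5-flash",
        system_instruction="Be excellent.",
    )
    response = model.generate_content("test")
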
@@ -852,7 +854,7 @@ def test_count_tokens_smoke(self, kwargs):
         )

     def test_repr_for_unary_non_streamed_response(self):
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
         self.responses["generate_content"].append(simple_response("world!"))
         response = model.generate_content("Hello")

@@ -885,7 +887,7 @@ def test_repr_for_streaming_start_to_finish(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]

-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
         iterator = iter(response)

@@ -980,7 +982,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]

-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)

         result = repr(response)
@@ -1096,7 +1098,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):

     def test_repr_for_multi_turn_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         self.responses["generate_content"] = [
@@ -1119,7 +1121,7 @@ def test_repr_for_multi_turn_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1133,7 +1135,7 @@ def test_repr_for_multi_turn_chat(self):

     def test_repr_for_incomplete_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         self.responses["stream_generate_content"] = [
@@ -1148,7 +1150,7 @@ def test_repr_for_incomplete_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1162,7 +1164,7 @@ def test_repr_for_incomplete_streaming_chat(self):

     def test_repr_for_broken_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         self.responses["stream_generate_content"] = [
@@ -1193,7 +1195,7 @@ def test_repr_for_broken_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1206,7 +1208,9 @@ def test_repr_for_broken_streaming_chat(self):
         self.assertEqual(expected, result)

     def test_repr_for_system_instruction(self):
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.")
+        model = generative_models.GenerativeModel(
+            "gemini-1.5-flash", system_instruction="Be excellent."
+        )
         result = repr(model)
         self.assertIn("system_instruction='Be excellent.'", result)

@@ -1237,7 +1241,7 @@ def test_chat_with_request_options(self):
         )
         request_options = {"timeout": 120}

-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options))
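Per-request transport options are the last surface touched here. The test builds helper_types.RequestOptions from a plain dict; the bare-dict form in the sketch below is assumed to be accepted directly as well:

    import google.generativeai as genai

    model = genai.GenerativeModel("gemini-1.5-flash")
    chat = model.start_chat()
    # A 120-second timeout for this single call, mirroring the test fixture.
    chat.send_message("hello", request_options={"timeout": 120})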