docs/dyn/aiplatform_v1.endpoints.html: 33 additions & 9 deletions
@@ -154,7 +154,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.
@@ -238,7 +239,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.
@@ -368,7 +370,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.
@@ -381,6 +384,9 @@ <h3>Method Details</h3>
 { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval).
   "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation.
   },
+  "computerUse": { # Tool to support computer use. # Optional. Tool to support the model interacting directly with the computer. If enabled, it automatically populates computer-use specific Function Declarations.
+    "environment": "A String", # Required. The environment being operated.
+  },
   "enterpriseWebSearch": { # Tool to search public web data, powered by Vertex AI Search and Sec4 compliance. # Optional. Tool to support searching public web data, powered by Vertex AI Search and Sec4 compliance.
   },
   "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided.
@@ -560,6 +566,8 @@ <h3>Method Details</h3>
         "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold.
       },
     },
+    "urlContext": { # Tool to support URL context. # Optional. Tool to support URL context retrieval.
+    },
   },
 ],
}
@@ -672,7 +680,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.
@@ -808,7 +817,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.
@@ -836,6 +846,9 @@ <h3>Method Details</h3>
 { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval).
   "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation.
   },
+  "computerUse": { # Tool to support computer use. # Optional. Tool to support the model interacting directly with the computer. If enabled, it automatically populates computer-use specific Function Declarations.
+    "environment": "A String", # Required. The environment being operated.
+  },
   "enterpriseWebSearch": { # Tool to search public web data, powered by Vertex AI Search and Sec4 compliance. # Optional. Tool to support searching public web data, powered by Vertex AI Search and Sec4 compliance.
   },
   "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided.
@@ -1015,6 +1028,8 @@ <h3>Method Details</h3>
         "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold.
       },
     },
+    "urlContext": { # Tool to support URL context. # Optional. Tool to support URL context retrieval.
+    },
   },
 ],
}
@@ -1081,7 +1096,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.
@@ -1354,7 +1370,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.
@@ -1490,7 +1507,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.
@@ -1518,6 +1536,9 @@ <h3>Method Details</h3>
 { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval).
   "codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation.
   },
+  "computerUse": { # Tool to support computer use. # Optional. Tool to support the model interacting directly with the computer. If enabled, it automatically populates computer-use specific Function Declarations.
+    "environment": "A String", # Required. The environment being operated.
+  },
   "enterpriseWebSearch": { # Tool to search public web data, powered by Vertex AI Search and Sec4 compliance. # Optional. Tool to support searching public web data, powered by Vertex AI Search and Sec4 compliance.
   },
   "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 128 function declarations can be provided.
@@ -1697,6 +1718,8 @@ <h3>Method Details</h3>
         "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold.
       },
     },
+    "urlContext": { # Tool to support URL context. # Optional. Tool to support URL context retrieval.
+    },
   },
 ],
}
@@ -1763,7 +1786,8 @@ <h3>Method Details</h3>
     "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
   },
   "text": "A String", # Optional. Text part (can be code).
-  "thought": True or False, # Output only. Indicates if the part is thought from the model.
+  "thought": True or False, # Optional. Indicates if the part is thought from the model.
+  "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
   "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
     "endOffset": "A String", # Optional. The end offset of the video.
     "startOffset": "A String", # Optional. The start offset of the video.