mirror of https://github.com/databricks/cli.git
Update Go SDK to v0.41.0 (#1445)
## Changes

Release notes at https://github.com/databricks/databricks-sdk-go/releases/tag/v0.41.0.

## Tests

n/a
This commit is contained in:
parent c5032644a0
commit 63ceede335
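Note on the change surface: besides documentation-only updates to the bundle schema descriptions, the regenerated commands below track several signature changes in the SDK — `Connections.List` and `ConnectionInfoNameToFullNameMap` now take a `catalog.ListConnectionsRequest`, `Libraries.AllClusterStatuses` returns an iterator, `ServingEndpoints.ExportMetrics` returns a response with a readable `Contents` stream, and the `lakehouse-monitors` service is renamed to `quality-monitors`. As a rough, hypothetical sketch (not part of this commit) of how a direct caller of the Go SDK would list connections after the bump, assuming a workspace client configured from the environment:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// As of v0.41.0 the listing call takes a request struct with optional
	// paging fields (max results, page token) instead of only a context.
	it := w.Connections.List(ctx, catalog.ListConnectionsRequest{})
	for it.HasNext(ctx) {
		conn, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(conn.Name)
	}
}
```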
@@ -1 +1 @@
-9bb7950fa3390afb97abaa552934bc0a2e069de5
+7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92
@@ -62,7 +62,6 @@ cmd/workspace/instance-pools/instance-pools.go linguist-generated=true
 cmd/workspace/instance-profiles/instance-profiles.go linguist-generated=true
 cmd/workspace/ip-access-lists/ip-access-lists.go linguist-generated=true
 cmd/workspace/jobs/jobs.go linguist-generated=true
-cmd/workspace/lakehouse-monitors/lakehouse-monitors.go linguist-generated=true
 cmd/workspace/lakeview/lakeview.go linguist-generated=true
 cmd/workspace/libraries/libraries.go linguist-generated=true
 cmd/workspace/metastores/metastores.go linguist-generated=true
@@ -81,6 +80,7 @@ cmd/workspace/provider-personalization-requests/provider-personalization-request
 cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true
 cmd/workspace/provider-providers/provider-providers.go linguist-generated=true
 cmd/workspace/providers/providers.go linguist-generated=true
+cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true
 cmd/workspace/queries/queries.go linguist-generated=true
 cmd/workspace/query-history/query-history.go linguist-generated=true
 cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true
@@ -348,7 +348,7 @@
 "description": "If new_cluster, a description of a cluster that is created for each task.",
 "properties": {
 "apply_policy_default_values": {
-"description": ""
+"description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied."
 },
 "autoscale": {
 "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@@ -424,14 +424,6 @@
 }
 }
 },
-"clone_from": {
-"description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.",
-"properties": {
-"source_cluster_id": {
-"description": "The cluster that is being cloned."
-}
-}
-},
 "cluster_log_conf": {
 "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
 "properties": {
@@ -474,9 +466,6 @@
 "cluster_name": {
 "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n"
 },
-"cluster_source": {
-"description": ""
-},
 "custom_tags": {
 "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
 "additionalproperties": {
@@ -975,7 +964,7 @@
 "description": "If new_cluster, a description of a new cluster that is created for each run.",
 "properties": {
 "apply_policy_default_values": {
-"description": ""
+"description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied."
 },
 "autoscale": {
 "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@@ -1051,14 +1040,6 @@
 }
 }
 },
-"clone_from": {
-"description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.",
-"properties": {
-"source_cluster_id": {
-"description": "The cluster that is being cloned."
-}
-}
-},
 "cluster_log_conf": {
 "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
 "properties": {
@@ -1101,9 +1082,6 @@
 "cluster_name": {
 "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n"
 },
-"cluster_source": {
-"description": ""
-},
 "custom_tags": {
 "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
 "additionalproperties": {
@@ -1419,7 +1397,7 @@
 }
 },
 "python_named_params": {
-"description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.",
+"description": "",
 "additionalproperties": {
 "description": ""
 }
@@ -1853,6 +1831,15 @@
 "openai_config": {
 "description": "OpenAI Config. Only required if the provider is 'openai'.",
 "properties": {
+"microsoft_entra_client_id": {
+"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n"
+},
+"microsoft_entra_client_secret": {
+"description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n"
+},
+"microsoft_entra_tenant_id": {
+"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n"
+},
 "openai_api_base": {
 "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n"
 },
@@ -2009,6 +1996,9 @@
 }
 }
 },
+"route_optimized": {
+"description": "Enable route optimization for the serving endpoint."
+},
 "tags": {
 "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.",
 "items": {
@@ -2469,6 +2459,23 @@
 }
 }
 },
+"gateway_definition": {
+"description": "The definition of a gateway pipeline to support CDC.",
+"properties": {
+"connection_id": {
+"description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source."
+},
+"gateway_storage_catalog": {
+"description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location."
+},
+"gateway_storage_name": {
+"description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n"
+},
+"gateway_storage_schema": {
+"description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location."
+}
+}
+},
 "id": {
 "description": "Unique identifier for this pipeline."
 },
@@ -2500,6 +2507,23 @@
 },
 "source_schema": {
 "description": "Required. Schema name in the source database."
+},
+"table_configuration": {
+"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.",
+"properties": {
+"primary_keys": {
+"description": "The primary key of the table used to apply changes.",
+"items": {
+"description": ""
+}
+},
+"salesforce_include_formula_fields": {
+"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
+},
+"scd_type": {
+"description": "The SCD type to use to ingest the table."
+}
+}
 }
 }
 },
@@ -2523,11 +2547,45 @@
 },
 "source_table": {
 "description": "Required. Table name in the source database."
+},
+"table_configuration": {
+"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.",
+"properties": {
+"primary_keys": {
+"description": "The primary key of the table used to apply changes.",
+"items": {
+"description": ""
+}
+},
+"salesforce_include_formula_fields": {
+"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
+},
+"scd_type": {
+"description": "The SCD type to use to ingest the table."
+}
+}
 }
 }
 }
 }
 }
+},
+"table_configuration": {
+"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.",
+"properties": {
+"primary_keys": {
+"description": "The primary key of the table used to apply changes.",
+"items": {
+"description": ""
+}
+},
+"salesforce_include_formula_fields": {
+"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
+},
+"scd_type": {
+"description": "The SCD type to use to ingest the table."
+}
+}
 }
 }
 },
@@ -3071,7 +3129,7 @@
 "description": "If new_cluster, a description of a cluster that is created for each task.",
 "properties": {
 "apply_policy_default_values": {
-"description": ""
+"description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied."
 },
 "autoscale": {
 "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@@ -3147,14 +3205,6 @@
 }
 }
 },
-"clone_from": {
-"description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.",
-"properties": {
-"source_cluster_id": {
-"description": "The cluster that is being cloned."
-}
-}
-},
 "cluster_log_conf": {
 "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
 "properties": {
@@ -3197,9 +3247,6 @@
 "cluster_name": {
 "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n"
 },
-"cluster_source": {
-"description": ""
-},
 "custom_tags": {
 "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
 "additionalproperties": {
@@ -3698,7 +3745,7 @@
 "description": "If new_cluster, a description of a new cluster that is created for each run.",
 "properties": {
 "apply_policy_default_values": {
-"description": ""
+"description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied."
 },
 "autoscale": {
 "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@@ -3774,14 +3821,6 @@
 }
 }
 },
-"clone_from": {
-"description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.",
-"properties": {
-"source_cluster_id": {
-"description": "The cluster that is being cloned."
-}
-}
-},
 "cluster_log_conf": {
 "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
 "properties": {
@@ -3824,9 +3863,6 @@
 "cluster_name": {
 "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n"
 },
-"cluster_source": {
-"description": ""
-},
 "custom_tags": {
 "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
 "additionalproperties": {
@@ -4142,7 +4178,7 @@
 }
 },
 "python_named_params": {
-"description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.",
+"description": "",
 "additionalproperties": {
 "description": ""
 }
@@ -4576,6 +4612,15 @@
 "openai_config": {
 "description": "OpenAI Config. Only required if the provider is 'openai'.",
 "properties": {
+"microsoft_entra_client_id": {
+"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n"
+},
+"microsoft_entra_client_secret": {
+"description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n"
+},
+"microsoft_entra_tenant_id": {
+"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n"
+},
 "openai_api_base": {
 "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n"
 },
@@ -4732,6 +4777,9 @@
 }
 }
 },
+"route_optimized": {
+"description": "Enable route optimization for the serving endpoint."
+},
 "tags": {
 "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.",
 "items": {
@@ -5192,6 +5240,23 @@
 }
 }
 },
+"gateway_definition": {
+"description": "The definition of a gateway pipeline to support CDC.",
+"properties": {
+"connection_id": {
+"description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source."
+},
+"gateway_storage_catalog": {
+"description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location."
+},
+"gateway_storage_name": {
+"description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n"
+},
+"gateway_storage_schema": {
+"description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location."
+}
+}
+},
 "id": {
 "description": "Unique identifier for this pipeline."
 },
@@ -5223,6 +5288,23 @@
 },
 "source_schema": {
 "description": "Required. Schema name in the source database."
+},
+"table_configuration": {
+"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.",
+"properties": {
+"primary_keys": {
+"description": "The primary key of the table used to apply changes.",
+"items": {
+"description": ""
+}
+},
+"salesforce_include_formula_fields": {
+"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
+},
+"scd_type": {
+"description": "The SCD type to use to ingest the table."
+}
+}
 }
 }
 },
@@ -5246,11 +5328,45 @@
 },
 "source_table": {
 "description": "Required. Table name in the source database."
+},
+"table_configuration": {
+"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.",
+"properties": {
+"primary_keys": {
+"description": "The primary key of the table used to apply changes.",
+"items": {
+"description": ""
+}
+},
+"salesforce_include_formula_fields": {
+"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
+},
+"scd_type": {
+"description": "The SCD type to use to ingest the table."
+}
+}
 }
 }
 }
 }
 }
+},
+"table_configuration": {
+"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.",
+"properties": {
+"primary_keys": {
+"description": "The primary key of the table used to apply changes.",
+"items": {
+"description": ""
+}
+},
+"salesforce_include_formula_fields": {
+"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
+},
+"scd_type": {
+"description": "The SCD type to use to ingest the table."
+}
+}
 }
 }
 },
@@ -25,9 +25,6 @@ func New() *cobra.Command {
 setting is disabled for new workspaces. After workspace creation, account
 admins can enable enhanced security monitoring individually for each
 workspace.`,
-
-// This service is being previewed; hide from help output.
-Hidden: true,
 }
 
 // Add methods
@@ -22,9 +22,6 @@ func New() *cobra.Command {
 Short: `Controls whether automatic cluster update is enabled for the current workspace.`,
 Long: `Controls whether automatic cluster update is enabled for the current
 workspace. By default, it is turned off.`,
-
-// This service is being previewed; hide from help output.
-Hidden: true,
 }
 
 // Add methods
@@ -188,7 +188,7 @@ func newCreate() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, ``)
+cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, `When set to true, fixed and default values from the policy will be used for fields that are omitted.`)
 // TODO: complex arg: autoscale
 cmd.Flags().IntVar(&createReq.AutoterminationMinutes, "autotermination-minutes", createReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`)
 // TODO: complex arg: aws_attributes
@@ -196,15 +196,6 @@ func newCreate() *cobra.Command {
 // TODO: complex arg: clone_from
 // TODO: complex arg: cluster_log_conf
 cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`)
-cmd.Flags().Var(&createReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [
-API,
-JOB,
-MODELS,
-PIPELINE,
-PIPELINE_MAINTENANCE,
-SQL,
-UI,
-]`)
 // TODO: map via StringToStringVar: custom_tags
 cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [
 LEGACY_PASSTHROUGH,
@@ -443,23 +434,13 @@ func newEdit() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&editJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, ``)
+cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, `When set to true, fixed and default values from the policy will be used for fields that are omitted.`)
 // TODO: complex arg: autoscale
 cmd.Flags().IntVar(&editReq.AutoterminationMinutes, "autotermination-minutes", editReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`)
 // TODO: complex arg: aws_attributes
 // TODO: complex arg: azure_attributes
-// TODO: complex arg: clone_from
 // TODO: complex arg: cluster_log_conf
 cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`)
-cmd.Flags().Var(&editReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [
-API,
-JOB,
-MODELS,
-PIPELINE,
-PIPELINE_MAINTENANCE,
-SQL,
-UI,
-]`)
 // TODO: map via StringToStringVar: custom_tags
 cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [
 LEGACY_PASSTHROUGH,
@@ -32,7 +32,6 @@ import (
 instance_profiles "github.com/databricks/cli/cmd/workspace/instance-profiles"
 ip_access_lists "github.com/databricks/cli/cmd/workspace/ip-access-lists"
 jobs "github.com/databricks/cli/cmd/workspace/jobs"
-lakehouse_monitors "github.com/databricks/cli/cmd/workspace/lakehouse-monitors"
 lakeview "github.com/databricks/cli/cmd/workspace/lakeview"
 libraries "github.com/databricks/cli/cmd/workspace/libraries"
 metastores "github.com/databricks/cli/cmd/workspace/metastores"
@@ -51,6 +50,7 @@ import (
 provider_provider_analytics_dashboards "github.com/databricks/cli/cmd/workspace/provider-provider-analytics-dashboards"
 provider_providers "github.com/databricks/cli/cmd/workspace/provider-providers"
 providers "github.com/databricks/cli/cmd/workspace/providers"
+quality_monitors "github.com/databricks/cli/cmd/workspace/quality-monitors"
 queries "github.com/databricks/cli/cmd/workspace/queries"
 query_history "github.com/databricks/cli/cmd/workspace/query-history"
 query_visualizations "github.com/databricks/cli/cmd/workspace/query-visualizations"
@@ -113,7 +113,6 @@ func All() []*cobra.Command {
 out = append(out, instance_profiles.New())
 out = append(out, ip_access_lists.New())
 out = append(out, jobs.New())
-out = append(out, lakehouse_monitors.New())
 out = append(out, lakeview.New())
 out = append(out, libraries.New())
 out = append(out, metastores.New())
@@ -132,6 +131,7 @@ func All() []*cobra.Command {
 out = append(out, provider_provider_analytics_dashboards.New())
 out = append(out, provider_providers.New())
 out = append(out, providers.New())
+out = append(out, quality_monitors.New())
 out = append(out, queries.New())
 out = append(out, query_history.New())
 out = append(out, query_visualizations.New())
@@ -25,9 +25,6 @@ func New() *cobra.Command {
 off.
 
 This settings can NOT be disabled once it is enabled.`,
-
-// This service is being previewed; hide from help output.
-Hidden: true,
 }
 
 // Add methods
@@ -154,7 +154,7 @@ func newDelete() *cobra.Command {
 if len(args) == 0 {
 promptSpinner := cmdio.Spinner(ctx)
 promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down."
-names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx)
+names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx, catalog.ListConnectionsRequest{})
 close(promptSpinner)
 if err != nil {
 return fmt.Errorf("failed to load names for Connections drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -224,7 +224,7 @@ func newGet() *cobra.Command {
 if len(args) == 0 {
 promptSpinner := cmdio.Spinner(ctx)
 promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down."
-names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx)
+names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx, catalog.ListConnectionsRequest{})
 close(promptSpinner)
 if err != nil {
 return fmt.Errorf("failed to load names for Connections drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -265,11 +265,19 @@ func newGet() *cobra.Command {
 // Functions can be added from the `init()` function in manually curated files in this directory.
 var listOverrides []func(
 *cobra.Command,
+*catalog.ListConnectionsRequest,
 )
 
 func newList() *cobra.Command {
 cmd := &cobra.Command{}
 
+var listReq catalog.ListConnectionsRequest
+
+// TODO: short flags
+
+cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of connections to return.`)
+cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
+
 cmd.Use = "list"
 cmd.Short = `List connections.`
 cmd.Long = `List connections.
@@ -278,11 +286,17 @@ func newList() *cobra.Command {
 
 cmd.Annotations = make(map[string]string)
 
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+check := root.ExactArgs(0)
+return check(cmd, args)
+}
+
 cmd.PreRunE = root.MustWorkspaceClient
 cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 ctx := cmd.Context()
 w := root.WorkspaceClient(ctx)
-response := w.Connections.List(ctx)
+response := w.Connections.List(ctx, listReq)
 return cmdio.RenderIterator(ctx, response)
 }
 
@@ -292,7 +306,7 @@ func newList() *cobra.Command {
 
 // Apply optional overrides to this command.
 for _, fn := range listOverrides {
-fn(cmd)
+fn(cmd, &listReq)
 }
 
 return cmd
@@ -129,13 +129,14 @@ func newList() *cobra.Command {
 
 // TODO: array: assets
 // TODO: array: categories
+cmd.Flags().BoolVar(&listReq.IsAscending, "is-ascending", listReq.IsAscending, ``)
 cmd.Flags().BoolVar(&listReq.IsFree, "is-free", listReq.IsFree, `Filters each listing based on if it is free.`)
 cmd.Flags().BoolVar(&listReq.IsPrivateExchange, "is-private-exchange", listReq.IsPrivateExchange, `Filters each listing based on if it is a private exchange.`)
 cmd.Flags().BoolVar(&listReq.IsStaffPick, "is-staff-pick", listReq.IsStaffPick, `Filters each listing based on whether it is a staff pick.`)
 cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``)
 cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``)
 // TODO: array: provider_ids
-// TODO: complex arg: sort_by_spec
+cmd.Flags().Var(&listReq.SortBy, "sort-by", `Criteria for sorting the resulting set of listings. Supported values: [SORT_BY_DATE, SORT_BY_RELEVANCE, SORT_BY_TITLE, SORT_BY_UNSPECIFIED]`)
 // TODO: array: tags
 
 cmd.Use = "list"
@@ -191,6 +192,7 @@ func newSearch() *cobra.Command {
 
 // TODO: array: assets
 // TODO: array: categories
+cmd.Flags().BoolVar(&searchReq.IsAscending, "is-ascending", searchReq.IsAscending, ``)
 cmd.Flags().BoolVar(&searchReq.IsFree, "is-free", searchReq.IsFree, ``)
 cmd.Flags().BoolVar(&searchReq.IsPrivateExchange, "is-private-exchange", searchReq.IsPrivateExchange, ``)
 cmd.Flags().IntVar(&searchReq.PageSize, "page-size", searchReq.PageSize, ``)
@@ -27,9 +27,6 @@ func New() *cobra.Command {
 
 If the compliance security profile is disabled, you can enable or disable this
 setting and it is not permanent.`,
-
-// This service is being previewed; hide from help output.
-Hidden: true,
 }
 
 // Add methods
@@ -80,11 +80,8 @@ func newAllClusterStatuses() *cobra.Command {
 cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 ctx := cmd.Context()
 w := root.WorkspaceClient(ctx)
-response, err := w.Libraries.AllClusterStatuses(ctx)
-if err != nil {
-return err
-}
-return cmdio.Render(ctx, response)
+response := w.Libraries.AllClusterStatuses(ctx)
+return cmdio.RenderIterator(ctx, response)
 }
 
 // Disable completions since they are not applicable.
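For context on the `AllClusterStatuses` hunk above: the call no longer returns a single `(response, error)` pair; it returns an iterator that the command renders with `cmdio.RenderIterator`. A minimal, hypothetical sketch of consuming the same iterator directly from the SDK (not part of this commit; the status field names are assumptions based on the `compute` package):

```go
package example

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

// printClusterLibraryStatuses walks the iterator returned by the
// v0.41.0 AllClusterStatuses call and prints one line per cluster.
func printClusterLibraryStatuses(ctx context.Context, w *databricks.WorkspaceClient) error {
	it := w.Libraries.AllClusterStatuses(ctx)
	for it.HasNext(ctx) {
		status, err := it.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Printf("cluster %s: %d libraries\n", status.ClusterId, len(status.LibraryStatuses))
	}
	return nil
}
```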
@@ -945,6 +945,7 @@ func newUpdate() *cobra.Command {
 cmd.Flags().StringVar(&updateReq.Edition, "edition", updateReq.Edition, `Pipeline product edition.`)
 cmd.Flags().Int64Var(&updateReq.ExpectedLastModified, "expected-last-modified", updateReq.ExpectedLastModified, `If present, the last-modified time of the pipeline settings before the edit.`)
 // TODO: complex arg: filters
+// TODO: complex arg: gateway_definition
 cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Unique identifier for this pipeline.`)
 // TODO: complex arg: ingestion_definition
 // TODO: array: libraries
@@ -1,6 +1,6 @@
 // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
 
-package lakehouse_monitors
+package quality_monitors
 
 import (
 "fmt"
@@ -18,7 +18,7 @@ var cmdOverrides []func(*cobra.Command)
 
 func New() *cobra.Command {
 cmd := &cobra.Command{
-Use: "lakehouse-monitors",
+Use: "quality-monitors",
 Short: `A monitor computes and monitors data or model quality metrics for a table over time.`,
 Long: `A monitor computes and monitors data or model quality metrics for a table over
 time. It generates metrics tables and a dashboard that you can use to monitor
@@ -105,7 +105,7 @@ func newCancelRefresh() *cobra.Command {
 cancelRefreshReq.TableName = args[0]
 cancelRefreshReq.RefreshId = args[1]
 
-err = w.LakehouseMonitors.CancelRefresh(ctx, cancelRefreshReq)
+err = w.QualityMonitors.CancelRefresh(ctx, cancelRefreshReq)
 if err != nil {
 return err
 }
@@ -208,7 +208,7 @@ func newCreate() *cobra.Command {
 createReq.OutputSchemaName = args[2]
 }
 
-response, err := w.LakehouseMonitors.Create(ctx, createReq)
+response, err := w.QualityMonitors.Create(ctx, createReq)
 if err != nil {
 return err
 }
@@ -233,13 +233,13 @@ func newCreate() *cobra.Command {
 // Functions can be added from the `init()` function in manually curated files in this directory.
 var deleteOverrides []func(
 *cobra.Command,
-*catalog.DeleteLakehouseMonitorRequest,
+*catalog.DeleteQualityMonitorRequest,
 )
 
 func newDelete() *cobra.Command {
 cmd := &cobra.Command{}
 
-var deleteReq catalog.DeleteLakehouseMonitorRequest
+var deleteReq catalog.DeleteQualityMonitorRequest
 
 // TODO: short flags
 
@@ -278,7 +278,7 @@ func newDelete() *cobra.Command {
 
 deleteReq.TableName = args[0]
 
-err = w.LakehouseMonitors.Delete(ctx, deleteReq)
+err = w.QualityMonitors.Delete(ctx, deleteReq)
 if err != nil {
 return err
 }
@@ -303,13 +303,13 @@ func newDelete() *cobra.Command {
 // Functions can be added from the `init()` function in manually curated files in this directory.
 var getOverrides []func(
 *cobra.Command,
-*catalog.GetLakehouseMonitorRequest,
+*catalog.GetQualityMonitorRequest,
 )
 
 func newGet() *cobra.Command {
 cmd := &cobra.Command{}
 
-var getReq catalog.GetLakehouseMonitorRequest
+var getReq catalog.GetQualityMonitorRequest
 
 // TODO: short flags
 
@@ -347,7 +347,7 @@ func newGet() *cobra.Command {
 
 getReq.TableName = args[0]
 
-response, err := w.LakehouseMonitors.Get(ctx, getReq)
+response, err := w.QualityMonitors.Get(ctx, getReq)
 if err != nil {
 return err
 }
@@ -416,7 +416,7 @@ func newGetRefresh() *cobra.Command {
 getRefreshReq.TableName = args[0]
 getRefreshReq.RefreshId = args[1]
 
-response, err := w.LakehouseMonitors.GetRefresh(ctx, getRefreshReq)
+response, err := w.QualityMonitors.GetRefresh(ctx, getRefreshReq)
 if err != nil {
 return err
 }
@@ -484,7 +484,7 @@ func newListRefreshes() *cobra.Command {
 
 listRefreshesReq.TableName = args[0]
 
-response, err := w.LakehouseMonitors.ListRefreshes(ctx, listRefreshesReq)
+response, err := w.QualityMonitors.ListRefreshes(ctx, listRefreshesReq)
 if err != nil {
 return err
 }
@@ -552,7 +552,7 @@ func newRunRefresh() *cobra.Command {
 
 runRefreshReq.TableName = args[0]
 
-response, err := w.LakehouseMonitors.RunRefresh(ctx, runRefreshReq)
+response, err := w.QualityMonitors.RunRefresh(ctx, runRefreshReq)
 if err != nil {
 return err
 }
@@ -591,6 +591,7 @@ func newUpdate() *cobra.Command {
 
 cmd.Flags().StringVar(&updateReq.BaselineTableName, "baseline-table-name", updateReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`)
 // TODO: array: custom_metrics
+cmd.Flags().StringVar(&updateReq.DashboardId, "dashboard-id", updateReq.DashboardId, `Id of dashboard that visualizes the computed metrics.`)
 // TODO: complex arg: data_classification_config
 // TODO: complex arg: inference_log
 // TODO: complex arg: notifications
@@ -651,7 +652,7 @@ func newUpdate() *cobra.Command {
 			updateReq.OutputSchemaName = args[1]
 		}
 
-		response, err := w.LakehouseMonitors.Update(ctx, updateReq)
+		response, err := w.QualityMonitors.Update(ctx, updateReq)
 		if err != nil {
 			return err
 		}
@@ -670,4 +671,4 @@ func newUpdate() *cobra.Command {
 	return cmd
 }
 
-// end service LakehouseMonitors
+// end service QualityMonitors
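The hunks above track the SDK's rename of the LakehouseMonitors service to QualityMonitors. As a rough illustration of what that means for Go SDK callers (a minimal sketch, not part of this diff; the table name is a placeholder and default workspace authentication is assumed):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// The service accessor is now w.QualityMonitors (formerly w.LakehouseMonitors)
	// and the request type is GetQualityMonitorRequest (formerly GetLakehouseMonitorRequest).
	monitor, err := w.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{
		TableName: "main.default.my_table", // placeholder table name
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", monitor)
}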
@@ -152,6 +152,7 @@ func newCreate() *cobra.Command {
 	cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
 	// TODO: array: rate_limits
+	cmd.Flags().BoolVar(&createReq.RouteOptimized, "route-optimized", createReq.RouteOptimized, `Enable route optimization for the serving endpoint.`)
 	// TODO: array: tags
 
 	cmd.Use = "create"
@@ -303,11 +304,12 @@ func newExportMetrics() *cobra.Command {
 
 		exportMetricsReq.Name = args[0]
 
-		err = w.ServingEndpoints.ExportMetrics(ctx, exportMetricsReq)
+		response, err := w.ServingEndpoints.ExportMetrics(ctx, exportMetricsReq)
 		if err != nil {
 			return err
 		}
-		return nil
+		defer response.Contents.Close()
+		return cmdio.Render(ctx, response.Contents)
 	}
 
 	// Disable completions since they are not applicable.
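In SDK v0.41.0 the export-metrics call returns a response whose Contents field is a stream the caller must close, which is why the generated command now closes it and renders it. A minimal sketch of using that shape directly, assuming the request type lives in the serving package as serving.ExportMetricsRequest and that Contents satisfies io.ReadCloser (the endpoint name is a placeholder):

package main

import (
	"context"
	"io"
	"os"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/serving"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// ExportMetrics now returns a response instead of only an error.
	response, err := w.ServingEndpoints.ExportMetrics(ctx, serving.ExportMetricsRequest{
		Name: "my-endpoint", // placeholder endpoint name
	})
	if err != nil {
		panic(err)
	}
	// Contents must be closed by the caller once the payload has been consumed.
	defer response.Contents.Close()

	// Stream the raw metrics payload to stdout rather than buffering it.
	if _, err := io.Copy(os.Stdout, response.Contents); err != nil {
		panic(err)
	}
}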
@@ -67,6 +67,7 @@ func newCreate() *cobra.Command {
 	cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
 	cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`)
+	cmd.Flags().StringVar(&createReq.StorageRoot, "storage-root", createReq.StorageRoot, `Storage root URL for the share.`)
 
 	cmd.Use = "create NAME"
 	cmd.Short = `Create a share.`
@@ -368,6 +369,7 @@ func newUpdate() *cobra.Command {
 	cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
 	cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the share.`)
 	cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of share.`)
+	cmd.Flags().StringVar(&updateReq.StorageRoot, "storage-root", updateReq.StorageRoot, `Storage root URL for the share.`)
 	// TODO: array: updates
 
 	cmd.Use = "update NAME"
@@ -382,6 +384,9 @@ func newUpdate() *cobra.Command {
   In the case that the share name is changed, **updateShare** requires that the
   caller is both the share owner and a metastore admin.
 
+  If there are notebook files in the share, the __storage_root__ field cannot be
+  updated.
+
   For each table that is added through this method, the share owner must also
   have **SELECT** privilege on the table. This privilege must be maintained
   indefinitely for recipients to be able to access the table. Typically, you
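Both the create and update hunks above surface the new storage-root field for shares. A minimal sketch of setting it through the Go SDK, assuming the request body is sharing.CreateShare with Name, Comment, and StorageRoot string fields (the share name, comment, and URL below are placeholders):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sharing"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// StorageRoot is the new optional field exposed by the --storage-root flag.
	share, err := w.Shares.Create(ctx, sharing.CreateShare{
		Name:        "my-share",                       // placeholder share name
		Comment:     "example share",                  // placeholder comment
		StorageRoot: "s3://my-bucket/shares/my-share", // placeholder storage root URL
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", share)
}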
@@ -3,8 +3,6 @@
 package system_schemas
 
 import (
-	"fmt"
-
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/databricks-sdk-go/service/catalog"
@@ -81,10 +79,7 @@ func newDisable() *cobra.Command {
 		w := root.WorkspaceClient(ctx)
 
 		disableReq.MetastoreId = args[0]
-		_, err = fmt.Sscan(args[1], &disableReq.SchemaName)
-		if err != nil {
-			return fmt.Errorf("invalid SCHEMA_NAME: %s", args[1])
-		}
+		disableReq.SchemaName = args[1]
 
 		err = w.SystemSchemas.Disable(ctx, disableReq)
 		if err != nil {
@@ -145,10 +140,7 @@ func newEnable() *cobra.Command {
 		w := root.WorkspaceClient(ctx)
 
 		enableReq.MetastoreId = args[0]
-		_, err = fmt.Sscan(args[1], &enableReq.SchemaName)
-		if err != nil {
-			return fmt.Errorf("invalid SCHEMA_NAME: %s", args[1])
-		}
+		enableReq.SchemaName = args[1]
 
 		err = w.SystemSchemas.Enable(ctx, enableReq)
 		if err != nil {
@@ -42,6 +42,7 @@ func New() *cobra.Command {
 	cmd.AddCommand(newGetIndex())
 	cmd.AddCommand(newListIndexes())
 	cmd.AddCommand(newQueryIndex())
+	cmd.AddCommand(newScanIndex())
 	cmd.AddCommand(newSyncIndex())
 	cmd.AddCommand(newUpsertDataVectorIndex())
 
@@ -468,6 +469,76 @@ func newQueryIndex() *cobra.Command {
 	return cmd
 }
 
+// start scan-index command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var scanIndexOverrides []func(
+	*cobra.Command,
+	*vectorsearch.ScanVectorIndexRequest,
+)
+
+func newScanIndex() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var scanIndexReq vectorsearch.ScanVectorIndexRequest
+	var scanIndexJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&scanIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Flags().StringVar(&scanIndexReq.LastPrimaryKey, "last-primary-key", scanIndexReq.LastPrimaryKey, `Primary key of the last entry returned in the previous scan.`)
+	cmd.Flags().IntVar(&scanIndexReq.NumResults, "num-results", scanIndexReq.NumResults, `Number of results to return.`)
+
+	cmd.Use = "scan-index INDEX_NAME"
+	cmd.Short = `Scan an index.`
+	cmd.Long = `Scan an index.
+
+  Scan the specified vector index and return the first num_results entries
+  after the exclusive primary_key.
+
+  Arguments:
+    INDEX_NAME: Name of the vector index to scan.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(1)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = scanIndexJson.Unmarshal(&scanIndexReq)
+			if err != nil {
+				return err
+			}
+		}
+		scanIndexReq.IndexName = args[0]
+
+		response, err := w.VectorSearchIndexes.ScanIndex(ctx, scanIndexReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range scanIndexOverrides {
+		fn(cmd, &scanIndexReq)
+	}
+
+	return cmd
+}
+
 // start sync-index command
 
 // Slice with functions to override default command behavior.
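The new scan-index command wraps the ScanIndex call added to the vector search service. A minimal sketch of calling it directly through the Go SDK (not part of this diff; the index name and page size are placeholders, and pagination beyond the first page is left out):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Scan the first page of entries; LastPrimaryKey could be fed back in on a
	// follow-up request to continue after the last returned primary key.
	response, err := w.VectorSearchIndexes.ScanIndex(ctx, vectorsearch.ScanVectorIndexRequest{
		IndexName:  "main.default.my_index", // placeholder index name
		NumResults: 10,                      // placeholder page size
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", response)
}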
go.mod
@@ -5,7 +5,7 @@ go 1.21
 require (
 	github.com/Masterminds/semver/v3 v3.2.1 // MIT
 	github.com/briandowns/spinner v1.23.0 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.40.1 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.41.0 // Apache 2.0
 	github.com/fatih/color v1.17.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
@@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.40.1 h1:rE5yP9gIW2oap+6CnumixnZSDIsXwVojAuDBuKUl5GU=
-github.com/databricks/databricks-sdk-go v0.40.1/go.mod h1:rLIhh7DvifVLmf2QxMr/vMRGqdrTZazn8VYo4LilfCo=
+github.com/databricks/databricks-sdk-go v0.41.0 h1:OyhYY+Q6+gqkWeXmpGEiacoU2RStTeWPF0x4vmqbQdc=
+github.com/databricks/databricks-sdk-go v0.41.0/go.mod h1:rLIhh7DvifVLmf2QxMr/vMRGqdrTZazn8VYo4LilfCo=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=