Upgrade Go SDK to 0.27.0 (#1064)

## Changes
Upgrade the Databricks Go SDK from v0.26.2 to v0.27.0 and regenerate the CLI commands and bundle schema descriptions from the updated OpenAPI spec. Notable regenerated changes: the account `network-policy` command group is removed, serving endpoints gain served entities, external models, and rate limits (including a new `put` command), and several Unity Catalog `update` commands gain a `--new-name` flag.
Andrew Nester authored on 2023-12-14 09:15:00 +01:00; committed by GitHub
parent dc12b3f1cf
commit a6ec9ac08b
21 changed files with 490 additions and 306 deletions


@@ -1 +1 @@
22f09783eb8a84d52026f856be3b2068f9498db3
63caa3cb0c05045e81d3dcf2451fa990d8670f36

.gitattributes (vendored, 1 line changed)

@@ -11,7 +11,6 @@ cmd/account/log-delivery/log-delivery.go linguist-generated=true
cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true
cmd/account/metastores/metastores.go linguist-generated=true
cmd/account/network-connectivity/network-connectivity.go linguist-generated=true
cmd/account/network-policy/network-policy.go linguist-generated=true
cmd/account/networks/networks.go linguist-generated=true
cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true
cmd/account/private-access/private-access.go linguist-generated=true


@@ -1275,9 +1275,12 @@
"description": "ID of the job to trigger."
},
"job_parameters": {
"description": "Job-level parameters used to trigger the job.",
"additionalproperties": {
"description": ""
}
}
}
},
"spark_jar_task": {
"description": "If spark_jar_task, indicates that this task must run a JAR.",
@@ -1414,7 +1417,7 @@
"description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout."
},
"webhook_notifications": {
"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
"properties": {
"on_duration_warning_threshold_exceeded": {
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -1491,7 +1494,7 @@
}
},
"webhook_notifications": {
"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
"properties": {
"on_duration_warning_threshold_exceeded": {
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -1550,8 +1553,162 @@
"config": {
"description": "The core config of the serving endpoint.",
"properties": {
"auto_capture_config": {
"description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.",
"properties": {
"catalog_name": {
"description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set."
},
"enabled": {
"description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again."
},
"schema_name": {
"description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set."
},
"table_name_prefix": {
"description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set."
}
}
},
"served_entities": {
"description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities.",
"items": {
"description": "",
"properties": {
"entity_name": {
"description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC),\nor a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of\n__catalog_name__.__schema_name__.__model_name__.\n"
},
"entity_version": {
"description": "The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC."
},
"environment_vars": {
"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`",
"additionalproperties": {
"description": ""
}
},
"external_model": {
"description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n",
"properties": {
"config": {
"description": "The config for the external model, which must match the provider.",
"properties": {
"ai21labs_config": {
"description": "AI21Labs Config",
"properties": {
"ai21labs_api_key": {
"description": "The Databricks secret key reference for an AI21Labs API key."
}
}
},
"anthropic_config": {
"description": "Anthropic Config",
"properties": {
"anthropic_api_key": {
"description": "The Databricks secret key reference for an Anthropic API key."
}
}
},
"aws_bedrock_config": {
"description": "AWS Bedrock Config",
"properties": {
"aws_access_key_id": {
"description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services."
},
"aws_region": {
"description": "The AWS region to use. Bedrock has to be enabled there."
},
"aws_secret_access_key": {
"description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services."
},
"bedrock_provider": {
"description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon."
}
}
},
"cohere_config": {
"description": "Cohere Config",
"properties": {
"cohere_api_key": {
"description": "The Databricks secret key reference for a Cohere API key."
}
}
},
"databricks_model_serving_config": {
"description": "Databricks Model Serving Config",
"properties": {
"databricks_api_token": {
"description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n"
},
"databricks_workspace_url": {
"description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n"
}
}
},
"openai_config": {
"description": "OpenAI Config",
"properties": {
"openai_api_base": {
"description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n"
},
"openai_api_key": {
"description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key."
},
"openai_api_type": {
"description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n"
},
"openai_api_version": {
"description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n"
},
"openai_deployment_name": {
"description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n"
},
"openai_organization": {
"description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n"
}
}
},
"palm_config": {
"description": "PaLM Config",
"properties": {
"palm_api_key": {
"description": "The Databricks secret key reference for a PaLM API key."
}
}
}
}
},
"name": {
"description": "The name of the external model."
},
"provider": {
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n"
},
"task": {
"description": "The task type of the external model."
}
}
},
"instance_profile_arn": {
"description": "ARN of the instance profile that the served entity uses to access AWS resources."
},
"name": {
"description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n"
},
"scale_to_zero_enabled": {
"description": "Whether the compute resources for the served entity should scale down to zero."
},
"workload_size": {
"description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.\n"
},
"workload_type": {
"description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n"
}
}
}
},
"served_models": {
"description": "A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.",
"description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.",
"items": {
"description": "",
"properties": {
@@ -1580,7 +1737,7 @@
"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n"
},
"workload_type": {
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See documentation for all\noptions.\n"
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n"
}
}
}
@@ -1589,7 +1746,7 @@
"description": "The traffic config defining how invocations to the serving endpoint should be routed.",
"properties": {
"routes": {
"description": "The list of routes that define traffic to each served model.",
"description": "The list of routes that define traffic to each served entity.",
"items": {
"description": "",
"properties": {
@@ -1629,6 +1786,23 @@
}
}
},
"rate_limits": {
"description": "Rate limits to be applied to the serving endpoint. NOTE: only external and foundation model endpoints are supported as of now.",
"items": {
"description": "",
"properties": {
"calls": {
"description": "Used to specify how many calls are allowed for a key within the renewal_period."
},
"key": {
"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified."
},
"renewal_period": {
"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported."
}
}
}
},
"tags": {
"description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.",
"items": {
@@ -3460,9 +3634,12 @@
"description": "ID of the job to trigger."
},
"job_parameters": {
"description": "Job-level parameters used to trigger the job.",
"additionalproperties": {
"description": ""
}
}
}
},
"spark_jar_task": {
"description": "If spark_jar_task, indicates that this task must run a JAR.",
@@ -3599,7 +3776,7 @@
"description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout."
},
"webhook_notifications": {
"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
"properties": {
"on_duration_warning_threshold_exceeded": {
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -3676,7 +3853,7 @@
}
},
"webhook_notifications": {
"description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.",
"properties": {
"on_duration_warning_threshold_exceeded": {
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@@ -3735,8 +3912,162 @@
"config": {
"description": "The core config of the serving endpoint.",
"properties": {
"auto_capture_config": {
"description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.",
"properties": {
"catalog_name": {
"description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set."
},
"enabled": {
"description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again."
},
"schema_name": {
"description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set."
},
"table_name_prefix": {
"description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set."
}
}
},
"served_entities": {
"description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities.",
"items": {
"description": "",
"properties": {
"entity_name": {
"description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC),\nor a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of\n__catalog_name__.__schema_name__.__model_name__.\n"
},
"entity_version": {
"description": "The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC."
},
"environment_vars": {
"description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`",
"additionalproperties": {
"description": ""
}
},
"external_model": {
"description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n",
"properties": {
"config": {
"description": "The config for the external model, which must match the provider.",
"properties": {
"ai21labs_config": {
"description": "AI21Labs Config",
"properties": {
"ai21labs_api_key": {
"description": "The Databricks secret key reference for an AI21Labs API key."
}
}
},
"anthropic_config": {
"description": "Anthropic Config",
"properties": {
"anthropic_api_key": {
"description": "The Databricks secret key reference for an Anthropic API key."
}
}
},
"aws_bedrock_config": {
"description": "AWS Bedrock Config",
"properties": {
"aws_access_key_id": {
"description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services."
},
"aws_region": {
"description": "The AWS region to use. Bedrock has to be enabled there."
},
"aws_secret_access_key": {
"description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services."
},
"bedrock_provider": {
"description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon."
}
}
},
"cohere_config": {
"description": "Cohere Config",
"properties": {
"cohere_api_key": {
"description": "The Databricks secret key reference for a Cohere API key."
}
}
},
"databricks_model_serving_config": {
"description": "Databricks Model Serving Config",
"properties": {
"databricks_api_token": {
"description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n"
},
"databricks_workspace_url": {
"description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n"
}
}
},
"openai_config": {
"description": "OpenAI Config",
"properties": {
"openai_api_base": {
"description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n"
},
"openai_api_key": {
"description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key."
},
"openai_api_type": {
"description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n"
},
"openai_api_version": {
"description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n"
},
"openai_deployment_name": {
"description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n"
},
"openai_organization": {
"description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n"
}
}
},
"palm_config": {
"description": "PaLM Config",
"properties": {
"palm_api_key": {
"description": "The Databricks secret key reference for a PaLM API key."
}
}
}
}
},
"name": {
"description": "The name of the external model."
},
"provider": {
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n"
},
"task": {
"description": "The task type of the external model."
}
}
},
"instance_profile_arn": {
"description": "ARN of the instance profile that the served entity uses to access AWS resources."
},
"name": {
"description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n"
},
"scale_to_zero_enabled": {
"description": "Whether the compute resources for the served entity should scale down to zero."
},
"workload_size": {
"description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.\n"
},
"workload_type": {
"description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n"
}
}
}
},
"served_models": {
"description": "A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.",
"description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.",
"items": {
"description": "",
"properties": {
@@ -3765,7 +4096,7 @@
"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n"
},
"workload_type": {
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See documentation for all\noptions.\n"
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n"
}
}
}
@@ -3774,7 +4105,7 @@
"description": "The traffic config defining how invocations to the serving endpoint should be routed.",
"properties": {
"routes": {
"description": "The list of routes that define traffic to each served model.",
"description": "The list of routes that define traffic to each served entity.",
"items": {
"description": "",
"properties": {
@@ -3814,6 +4145,23 @@
}
}
},
"rate_limits": {
"description": "Rate limits to be applied to the serving endpoint. NOTE: only external and foundation model endpoints are supported as of now.",
"items": {
"description": "",
"properties": {
"calls": {
"description": "Used to specify how many calls are allowed for a key within the renewal_period."
},
"key": {
"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified."
},
"renewal_period": {
"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported."
}
}
}
},
"tags": {
"description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.",
"items": {

cmd/account/cmd.go (generated, 2 lines changed)

@@ -17,7 +17,6 @@ import (
account_metastore_assignments "github.com/databricks/cli/cmd/account/metastore-assignments"
account_metastores "github.com/databricks/cli/cmd/account/metastores"
network_connectivity "github.com/databricks/cli/cmd/account/network-connectivity"
account_network_policy "github.com/databricks/cli/cmd/account/network-policy"
networks "github.com/databricks/cli/cmd/account/networks"
o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps"
private_access "github.com/databricks/cli/cmd/account/private-access"
@@ -51,7 +50,6 @@ func New() *cobra.Command {
cmd.AddCommand(account_metastore_assignments.New())
cmd.AddCommand(account_metastores.New())
cmd.AddCommand(network_connectivity.New())
cmd.AddCommand(account_network_policy.New())
cmd.AddCommand(networks.New())
cmd.AddCommand(o_auth_published_apps.New())
cmd.AddCommand(private_access.New())


@@ -1,257 +0,0 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package network_policy
import (
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/settings"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "network-policy",
Short: `Network policy is a set of rules that defines what can be accessed from your Databricks network.`,
Long: `Network policy is a set of rules that defines what can be accessed from your
Databricks network. E.g.: You can choose to block your SQL UDF to access
internet from your Databricks serverless clusters.
There is only one instance of this setting per account. Since this setting has
a default value, this setting is present on all accounts even though it's
never set on a given account. Deletion reverts the value of the setting back
to the default value.`,
GroupID: "settings",
Annotations: map[string]string{
"package": "settings",
},
// This service is being previewed; hide from help output.
Hidden: true,
}
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start delete-account-network-policy command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var deleteAccountNetworkPolicyOverrides []func(
*cobra.Command,
*settings.DeleteAccountNetworkPolicyRequest,
)
func newDeleteAccountNetworkPolicy() *cobra.Command {
cmd := &cobra.Command{}
var deleteAccountNetworkPolicyReq settings.DeleteAccountNetworkPolicyRequest
// TODO: short flags
cmd.Use = "delete-account-network-policy ETAG"
cmd.Short = `Delete Account Network Policy.`
cmd.Long = `Delete Account Network Policy.
Reverts back all the account network policies back to default.
Arguments:
ETAG: etag used for versioning. The response is at least as fresh as the eTag
provided. This is used for optimistic concurrency control as a way to help
prevent simultaneous writes of a setting overwriting each other. It is
strongly suggested that systems make use of the etag in the read -> delete
pattern to perform setting deletions in order to avoid race conditions.
That is, get an etag from a GET request, and pass it with the DELETE
request to identify the rule set version you are deleting.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustAccountClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
deleteAccountNetworkPolicyReq.Etag = args[0]
response, err := a.NetworkPolicy.DeleteAccountNetworkPolicy(ctx, deleteAccountNetworkPolicyReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range deleteAccountNetworkPolicyOverrides {
fn(cmd, &deleteAccountNetworkPolicyReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newDeleteAccountNetworkPolicy())
})
}
// start read-account-network-policy command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var readAccountNetworkPolicyOverrides []func(
*cobra.Command,
*settings.ReadAccountNetworkPolicyRequest,
)
func newReadAccountNetworkPolicy() *cobra.Command {
cmd := &cobra.Command{}
var readAccountNetworkPolicyReq settings.ReadAccountNetworkPolicyRequest
// TODO: short flags
cmd.Use = "read-account-network-policy ETAG"
cmd.Short = `Get Account Network Policy.`
cmd.Long = `Get Account Network Policy.
Gets the value of Account level Network Policy.
Arguments:
ETAG: etag used for versioning. The response is at least as fresh as the eTag
provided. This is used for optimistic concurrency control as a way to help
prevent simultaneous writes of a setting overwriting each other. It is
strongly suggested that systems make use of the etag in the read -> delete
pattern to perform setting deletions in order to avoid race conditions.
That is, get an etag from a GET request, and pass it with the DELETE
request to identify the rule set version you are deleting.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustAccountClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
readAccountNetworkPolicyReq.Etag = args[0]
response, err := a.NetworkPolicy.ReadAccountNetworkPolicy(ctx, readAccountNetworkPolicyReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range readAccountNetworkPolicyOverrides {
fn(cmd, &readAccountNetworkPolicyReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newReadAccountNetworkPolicy())
})
}
// start update-account-network-policy command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var updateAccountNetworkPolicyOverrides []func(
*cobra.Command,
*settings.UpdateAccountNetworkPolicyRequest,
)
func newUpdateAccountNetworkPolicy() *cobra.Command {
cmd := &cobra.Command{}
var updateAccountNetworkPolicyReq settings.UpdateAccountNetworkPolicyRequest
var updateAccountNetworkPolicyJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&updateAccountNetworkPolicyJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().BoolVar(&updateAccountNetworkPolicyReq.AllowMissing, "allow-missing", updateAccountNetworkPolicyReq.AllowMissing, `This should always be set to true for Settings RPCs.`)
// TODO: complex arg: setting
cmd.Use = "update-account-network-policy"
cmd.Short = `Update Account Network Policy.`
cmd.Long = `Update Account Network Policy.
Updates the policy content of Account level Network Policy.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(0)
return check(cmd, args)
}
cmd.PreRunE = root.MustAccountClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
if cmd.Flags().Changed("json") {
err = updateAccountNetworkPolicyJson.Unmarshal(&updateAccountNetworkPolicyReq)
if err != nil {
return err
}
}
response, err := a.NetworkPolicy.UpdateAccountNetworkPolicy(ctx, updateAccountNetworkPolicyReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range updateAccountNetworkPolicyOverrides {
fn(cmd, &updateAccountNetworkPolicyReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newUpdateAccountNetworkPolicy())
})
}
// end service AccountNetworkPolicy


@@ -163,7 +163,7 @@ func newDelete() *cobra.Command {
is accessed over [AWS PrivateLink].
Before configuring PrivateLink, read the [Databricks article about
PrivateLink].
PrivateLink].",
[AWS PrivateLink]: https://aws.amazon.com/privatelink
[Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
@@ -246,7 +246,7 @@ func newGet() *cobra.Command {
accessed over [AWS PrivateLink].
Before configuring PrivateLink, read the [Databricks article about
PrivateLink].
PrivateLink].",
[AWS PrivateLink]: https://aws.amazon.com/privatelink
[Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html


@@ -338,7 +338,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT]`)
cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATED, OPEN]`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of catalog.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the catalog.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of catalog.`)
// TODO: map via StringToStringVar: properties
@@ -351,7 +351,7 @@ func newUpdate() *cobra.Command {
of the catalog).
Arguments:
NAME: Name of catalog.`
NAME: The name of the catalog.`
cmd.Annotations = make(map[string]string)


@@ -327,7 +327,6 @@ func newUpdate() *cobra.Command {
// TODO: array: catalog_updates
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the clean room.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of clean room.`)
cmd.Use = "update NAME_ARG"


@@ -336,6 +336,8 @@ func newUpdate() *cobra.Command {
// TODO: short flags
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the connection.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the connection.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the connection.`)
cmd.Use = "update"


@@ -351,7 +351,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`)
// TODO: complex arg: encryption_details
cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the external location.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`)
cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`)
cmd.Flags().BoolVar(&updateReq.SkipValidation, "skip-validation", updateReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`)


@@ -620,6 +620,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().Int64Var(&updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, "delta-sharing-recipient-token-lifetime-in-seconds", updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, `The lifetime of delta sharing recipient token in seconds.`)
cmd.Flags().Var(&updateReq.DeltaSharingScope, "delta-sharing-scope", `The scope of Delta Sharing enabled for the metastore. Supported values: [INTERNAL, INTERNAL_AND_EXTERNAL]`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The user-specified name of the metastore.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the metastore.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the metastore.`)
cmd.Flags().StringVar(&updateReq.PrivilegeModelVersion, "privilege-model-version", updateReq.PrivilegeModelVersion, `Privilege model version of the metastore, of the form major.minor (e.g., 1.0).`)
cmd.Flags().StringVar(&updateReq.StorageRootCredentialId, "storage-root-credential-id", updateReq.StorageRootCredentialId, `UUID of storage credential to access the metastore storage_root.`)


@@ -445,7 +445,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Description about the provider.`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the Provider.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the provider.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of Provider owner.`)
cmd.Flags().StringVar(&updateReq.RecipientProfileStr, "recipient-profile-str", updateReq.RecipientProfileStr, `This field is required when the __authentication_type__ is **TOKEN** or not provided.`)
@@ -459,7 +459,7 @@ func newUpdate() *cobra.Command {
provider.
Arguments:
NAME: The name of the Provider.`
NAME: Name of the provider.`
cmd.Annotations = make(map[string]string)
@@ -482,14 +482,14 @@ func newUpdate() *cobra.Command {
if err != nil {
return fmt.Errorf("failed to load names for Providers drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The name of the Provider")
id, err := cmdio.Select(ctx, names, "Name of the provider")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have the name of the provider")
return fmt.Errorf("expected to have name of the provider")
}
updateReq.Name = args[0]


@@ -554,7 +554,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Description about the recipient.`)
// TODO: complex arg: ip_access_list
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of Recipient.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the recipient.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of the recipient owner.`)
// TODO: complex arg: properties_kvpairs
@@ -567,7 +567,7 @@ func newUpdate() *cobra.Command {
the user must be both a metastore admin and the owner of the recipient.
Arguments:
NAME: Name of Recipient.`
NAME: Name of the recipient.`
cmd.Annotations = make(map[string]string)
@@ -590,14 +590,14 @@ func newUpdate() *cobra.Command {
if err != nil {
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Name of Recipient")
id, err := cmdio.Select(ctx, names, "Name of the recipient")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have name of recipient")
return fmt.Errorf("expected to have name of the recipient")
}
updateReq.Name = args[0]


@@ -588,6 +588,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the registered model.`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the registered model.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the registered model.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the registered model.`)
cmd.Use = "update FULL_NAME"


@@ -374,6 +374,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT]`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of schema, relative to parent catalog.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the schema.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`)
// TODO: map via StringToStringVar: properties


@@ -29,11 +29,11 @@ func New() *cobra.Command {
scalable REST API endpoints using serverless compute. This means the endpoints
and associated compute resources are fully managed by Databricks and will not
appear in your cloud account. A serving endpoint can consist of one or more
MLflow models from the Databricks Model Registry, called served models. A
serving endpoint can have at most ten served models. You can configure traffic
settings to define how requests should be routed to your served models behind
an endpoint. Additionally, you can configure the scale of resources that
should be applied to each served model.`,
MLflow models from the Databricks Model Registry, called served entities. A
serving endpoint can have at most ten served entities. You can configure
traffic settings to define how requests should be routed to your served
entities behind an endpoint. Additionally, you can configure the scale of
resources that should be applied to each served entity.`,
GroupID: "serving",
Annotations: map[string]string{
"package": "serving",
@@ -140,6 +140,7 @@ func newCreate() *cobra.Command {
// TODO: short flags
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: array: rate_limits
// TODO: array: tags
cmd.Use = "create"
@@ -713,6 +714,82 @@ func init() {
})
}
// start put command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var putOverrides []func(
*cobra.Command,
*serving.PutRequest,
)
func newPut() *cobra.Command {
cmd := &cobra.Command{}
var putReq serving.PutRequest
var putJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&putJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: array: rate_limits
cmd.Use = "put NAME"
cmd.Short = `Update the rate limits of a serving endpoint.`
cmd.Long = `Update the rate limits of a serving endpoint.
Used to update the rate limits of a serving endpoint. NOTE: only external and
foundation model endpoints are supported as of now.
Arguments:
NAME: The name of the serving endpoint whose rate limits are being updated. This
field is required.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = putJson.Unmarshal(&putReq)
if err != nil {
return err
}
}
putReq.Name = args[0]
response, err := w.ServingEndpoints.Put(ctx, putReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range putOverrides {
fn(cmd, &putReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newPut())
})
}
// start query command
// Slice with functions to override default command behavior.
@@ -733,8 +810,17 @@ func newQuery() *cobra.Command {
// TODO: array: dataframe_records
// TODO: complex arg: dataframe_split
// TODO: map via StringToStringVar: extra_params
// TODO: any: input
// TODO: any: inputs
// TODO: array: instances
cmd.Flags().IntVar(&queryReq.MaxTokens, "max-tokens", queryReq.MaxTokens, `The max tokens field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`)
// TODO: array: messages
cmd.Flags().IntVar(&queryReq.N, "n", queryReq.N, `The n (number of candidates) field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`)
// TODO: any: prompt
// TODO: array: stop
cmd.Flags().BoolVar(&queryReq.Stream, "stream", queryReq.Stream, `The stream field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`)
cmd.Flags().Float64Var(&queryReq.Temperature, "temperature", queryReq.Temperature, `The temperature field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`)
cmd.Use = "query NAME"
cmd.Short = `Query a serving endpoint with provided model input.`
@@ -886,14 +972,16 @@ func newUpdateConfig() *cobra.Command {
// TODO: short flags
cmd.Flags().Var(&updateConfigJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: complex arg: auto_capture_config
// TODO: array: served_models
// TODO: complex arg: traffic_config
cmd.Use = "update-config"
cmd.Short = `Update a serving endpoint with a new config.`
cmd.Long = `Update a serving endpoint with a new config.
Updates any combination of the serving endpoint's served models, the compute
configuration of those served models, and the endpoint's traffic config. An
Updates any combination of the serving endpoint's served entities, the compute
configuration of those served entities, and the endpoint's traffic config. An
endpoint that already has an update in progress can not be updated until the
current update completes or fails.`
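The new `put` command above maps to `ServingEndpoints.Put` in the upgraded SDK. Below is a minimal sketch of making the same call directly through the SDK rather than the CLI: the `Put` method, `serving.PutRequest`, and its `Name` field come from the generated command shown in this diff, while the `RateLimits` field, the `serving.RateLimit` struct, and its `Calls`/`Key`/`RenewalPeriod` fields are assumed from the rate-limit schema descriptions and should be verified against the v0.27.0 types. The endpoint name is illustrative.

```go
package main

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/serving"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Allow at most 100 calls per minute, keyed by endpoint.
	// RateLimits/RateLimit and the string values below are assumed from the
	// rate_limits schema in this commit; check the generated SDK types.
	_, err := w.ServingEndpoints.Put(ctx, serving.PutRequest{
		Name: "my-external-endpoint", // illustrative endpoint name
		RateLimits: []serving.RateLimit{{
			Calls:         100,
			Key:           "endpoint", // 'user' or 'endpoint'; 'endpoint' is the default
			RenewalPeriod: "minute",   // only 'minute' is supported today
		}},
	})
	if err != nil {
		panic(err)
	}
}
```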


@@ -390,7 +390,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the share.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the share.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of share.`)
// TODO: array: updates
@@ -414,7 +414,7 @@ func newUpdate() *cobra.Command {
Table removals through **update** do not require additional privileges.
Arguments:
NAME: Name of the share.`
NAME: The name of the share.`
cmd.Annotations = make(map[string]string)


@@ -68,6 +68,7 @@ func newCreate() *cobra.Command {
// TODO: complex arg: aws_iam_role
// TODO: complex arg: azure_managed_identity
// TODO: complex arg: azure_service_principal
// TODO: complex arg: cloudflare_api_token
cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Comment associated with the credential.`)
// TODO: output-only field
cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Whether the storage credential is only usable for read operations.`)
@@ -366,10 +367,11 @@ func newUpdate() *cobra.Command {
// TODO: complex arg: aws_iam_role
// TODO: complex arg: azure_managed_identity
// TODO: complex arg: azure_service_principal
// TODO: complex arg: cloudflare_api_token
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`)
// TODO: output-only field
cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The credential name.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`)
cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`)
cmd.Flags().BoolVar(&updateReq.SkipValidation, "skip-validation", updateReq.SkipValidation, `Supplying true to this argument skips validation of the updated credential.`)
@@ -381,7 +383,7 @@ func newUpdate() *cobra.Command {
Updates a storage credential on the metastore.
Arguments:
NAME: The credential name. The name must be unique within the metastore.`
NAME: Name of the storage credential.`
cmd.Annotations = make(map[string]string)
@@ -404,14 +406,14 @@ func newUpdate() *cobra.Command {
if err != nil {
return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The credential name")
id, err := cmdio.Select(ctx, names, "Name of the storage credential")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have the credential name")
return fmt.Errorf("expected to have name of the storage credential")
}
updateReq.Name = args[0]
@@ -461,6 +463,7 @@ func newValidate() *cobra.Command {
// TODO: complex arg: aws_iam_role
// TODO: complex arg: azure_managed_identity
// TODO: complex arg: azure_service_principal
// TODO: complex arg: cloudflare_api_token
// TODO: output-only field
cmd.Flags().StringVar(&validateReq.ExternalLocationName, "external-location-name", validateReq.ExternalLocationName, `The name of an existing external location to validate.`)
cmd.Flags().BoolVar(&validateReq.ReadOnly, "read-only", validateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`)


@@ -418,6 +418,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the volume.`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the volume.`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the volume.`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the volume.`)
cmd.Use = "update FULL_NAME_ARG"

go.mod (4 lines changed)

@@ -4,7 +4,7 @@ go 1.21
require (
github.com/briandowns/spinner v1.23.0 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.26.2 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.27.0 // Apache 2.0
github.com/fatih/color v1.16.0 // MIT
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
github.com/google/uuid v1.4.0 // BSD-3-Clause
@@ -58,7 +58,7 @@ require (
golang.org/x/net v0.19.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/api v0.152.0 // indirect
google.golang.org/api v0.153.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
google.golang.org/grpc v1.59.0 // indirect
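Not part of the commit, but a quick way to sanity-check a dependency bump like this is to build and run a trivial program against the new SDK version. `databricks.NewWorkspaceClient` and `CurrentUser.Me` are existing SDK entry points; authentication is resolved from the environment or a `~/.databrickscfg` profile, so the sketch below only confirms that the module compiles and can make one authenticated call.

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

func main() {
	// Credentials are picked up from the environment or a configuration profile.
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// A cheap authenticated call to confirm the upgraded SDK works end to end.
	me, err := w.CurrentUser.Me(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated as", me.UserName)
}
```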

go.sum (generated, 8 lines changed)

@@ -29,8 +29,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/databricks/databricks-sdk-go v0.26.2 h1:OcA8aOpwCqCs+brATOuOR6BmqCK/Boye21+1rYw2MOg=
github.com/databricks/databricks-sdk-go v0.26.2/go.mod h1:cyFYsqaDiIdaKPdNAuh+YsMUL1k9Lt02JB/72+zgCxg=
github.com/databricks/databricks-sdk-go v0.27.0 h1:JJ9CxVE7Js08Ug/gafM1gGYx+u/je2g2I4bSYeMPPaY=
github.com/databricks/databricks-sdk-go v0.27.0/go.mod h1:AGzQDmVUcf/J9ARx2FgObcRI5RO2VZ1jehhxFM6tA60=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -246,8 +246,8 @@ golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.152.0 h1:t0r1vPnfMc260S2Ci+en7kfCZaLOPs5KI0sVV/6jZrY=
google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4=
google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=