mirror of https://github.com/databricks/cli.git
Upgrade Go SDK to 0.59.0 (#2425)
## Changes

- Added `service-principal-secrets` command
- Added `budget-policy-id` for apps
- `experiments.log-inputs` now requires `ID` parameter as an input
- Added `genie.get-space` command
- Added `providers.list-provider-share-assets` command

For the whole list of SDK changes see: https://github.com/databricks/databricks-sdk-go/releases/tag/v0.59.0
This commit is contained in:
parent b21fdac209
commit 294db2ecca
@@ -1 +1 @@
99f644e72261ef5ecf8d74db20f4b7a1e09723cc
e5c870006a536121442cfd2441bdc8a5fb76ae1e
@@ -35,6 +35,7 @@ Usage:
  databricks apps update NAME [flags]

Flags:
      --budget-policy-id string
      --description string   The description of the app.
  -h, --help                 help for update
      --json JSON            either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))

@ -5,6 +5,7 @@ github.com/databricks/cli/bundle/config/resources.App:
|
|||
The active deployment of the app. A deployment is considered active when it has been deployed
|
||||
to the app compute.
|
||||
"app_status": {}
|
||||
"budget_policy_id": {}
|
||||
"compute_status": {}
|
||||
"create_time":
|
||||
"description": |-
|
||||
|
@ -19,6 +20,7 @@ github.com/databricks/cli/bundle/config/resources.App:
|
|||
"description":
|
||||
"description": |-
|
||||
The description of the app.
|
||||
"effective_budget_policy_id": {}
|
||||
"id":
|
||||
"description": |-
|
||||
The unique identifier of the app.
|
||||
|
@ -118,7 +120,7 @@ github.com/databricks/cli/bundle/config/resources.Cluster:
|
|||
The optional ID of the instance pool to which the cluster belongs.
|
||||
"is_single_node":
|
||||
"description": |
|
||||
This field can only be used with `kind`.
|
||||
This field can only be used when `kind = CLASSIC_PREVIEW`.
|
||||
|
||||
When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`
|
||||
"kind": {}
|
||||
|
@ -175,7 +177,7 @@ github.com/databricks/cli/bundle/config/resources.Cluster:
|
|||
Up to 10 keys can be specified.
|
||||
"use_ml_runtime":
|
||||
"description": |
|
||||
This field can only be used with `kind`.
|
||||
This field can only be used when `kind = CLASSIC_PREVIEW`.
|
||||
|
||||
`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
|
||||
"workload_type": {}
|
||||
|
@ -311,6 +313,9 @@ github.com/databricks/cli/bundle/config/resources.Job:
|
|||
"description": |-
|
||||
A collection of system notification IDs to notify when runs of this job begin or complete.
|
||||
github.com/databricks/cli/bundle/config/resources.MlflowExperiment:
|
||||
"_":
|
||||
"description": |-
|
||||
An experiment and its metadata.
|
||||
"artifact_location":
|
||||
"description": |-
|
||||
Location where artifacts for the experiment are stored.
|
||||
|
@ -1089,7 +1094,7 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec:
|
|||
The optional ID of the instance pool to which the cluster belongs.
|
||||
"is_single_node":
|
||||
"description": |
|
||||
This field can only be used with `kind`.
|
||||
This field can only be used when `kind = CLASSIC_PREVIEW`.
|
||||
|
||||
When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`
|
||||
"kind": {}
|
||||
|
@ -1146,7 +1151,7 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec:
|
|||
Up to 10 keys can be specified.
|
||||
"use_ml_runtime":
|
||||
"description": |
|
||||
This field can only be used with `kind`.
|
||||
This field can only be used when `kind = CLASSIC_PREVIEW`.
|
||||
|
||||
`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
|
||||
"workload_type": {}
|
||||
|
@ -1156,7 +1161,7 @@ github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode:
|
|||
Data security mode decides what data governance model to use when accessing data
|
||||
from a cluster.
|
||||
|
||||
The following modes can only be used with `kind`.
|
||||
The following modes can only be used when `kind = CLASSIC_PREVIEW`.
|
||||
* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
|
||||
* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.
|
||||
* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
|
||||
|
@ -1465,6 +1470,19 @@ github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask:
|
|||
"notebook_name":
|
||||
"description": |-
|
||||
Name of the notebook being run.
|
||||
github.com/databricks/databricks-sdk-go/service/jobs.ComputeConfig:
|
||||
"_":
|
||||
"description": |-
|
||||
Next field: 4
|
||||
"gpu_node_pool_id":
|
||||
"description": |-
|
||||
IDof the GPU pool to use.
|
||||
"gpu_type":
|
||||
"description": |-
|
||||
GPU type.
|
||||
"num_gpus":
|
||||
"description": |-
|
||||
Number of GPUs.
|
||||
github.com/databricks/databricks-sdk-go/service/jobs.Condition:
|
||||
"_":
|
||||
"enum":
|
||||
|
@ -1579,6 +1597,37 @@ github.com/databricks/databricks-sdk-go/service/jobs.Format:
|
|||
SINGLE_TASK
|
||||
- |-
|
||||
MULTI_TASK
|
||||
github.com/databricks/databricks-sdk-go/service/jobs.GenAiComputeTask:
|
||||
"_":
|
||||
"description": |-
|
||||
Next field: 9
|
||||
"command":
|
||||
"description": |-
|
||||
Command launcher to run the actual script, e.g. bash, python etc.
|
||||
"compute": {}
|
||||
"dl_runtime_image":
|
||||
"description": |-
|
||||
Runtime image
|
||||
"mlflow_experiment_name":
|
||||
"description": |-
|
||||
Optional string containing the name of the MLflow experiment to log the run to. If name is not
|
||||
found, backend will create the mlflow experiment using the name.
|
||||
"source":
|
||||
"description": |-
|
||||
Optional location type of the training script. When set to `WORKSPACE`, the script will be retrieved from the local Databricks workspace. When set to `GIT`, the script will be retrieved from a Git repository
|
||||
defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.
|
||||
* `WORKSPACE`: Script is located in Databricks workspace.
|
||||
* `GIT`: Script is located in cloud Git provider.
|
||||
"training_script_path":
|
||||
"description": |-
|
||||
The training script file path to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.
|
||||
"yaml_parameters":
|
||||
"description": |-
|
||||
Optional string containing model parameters passed to the training script in yaml format.
|
||||
If present, then the content in yaml_parameters_file_path will be ignored.
|
||||
"yaml_parameters_file_path":
|
||||
"description": |-
|
||||
Optional path to a YAML file containing model parameters passed to the training script.
|
||||
github.com/databricks/databricks-sdk-go/service/jobs.GitProvider:
|
||||
"_":
|
||||
"enum":
|
||||
|
@ -2144,6 +2193,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.Task:
|
|||
"for_each_task":
|
||||
"description": |-
|
||||
The task executes a nested task for every input provided when the `for_each_task` field is present.
|
||||
"gen_ai_compute_task": {}
|
||||
"health": {}
|
||||
"job_cluster_key":
|
||||
"description": |-
|
||||
|
@ -2296,6 +2346,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.WebhookNotifications:
|
|||
"description": |-
|
||||
An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.
|
||||
github.com/databricks/databricks-sdk-go/service/ml.ExperimentTag:
|
||||
"_":
|
||||
"description": |-
|
||||
A tag for an experiment.
|
||||
"key":
|
||||
"description": |-
|
||||
The tag key.
|
||||
|
@ -2864,6 +2917,12 @@ github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfig:
|
|||
"description": |-
|
||||
The underlying provider in Amazon Bedrock. Supported values (case
|
||||
insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.
|
||||
"instance_profile_arn":
|
||||
"description": |-
|
||||
ARN of the instance profile that the external model will use to access AWS resources.
|
||||
You must authenticate using an instance profile or access keys.
|
||||
If you prefer to authenticate using access keys, see `aws_access_key_id`,
|
||||
`aws_access_key_id_plaintext`, `aws_secret_access_key` and `aws_secret_access_key_plaintext`.
|
||||
github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider:
|
||||
"_":
|
||||
"enum":
|
||||
|
|
|
@ -5,6 +5,9 @@ github.com/databricks/cli/bundle/config/resources.App:
|
|||
"app_status":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
"budget_policy_id":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
"compute_status":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
|
@ -23,6 +26,9 @@ github.com/databricks/cli/bundle/config/resources.App:
|
|||
"description":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
"effective_budget_policy_id":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
"name":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
|
@ -506,6 +512,10 @@ github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes:
|
|||
"availability":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
github.com/databricks/databricks-sdk-go/service/jobs.GenAiComputeTask:
|
||||
"compute":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
github.com/databricks/databricks-sdk-go/service/jobs.GitSource:
|
||||
"git_snapshot":
|
||||
"description": |-
|
||||
|
@ -530,6 +540,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask:
|
|||
"description": |-
|
||||
PLACEHOLDER
|
||||
github.com/databricks/databricks-sdk-go/service/jobs.Task:
|
||||
"gen_ai_compute_task":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
"health":
|
||||
"description": |-
|
||||
PLACEHOLDER
|
||||
|
|
|
@ -70,6 +70,9 @@
|
|||
"app_status": {
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.ApplicationStatus"
|
||||
},
|
||||
"budget_policy_id": {
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"compute_status": {
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.ComputeStatus"
|
||||
},
|
||||
|
@ -88,6 +91,9 @@
|
|||
"description": {
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"effective_budget_policy_id": {
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"id": {
|
||||
"description": "The unique identifier of the app.",
|
||||
"$ref": "#/$defs/string"
|
||||
|
@ -210,7 +216,7 @@
|
|||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"is_single_node": {
|
||||
"description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
|
||||
"description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"kind": {
|
||||
|
@ -255,7 +261,7 @@
|
|||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"use_ml_runtime": {
|
||||
"description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
|
||||
"description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"workload_type": {
|
||||
|
@ -465,6 +471,7 @@
|
|||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"description": "An experiment and its metadata.",
|
||||
"properties": {
|
||||
"artifact_location": {
|
||||
"description": "Location where artifacts for the experiment are stored.",
|
||||
|
@ -2585,7 +2592,7 @@
|
|||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"is_single_node": {
|
||||
"description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
|
||||
"description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"kind": {
|
||||
|
@ -2627,7 +2634,7 @@
|
|||
"$ref": "#/$defs/slice/string"
|
||||
},
|
||||
"use_ml_runtime": {
|
||||
"description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
|
||||
"description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
|
||||
"$ref": "#/$defs/bool"
|
||||
},
|
||||
"workload_type": {
|
||||
|
@ -2646,7 +2653,7 @@
|
|||
"oneOf": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used with `kind`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n",
|
||||
"description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n",
|
||||
"enum": [
|
||||
"DATA_SECURITY_MODE_AUTO",
|
||||
"DATA_SECURITY_MODE_STANDARD",
|
||||
|
@ -3230,6 +3237,37 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"jobs.ComputeConfig": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"description": "Next field: 4",
|
||||
"properties": {
|
||||
"gpu_node_pool_id": {
|
||||
"description": "IDof the GPU pool to use.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"gpu_type": {
|
||||
"description": "GPU type.",
|
||||
"$ref": "#/$defs/string"
|
||||
},
|
||||
"num_gpus": {
|
||||
"description": "Number of GPUs.",
|
||||
"$ref": "#/$defs/int"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"gpu_node_pool_id",
|
||||
"num_gpus"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"jobs.Condition": {
|
||||
"oneOf": [
|
||||
{
|
||||
|
@@ -3463,6 +3501,55 @@
      }
    ]
  },
  "jobs.GenAiComputeTask": {
    "oneOf": [
      {
        "type": "object",
        "description": "Next field: 9",
        "properties": {
          "command": {
            "description": "Command launcher to run the actual script, e.g. bash, python etc.",
            "$ref": "#/$defs/string"
          },
          "compute": {
            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ComputeConfig"
          },
          "dl_runtime_image": {
            "description": "Runtime image",
            "$ref": "#/$defs/string"
          },
          "mlflow_experiment_name": {
            "description": "Optional string containing the name of the MLflow experiment to log the run to. If name is not\nfound, backend will create the mlflow experiment using the name.",
            "$ref": "#/$defs/string"
          },
          "source": {
            "description": "Optional location type of the training script. When set to `WORKSPACE`, the script will be retrieved from the local Databricks workspace. When set to `GIT`, the script will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Script is located in Databricks workspace.\n* `GIT`: Script is located in cloud Git provider.",
            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Source"
          },
          "training_script_path": {
            "description": "The training script file path to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.",
            "$ref": "#/$defs/string"
          },
          "yaml_parameters": {
            "description": "Optional string containing model parameters passed to the training script in yaml format.\nIf present, then the content in yaml_parameters_file_path will be ignored.",
            "$ref": "#/$defs/string"
          },
          "yaml_parameters_file_path": {
            "description": "Optional path to a YAML file containing model parameters passed to the training script.",
            "$ref": "#/$defs/string"
          }
        },
        "additionalProperties": false,
        "required": [
          "dl_runtime_image"
        ]
      },
      {
        "type": "string",
        "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
      }
    ]
  },
  "jobs.GitProvider": {
    "oneOf": [
      {
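For orientation, the `jobs.GenAiComputeTask` and `jobs.ComputeConfig` definitions above map onto Go SDK types of the same names. The sketch below mirrors the schema with local stand-in structs rather than the generated SDK structs; the CamelCase field names, JSON tags, and sample values ("pool-123", "A10", and so on) are illustrative assumptions derived from the snake_case properties shown in this hunk.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in mirroring the jobs.ComputeConfig schema above.
type ComputeConfig struct {
	GpuNodePoolID string `json:"gpu_node_pool_id"` // required per the schema
	GpuType       string `json:"gpu_type,omitempty"`
	NumGpus       int    `json:"num_gpus"` // required per the schema
}

// Stand-in mirroring the jobs.GenAiComputeTask schema above.
type GenAiComputeTask struct {
	Command                string         `json:"command,omitempty"`
	Compute                *ComputeConfig `json:"compute,omitempty"`
	DlRuntimeImage         string         `json:"dl_runtime_image"` // the only required field
	MlflowExperimentName   string         `json:"mlflow_experiment_name,omitempty"`
	Source                 string         `json:"source,omitempty"` // WORKSPACE or GIT
	TrainingScriptPath     string         `json:"training_script_path,omitempty"`
	YamlParameters         string         `json:"yaml_parameters,omitempty"`
	YamlParametersFilePath string         `json:"yaml_parameters_file_path,omitempty"`
}

func main() {
	task := GenAiComputeTask{
		Command:            "python",
		Compute:            &ComputeConfig{GpuNodePoolID: "pool-123", GpuType: "A10", NumGpus: 4},
		DlRuntimeImage:     "dlr:latest",
		Source:             "WORKSPACE",
		TrainingScriptPath: "/Workspace/train.py",
	}
	out, _ := json.MarshalIndent(task, "", "  ")
	fmt.Println(string(out)) // serializes with the snake_case keys used by the schema
}
```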
@ -4504,6 +4591,9 @@
|
|||
"description": "The task executes a nested task for every input provided when the `for_each_task` field is present.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask"
|
||||
},
|
||||
"gen_ai_compute_task": {
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.GenAiComputeTask"
|
||||
},
|
||||
"health": {
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRules"
|
||||
},
|
||||
|
@ -4775,6 +4865,7 @@
|
|||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"description": "A tag for an experiment.",
|
||||
"properties": {
|
||||
"key": {
|
||||
"description": "The tag key.",
|
||||
|
@ -5850,6 +5941,10 @@
|
|||
"bedrock_provider": {
|
||||
"description": "The underlying provider in Amazon Bedrock. Supported values (case\ninsensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
|
||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider"
|
||||
},
|
||||
"instance_profile_arn": {
|
||||
"description": "ARN of the instance profile that the external model will use to access AWS resources.\nYou must authenticate using an instance profile or access keys.\nIf you prefer to authenticate using access keys, see `aws_access_key_id`,\n`aws_access_key_id_plaintext`, `aws_secret_access_key` and `aws_secret_access_key_plaintext`.",
|
||||
"$ref": "#/$defs/string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
|
|
|
@@ -58,8 +58,7 @@ func newCreate() *cobra.Command {
	// TODO: short flags
	cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)

	// TODO: array: custom_tags
	cmd.Flags().StringVar(&createReq.PolicyName, "policy-name", createReq.PolicyName, `The name of the policy.`)
	// TODO: complex arg: policy
	cmd.Flags().StringVar(&createReq.RequestId, "request-id", createReq.RequestId, `A unique identifier for this request.`)

	cmd.Use = "create"
@@ -7,6 +7,7 @@ import (

	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/databricks-sdk-go/service/oauth2"
	"github.com/spf13/cobra"
)

@@ -64,8 +65,12 @@ func newCreate() *cobra.Command {
	cmd := &cobra.Command{}

	var createReq oauth2.CreateServicePrincipalSecretRequest
	var createJson flags.JsonFlag

	// TODO: short flags
	cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)

	cmd.Flags().StringVar(&createReq.Lifetime, "lifetime", createReq.Lifetime, `The lifetime of the secret in seconds.`)

	cmd.Use = "create SERVICE_PRINCIPAL_ID"
	cmd.Short = `Create service principal secret.`

@@ -88,6 +93,18 @@ func newCreate() *cobra.Command {
		ctx := cmd.Context()
		a := root.AccountClient(ctx)

		if cmd.Flags().Changed("json") {
			diags := createJson.Unmarshal(&createReq)
			if diags.HasError() {
				return diags.Error()
			}
			if len(diags) > 0 {
				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
				if err != nil {
					return err
				}
			}
		}
		_, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId)
		if err != nil {
			return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0])
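The new `create SERVICE_PRINCIPAL_ID` positional argument is parsed with `fmt.Sscan`, so it must be numeric; anything else surfaces as the "invalid SERVICE_PRINCIPAL_ID" error shown above. A minimal, self-contained illustration of that parsing behavior (the local `int64` variable stands in for the request field):

```go
package main

import "fmt"

func main() {
	var id int64

	// Numeric input parses cleanly; fmt.Sscan scans space-separated values.
	if _, err := fmt.Sscan("12345", &id); err == nil {
		fmt.Println("parsed:", id)
	}

	// Non-numeric input fails, which is what the command reports as
	// "invalid SERVICE_PRINCIPAL_ID: <arg>".
	if _, err := fmt.Sscan("not-a-number", &id); err != nil {
		fmt.Println("error:", err)
	}
}
```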
@@ -81,6 +81,7 @@ func newCreate() *cobra.Command {
	cmd.Flags().BoolVar(&createReq.NoCompute, "no-compute", createReq.NoCompute, `If true, the app will not be started after creation.`)
	// TODO: complex arg: active_deployment
	// TODO: complex arg: app_status
	cmd.Flags().StringVar(&createReq.App.BudgetPolicyId, "budget-policy-id", createReq.App.BudgetPolicyId, ``)
	// TODO: complex arg: compute_status
	cmd.Flags().StringVar(&createReq.App.Description, "description", createReq.App.Description, `The description of the app.`)
	// TODO: complex arg: pending_deployment

@@ -938,6 +939,7 @@ func newUpdate() *cobra.Command {

	// TODO: complex arg: active_deployment
	// TODO: complex arg: app_status
	cmd.Flags().StringVar(&updateReq.App.BudgetPolicyId, "budget-policy-id", updateReq.App.BudgetPolicyId, ``)
	// TODO: complex arg: compute_status
	cmd.Flags().StringVar(&updateReq.App.Description, "description", updateReq.App.Description, `The description of the app.`)
	// TODO: complex arg: pending_deployment
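Both hunks follow the generated-CLI pattern of binding a request field to a flag with `cmd.Flags().StringVar`. A stripped-down sketch of that pattern, using a stand-in struct instead of the real `apps` request types (field names and the sample value are assumptions for illustration):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Stand-in for the generated request struct; the real command binds
// updateReq.App.BudgetPolicyId in exactly the same way.
type updateRequest struct {
	BudgetPolicyID string
	Description    string
}

func main() {
	var req updateRequest

	cmd := &cobra.Command{
		Use: "update NAME",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Printf("budget policy: %q, description: %q\n", req.BudgetPolicyID, req.Description)
			return nil
		},
	}
	// Same shape as the generated code: target, flag name, default, help text.
	cmd.Flags().StringVar(&req.BudgetPolicyID, "budget-policy-id", req.BudgetPolicyID, "")
	cmd.Flags().StringVar(&req.Description, "description", req.Description, "The description of the app.")

	cmd.SetArgs([]string{"my-app", "--budget-policy-id", "policy-123"})
	_ = cmd.Execute()
}
```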
@@ -223,7 +223,7 @@ func newCreate() *cobra.Command {
	// TODO: complex arg: gcp_attributes
	// TODO: array: init_scripts
	cmd.Flags().StringVar(&createReq.InstancePoolId, "instance-pool-id", createReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`)
	cmd.Flags().BoolVar(&createReq.IsSingleNode, "is-single-node", createReq.IsSingleNode, `This field can only be used with kind.`)
	cmd.Flags().BoolVar(&createReq.IsSingleNode, "is-single-node", createReq.IsSingleNode, `This field can only be used when kind = CLASSIC_PREVIEW.`)
	cmd.Flags().Var(&createReq.Kind, "kind", `The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]`)
	cmd.Flags().StringVar(&createReq.NodeTypeId, "node-type-id", createReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
	cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`)

@@ -233,7 +233,7 @@ func newCreate() *cobra.Command {
	// TODO: map via StringToStringVar: spark_conf
	// TODO: map via StringToStringVar: spark_env_vars
	// TODO: array: ssh_public_keys
	cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used with kind.`)
	cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used when kind = CLASSIC_PREVIEW.`)
	// TODO: complex arg: workload_type

	cmd.Use = "create SPARK_VERSION"

@@ -493,7 +493,7 @@ func newEdit() *cobra.Command {
	// TODO: complex arg: gcp_attributes
	// TODO: array: init_scripts
	cmd.Flags().StringVar(&editReq.InstancePoolId, "instance-pool-id", editReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`)
	cmd.Flags().BoolVar(&editReq.IsSingleNode, "is-single-node", editReq.IsSingleNode, `This field can only be used with kind.`)
	cmd.Flags().BoolVar(&editReq.IsSingleNode, "is-single-node", editReq.IsSingleNode, `This field can only be used when kind = CLASSIC_PREVIEW.`)
	cmd.Flags().Var(&editReq.Kind, "kind", `The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]`)
	cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
	cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`)

@@ -503,7 +503,7 @@ func newEdit() *cobra.Command {
	// TODO: map via StringToStringVar: spark_conf
	// TODO: map via StringToStringVar: spark_env_vars
	// TODO: array: ssh_public_keys
	cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used with kind.`)
	cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used when kind = CLASSIC_PREVIEW.`)
	// TODO: complex arg: workload_type

	cmd.Use = "edit CLUSTER_ID SPARK_VERSION"
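The reworded help text ties `--is-single-node` and `--use-ml-runtime` to `kind = CLASSIC_PREVIEW`. As a rough sketch of how those flags travel together on a create request, here is a local stand-in struct; the type, field names, and the num_workers behavior in the comment are illustrative assumptions, not the SDK's actual types:

```go
package main

import "fmt"

// Illustrative stand-in for the cluster create request fields touched by this hunk.
type clusterCreate struct {
	Kind         string // CLASSIC_PREVIEW is the only supported value listed above
	IsSingleNode bool   // documented as usable only when Kind == "CLASSIC_PREVIEW"
	UseMlRuntime bool   // documented as usable only when Kind == "CLASSIC_PREVIEW"
	NumWorkers   int
}

func main() {
	req := clusterCreate{Kind: "CLASSIC_PREVIEW", IsSingleNode: true}

	// Local guard mirroring the documented constraint from the help text.
	if req.Kind != "CLASSIC_PREVIEW" && (req.IsSingleNode || req.UseMlRuntime) {
		fmt.Println("is-single-node and use-ml-runtime require kind = CLASSIC_PREVIEW")
		return
	}
	fmt.Printf("%+v\n", req)
}
```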
@@ -522,6 +522,7 @@ func newValidateCredential() *cobra.Command {
	// TODO: complex arg: aws_iam_role
	// TODO: complex arg: azure_managed_identity
	cmd.Flags().StringVar(&validateCredentialReq.CredentialName, "credential-name", validateCredentialReq.CredentialName, `Required.`)
	// TODO: complex arg: databricks_gcp_service_account
	cmd.Flags().StringVar(&validateCredentialReq.ExternalLocationName, "external-location-name", validateCredentialReq.ExternalLocationName, `The name of an existing external location to validate.`)
	cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. Supported values: [SERVICE, STORAGE]`)
	cmd.Flags().BoolVar(&validateCredentialReq.ReadOnly, "read-only", validateCredentialReq.ReadOnly, `Whether the credential is only usable for read operations.`)
@ -105,7 +105,7 @@ func newCreateExperiment() *cobra.Command {
|
|||
already exist and fails if another experiment with the same name already
|
||||
exists.
|
||||
|
||||
Throws RESOURCE_ALREADY_EXISTS if a experiment with the given name exists.
|
||||
Throws RESOURCE_ALREADY_EXISTS if an experiment with the given name exists.
|
||||
|
||||
Arguments:
|
||||
NAME: Experiment name.`
|
||||
|
@ -183,6 +183,7 @@ func newCreateRun() *cobra.Command {
|
|||
cmd.Flags().Var(&createRunJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Flags().StringVar(&createRunReq.ExperimentId, "experiment-id", createRunReq.ExperimentId, `ID of the associated experiment.`)
|
||||
cmd.Flags().StringVar(&createRunReq.RunName, "run-name", createRunReq.RunName, `The name of the run.`)
|
||||
cmd.Flags().Int64Var(&createRunReq.StartTime, "start-time", createRunReq.StartTime, `Unix timestamp in milliseconds of when the run started.`)
|
||||
// TODO: array: tags
|
||||
cmd.Flags().StringVar(&createRunReq.UserId, "user-id", createRunReq.UserId, `ID of the user executing the run.`)
|
||||
|
@ -193,7 +194,7 @@ func newCreateRun() *cobra.Command {
|
|||
|
||||
Creates a new run within an experiment. A run is usually a single execution of
|
||||
a machine learning or data ETL pipeline. MLflow uses runs to track the
|
||||
mlflowParam, mlflowMetric and mlflowRunTag associated with a single
|
||||
mlflowParam, mlflowMetric, and mlflowRunTag associated with a single
|
||||
execution.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
@ -263,7 +264,7 @@ func newDeleteExperiment() *cobra.Command {
|
|||
cmd.Long = `Delete an experiment.
|
||||
|
||||
Marks an experiment and associated metadata, runs, metrics, params, and tags
|
||||
for deletion. If the experiment uses FileStore, artifacts associated with
|
||||
for deletion. If the experiment uses FileStore, artifacts associated with the
|
||||
experiment are also deleted.
|
||||
|
||||
Arguments:
|
||||
|
@ -431,7 +432,6 @@ func newDeleteRuns() *cobra.Command {
|
|||
Bulk delete runs in an experiment that were created prior to or at the
|
||||
specified timestamp. Deletes at most max_runs per request. To call this API
|
||||
from a Databricks Notebook in Python, you can use the client code snippet on
|
||||
https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete.
|
||||
|
||||
Arguments:
|
||||
EXPERIMENT_ID: The ID of the experiment containing the runs to delete.
|
||||
|
@ -518,8 +518,8 @@ func newDeleteTag() *cobra.Command {
|
|||
cmd.Flags().Var(&deleteTagJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Use = "delete-tag RUN_ID KEY"
|
||||
cmd.Short = `Delete a tag.`
|
||||
cmd.Long = `Delete a tag.
|
||||
cmd.Short = `Delete a tag on a run.`
|
||||
cmd.Long = `Delete a tag on a run.
|
||||
|
||||
Deletes a tag on a run. Tags are run metadata that can be updated during a run
|
||||
and after a run completes.
|
||||
|
@ -602,8 +602,8 @@ func newGetByName() *cobra.Command {
|
|||
// TODO: short flags
|
||||
|
||||
cmd.Use = "get-by-name EXPERIMENT_NAME"
|
||||
cmd.Short = `Get metadata.`
|
||||
cmd.Long = `Get metadata.
|
||||
cmd.Short = `Get an experiment by name.`
|
||||
cmd.Long = `Get an experiment by name.
|
||||
|
||||
Gets metadata for an experiment.
|
||||
|
||||
|
@ -731,8 +731,8 @@ func newGetHistory() *cobra.Command {
|
|||
cmd.Flags().StringVar(&getHistoryReq.RunUuid, "run-uuid", getHistoryReq.RunUuid, `[Deprecated, use run_id instead] ID of the run from which to fetch metric values.`)
|
||||
|
||||
cmd.Use = "get-history METRIC_KEY"
|
||||
cmd.Short = `Get history of a given metric within a run.`
|
||||
cmd.Long = `Get history of a given metric within a run.
|
||||
cmd.Short = `Get metric history for a run.`
|
||||
cmd.Long = `Get metric history for a run.
|
||||
|
||||
Gets a list of all values for the specified metric for a given run.
|
||||
|
||||
|
@ -973,12 +973,11 @@ func newListArtifacts() *cobra.Command {
|
|||
cmd.Flags().StringVar(&listArtifactsReq.RunUuid, "run-uuid", listArtifactsReq.RunUuid, `[Deprecated, use run_id instead] ID of the run whose artifacts to list.`)
|
||||
|
||||
cmd.Use = "list-artifacts"
|
||||
cmd.Short = `Get all artifacts.`
|
||||
cmd.Long = `Get all artifacts.
|
||||
cmd.Short = `List artifacts.`
|
||||
cmd.Long = `List artifacts.
|
||||
|
||||
List artifacts for a run. Takes an optional artifact_path prefix. If it is
|
||||
specified, the response contains only artifacts with the specified prefix.
|
||||
This API does not support pagination when listing artifacts in UC Volumes. A
|
||||
List artifacts for a run. Takes an optional artifact_path prefix which if
|
||||
specified, the response contains only artifacts with the specified prefix. A
|
||||
maximum of 1000 artifacts will be retrieved for UC Volumes. Please call
|
||||
/api/2.0/fs/directories{directory_path} for listing artifacts in UC Volumes,
|
||||
which supports pagination. See [List directory contents | Files
|
||||
|
@ -1028,9 +1027,9 @@ func newListExperiments() *cobra.Command {
|
|||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listExperimentsReq.MaxResults, "max-results", listExperimentsReq.MaxResults, `Maximum number of experiments desired.`)
|
||||
cmd.Flags().Int64Var(&listExperimentsReq.MaxResults, "max-results", listExperimentsReq.MaxResults, `Maximum number of experiments desired.`)
|
||||
cmd.Flags().StringVar(&listExperimentsReq.PageToken, "page-token", listExperimentsReq.PageToken, `Token indicating the page of experiments to fetch.`)
|
||||
cmd.Flags().StringVar(&listExperimentsReq.ViewType, "view-type", listExperimentsReq.ViewType, `Qualifier for type of experiments to be returned.`)
|
||||
cmd.Flags().Var(&listExperimentsReq.ViewType, "view-type", `Qualifier for type of experiments to be returned. Supported values: [ACTIVE_ONLY, ALL, DELETED_ONLY]`)
|
||||
|
||||
cmd.Use = "list-experiments"
|
||||
cmd.Short = `List experiments.`
|
||||
|
@ -1090,8 +1089,8 @@ func newLogBatch() *cobra.Command {
|
|||
// TODO: array: tags
|
||||
|
||||
cmd.Use = "log-batch"
|
||||
cmd.Short = `Log a batch.`
|
||||
cmd.Long = `Log a batch.
|
||||
cmd.Short = `Log a batch of metrics/params/tags for a run.`
|
||||
cmd.Long = `Log a batch of metrics/params/tags for a run.
|
||||
|
||||
Logs a batch of metrics, params, and tags for a run. If any data failed to be
|
||||
persisted, the server will respond with an error (non-200 status code).
|
||||
|
@ -1120,8 +1119,13 @@ func newLogBatch() *cobra.Command {
|
|||
Request Limits ------------------------------- A single JSON-serialized API
|
||||
request may be up to 1 MB in size and contain:
|
||||
|
||||
* No more than 1000 metrics, params, and tags in total * Up to 1000 metrics *
|
||||
Up to 100 params * Up to 100 tags
|
||||
* No more than 1000 metrics, params, and tags in total
|
||||
|
||||
* Up to 1000 metrics
|
||||
|
||||
* Up to 100 params
|
||||
|
||||
* Up to 100 tags
|
||||
|
||||
For example, a valid request might contain 900 metrics, 50 params, and 50
|
||||
tags, but logging 900 metrics, 50 params, and 51 tags is invalid.
|
||||
|
@ -1129,6 +1133,7 @@ func newLogBatch() *cobra.Command {
|
|||
The following limits also apply to metric, param, and tag keys and values:
|
||||
|
||||
* Metric keys, param keys, and tag keys can be up to 250 characters in length
|
||||
|
||||
* Parameter and tag values can be up to 250 characters in length`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
@@ -1194,19 +1199,30 @@ func newLogInputs() *cobra.Command {
	cmd.Flags().Var(&logInputsJson, "json", `either inline JSON string or @path/to/file.json with request body`)

	// TODO: array: datasets
	cmd.Flags().StringVar(&logInputsReq.RunId, "run-id", logInputsReq.RunId, `ID of the run to log under.`)

	cmd.Use = "log-inputs"
	cmd.Use = "log-inputs RUN_ID"
	cmd.Short = `Log inputs to a run.`
	cmd.Long = `Log inputs to a run.

  **NOTE:** Experimental: This API may change or be removed in a future release
  without warning.`
  without warning.

  Logs inputs, such as datasets and models, to an MLflow Run.

  Arguments:
    RUN_ID: ID of the run to log under`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		check := root.ExactArgs(0)
		if cmd.Flags().Changed("json") {
			err := root.ExactArgs(0)(cmd, args)
			if err != nil {
				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input")
			}
			return nil
		}
		check := root.ExactArgs(1)
		return check(cmd, args)
	}

@@ -1227,6 +1243,9 @@ func newLogInputs() *cobra.Command {
				}
			}
		}
		if !cmd.Flags().Changed("json") {
			logInputsReq.RunId = args[0]
		}

		err = w.Experiments.LogInputs(ctx, logInputsReq)
		if err != nil {
|
|||
cmd.Flags().Int64Var(&logMetricReq.Step, "step", logMetricReq.Step, `Step at which to log the metric.`)
|
||||
|
||||
cmd.Use = "log-metric KEY VALUE TIMESTAMP"
|
||||
cmd.Short = `Log a metric.`
|
||||
cmd.Long = `Log a metric.
|
||||
cmd.Short = `Log a metric for a run.`
|
||||
cmd.Long = `Log a metric for a run.
|
||||
|
||||
Logs a metric for a run. A metric is a key-value pair (string key, float
|
||||
value) with an associated timestamp. Examples include the various metrics that
|
||||
Log a metric for a run. A metric is a key-value pair (string key, float value)
|
||||
with an associated timestamp. Examples include the various metrics that
|
||||
represent ML model accuracy. A metric can be logged multiple times.
|
||||
|
||||
Arguments:
|
||||
|
@ -1442,8 +1461,8 @@ func newLogParam() *cobra.Command {
|
|||
cmd.Flags().StringVar(&logParamReq.RunUuid, "run-uuid", logParamReq.RunUuid, `[Deprecated, use run_id instead] ID of the run under which to log the param.`)
|
||||
|
||||
cmd.Use = "log-param KEY VALUE"
|
||||
cmd.Short = `Log a param.`
|
||||
cmd.Long = `Log a param.
|
||||
cmd.Short = `Log a param for a run.`
|
||||
cmd.Long = `Log a param for a run.
|
||||
|
||||
Logs a param used for a run. A param is a key-value pair (string key, string
|
||||
value). Examples include hyperparameters used for ML model training and
|
||||
|
@ -1530,8 +1549,8 @@ func newRestoreExperiment() *cobra.Command {
|
|||
cmd.Flags().Var(&restoreExperimentJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Use = "restore-experiment EXPERIMENT_ID"
|
||||
cmd.Short = `Restores an experiment.`
|
||||
cmd.Long = `Restores an experiment.
|
||||
cmd.Short = `Restore an experiment.`
|
||||
cmd.Long = `Restore an experiment.
|
||||
|
||||
Restore an experiment marked for deletion. This also restores associated
|
||||
metadata, runs, metrics, params, and tags. If experiment uses FileStore,
|
||||
|
@ -1619,7 +1638,11 @@ func newRestoreRun() *cobra.Command {
|
|||
cmd.Short = `Restore a run.`
|
||||
cmd.Long = `Restore a run.
|
||||
|
||||
Restores a deleted run.
|
||||
Restores a deleted run. This also restores associated metadata, runs, metrics,
|
||||
params, and tags.
|
||||
|
||||
Throws RESOURCE_DOES_NOT_EXIST if the run was never created or was
|
||||
permanently deleted.
|
||||
|
||||
Arguments:
|
||||
RUN_ID: ID of the run to restore.`
|
||||
|
@ -1705,7 +1728,6 @@ func newRestoreRuns() *cobra.Command {
|
|||
Bulk restore runs in an experiment that were deleted no earlier than the
|
||||
specified timestamp. Restores at most max_runs per request. To call this API
|
||||
from a Databricks Notebook in Python, you can use the client code snippet on
|
||||
https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore.
|
||||
|
||||
Arguments:
|
||||
EXPERIMENT_ID: The ID of the experiment containing the runs to restore.
|
||||
|
@ -1875,7 +1897,7 @@ func newSearchRuns() *cobra.Command {
|
|||
|
||||
Searches for runs that satisfy expressions.
|
||||
|
||||
Search expressions can use mlflowMetric and mlflowParam keys.",`
|
||||
Search expressions can use mlflowMetric and mlflowParam keys.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -1937,18 +1959,16 @@ func newSetExperimentTag() *cobra.Command {
|
|||
cmd.Flags().Var(&setExperimentTagJson, "json", `either inline JSON string or @path/to/file.json with request body`)
|
||||
|
||||
cmd.Use = "set-experiment-tag EXPERIMENT_ID KEY VALUE"
|
||||
cmd.Short = `Set a tag.`
|
||||
cmd.Long = `Set a tag.
|
||||
cmd.Short = `Set a tag for an experiment.`
|
||||
cmd.Long = `Set a tag for an experiment.
|
||||
|
||||
Sets a tag on an experiment. Experiment tags are metadata that can be updated.
|
||||
|
||||
Arguments:
|
||||
EXPERIMENT_ID: ID of the experiment under which to log the tag. Must be provided.
|
||||
KEY: Name of the tag. Maximum size depends on storage backend. All storage
|
||||
backends are guaranteed to support key values up to 250 bytes in size.
|
||||
VALUE: String value of the tag being logged. Maximum size depends on storage
|
||||
backend. All storage backends are guaranteed to support key values up to
|
||||
5000 bytes in size.`
|
||||
KEY: Name of the tag. Keys up to 250 bytes in size are supported.
|
||||
VALUE: String value of the tag being logged. Values up to 64KB in size are
|
||||
supported.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -2108,18 +2128,16 @@ func newSetTag() *cobra.Command {
|
|||
cmd.Flags().StringVar(&setTagReq.RunUuid, "run-uuid", setTagReq.RunUuid, `[Deprecated, use run_id instead] ID of the run under which to log the tag.`)
|
||||
|
||||
cmd.Use = "set-tag KEY VALUE"
|
||||
cmd.Short = `Set a tag.`
|
||||
cmd.Long = `Set a tag.
|
||||
cmd.Short = `Set a tag for a run.`
|
||||
cmd.Long = `Set a tag for a run.
|
||||
|
||||
Sets a tag on a run. Tags are run metadata that can be updated during a run
|
||||
and after a run completes.
|
||||
|
||||
Arguments:
|
||||
KEY: Name of the tag. Maximum size depends on storage backend. All storage
|
||||
backends are guaranteed to support key values up to 250 bytes in size.
|
||||
VALUE: String value of the tag being logged. Maximum size depends on storage
|
||||
backend. All storage backends are guaranteed to support key values up to
|
||||
5000 bytes in size.`
|
||||
KEY: Name of the tag. Keys up to 250 bytes in size are supported.
|
||||
VALUE: String value of the tag being logged. Values up to 64KB in size are
|
||||
supported.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
|
@ -2356,6 +2374,7 @@ func newUpdateRun() *cobra.Command {
|
|||
|
||||
cmd.Flags().Int64Var(&updateRunReq.EndTime, "end-time", updateRunReq.EndTime, `Unix timestamp in milliseconds of when the run ended.`)
|
||||
cmd.Flags().StringVar(&updateRunReq.RunId, "run-id", updateRunReq.RunId, `ID of the run to update.`)
|
||||
cmd.Flags().StringVar(&updateRunReq.RunName, "run-name", updateRunReq.RunName, `Updated name of the run.`)
|
||||
cmd.Flags().StringVar(&updateRunReq.RunUuid, "run-uuid", updateRunReq.RunUuid, `[Deprecated, use run_id instead] ID of the run to update.`)
|
||||
cmd.Flags().Var(&updateRunReq.Status, "status", `Updated status of the run. Supported values: [FAILED, FINISHED, KILLED, RUNNING, SCHEDULED]`)
|
||||
|
||||
|
|
|
@@ -41,6 +41,7 @@ func New() *cobra.Command {
	cmd.AddCommand(newGetMessage())
	cmd.AddCommand(newGetMessageQueryResult())
	cmd.AddCommand(newGetMessageQueryResultByAttachment())
	cmd.AddCommand(newGetSpace())
	cmd.AddCommand(newStartConversation())

	// Apply optional overrides to this command.
@ -78,8 +79,9 @@ func newCreateMessage() *cobra.Command {
|
|||
cmd.Short = `Create conversation message.`
|
||||
cmd.Long = `Create conversation message.
|
||||
|
||||
Create new message in [conversation](:method:genie/startconversation). The AI
|
||||
response uses all previously created messages in the conversation to respond.
|
||||
Create new message in a [conversation](:method:genie/startconversation). The
|
||||
AI response uses all previously created messages in the conversation to
|
||||
respond.
|
||||
|
||||
Arguments:
|
||||
SPACE_ID: The ID associated with the Genie space where the conversation is started.
|
||||
|
@ -298,8 +300,8 @@ func newGetMessageQueryResult() *cobra.Command {
|
|||
// TODO: short flags
|
||||
|
||||
cmd.Use = "get-message-query-result SPACE_ID CONVERSATION_ID MESSAGE_ID"
|
||||
cmd.Short = `Get conversation message SQL query result.`
|
||||
cmd.Long = `Get conversation message SQL query result.
|
||||
cmd.Short = `[Deprecated] Get conversation message SQL query result.`
|
||||
cmd.Long = `[Deprecated] Get conversation message SQL query result.
|
||||
|
||||
Get the result of SQL query if the message has a query attachment. This is
|
||||
only available if a message has a query attachment and the message status is
|
||||
|
@ -362,11 +364,12 @@ func newGetMessageQueryResultByAttachment() *cobra.Command {
|
|||
// TODO: short flags
|
||||
|
||||
cmd.Use = "get-message-query-result-by-attachment SPACE_ID CONVERSATION_ID MESSAGE_ID ATTACHMENT_ID"
|
||||
cmd.Short = `Get conversation message SQL query result by attachment id.`
|
||||
cmd.Long = `Get conversation message SQL query result by attachment id.
|
||||
cmd.Short = `Get conversation message SQL query result.`
|
||||
cmd.Long = `Get conversation message SQL query result.
|
||||
|
||||
Get the result of SQL query by attachment id This is only available if a
|
||||
message has a query attachment and the message status is EXECUTING_QUERY.
|
||||
Get the result of SQL query if the message has a query attachment. This is
|
||||
only available if a message has a query attachment and the message status is
|
||||
EXECUTING_QUERY OR COMPLETED.
|
||||
|
||||
Arguments:
|
||||
SPACE_ID: Genie space ID
|
||||
|
@@ -410,6 +413,64 @@ func newGetMessageQueryResultByAttachment() *cobra.Command {
	return cmd
}

// start get-space command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getSpaceOverrides []func(
	*cobra.Command,
	*dashboards.GenieGetSpaceRequest,
)

func newGetSpace() *cobra.Command {
	cmd := &cobra.Command{}

	var getSpaceReq dashboards.GenieGetSpaceRequest

	// TODO: short flags

	cmd.Use = "get-space SPACE_ID"
	cmd.Short = `Get details of a Genie Space.`
	cmd.Long = `Get details of a Genie Space.

  Get a Genie Space.

  Arguments:
    SPACE_ID: The ID associated with the Genie space`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		check := root.ExactArgs(1)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		getSpaceReq.SpaceId = args[0]

		response, err := w.Genie.GetSpace(ctx, getSpaceReq)
		if err != nil {
			return err
		}
		return cmdio.Render(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range getSpaceOverrides {
		fn(cmd, &getSpaceReq)
	}

	return cmd
}

// start start-conversation command

// Slice with functions to override default command behavior.
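New generated commands such as `get-space` expose an overrides slice so that hand-written files in the same package can adjust the command without editing generated code. The sketch below models that registration pattern as a standalone program; the local `getSpaceRequest` struct and the `--include-metadata` flag are hypothetical stand-ins for `dashboards.GenieGetSpaceRequest` and whatever a real override in `override.go` would add.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Stand-in for dashboards.GenieGetSpaceRequest.
type getSpaceRequest struct{ SpaceID string }

// Package-level hook slice, mirroring the generated getSpaceOverrides variable.
var getSpaceOverrides []func(*cobra.Command, *getSpaceRequest)

func init() {
	// A curated override registered from init(), as the generated comment describes.
	getSpaceOverrides = append(getSpaceOverrides, func(cmd *cobra.Command, req *getSpaceRequest) {
		cmd.Flags().Bool("include-metadata", false, "hypothetical extra flag added by an override")
	})
}

func newGetSpace() *cobra.Command {
	cmd := &cobra.Command{Use: "get-space SPACE_ID"}
	var req getSpaceRequest

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		req.SpaceID = args[0]
		fmt.Println("would fetch Genie space", req.SpaceID)
		return nil
	}

	// Apply optional overrides last, exactly as the generated constructor does.
	for _, fn := range getSpaceOverrides {
		fn(cmd, &req)
	}
	return cmd
}

func main() {
	cmd := newGetSpace()
	cmd.SetArgs([]string{"space-123"})
	_ = cmd.Execute()
}
```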
@@ -34,6 +34,7 @@ func New() *cobra.Command {
	cmd.AddCommand(newDelete())
	cmd.AddCommand(newGet())
	cmd.AddCommand(newList())
	cmd.AddCommand(newListProviderShareAssets())
	cmd.AddCommand(newListShares())
	cmd.AddCommand(newUpdate())

@ -337,6 +338,72 @@ func newList() *cobra.Command {
|
|||
return cmd
|
||||
}
|
||||
|
||||
// start list-provider-share-assets command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
// Functions can be added from the `init()` function in manually curated files in this directory.
|
||||
var listProviderShareAssetsOverrides []func(
|
||||
*cobra.Command,
|
||||
*sharing.ListProviderShareAssetsRequest,
|
||||
)
|
||||
|
||||
func newListProviderShareAssets() *cobra.Command {
|
||||
cmd := &cobra.Command{}
|
||||
|
||||
var listProviderShareAssetsReq sharing.ListProviderShareAssetsRequest
|
||||
|
||||
// TODO: short flags
|
||||
|
||||
cmd.Flags().IntVar(&listProviderShareAssetsReq.FunctionMaxResults, "function-max-results", listProviderShareAssetsReq.FunctionMaxResults, `Maximum number of functions to return.`)
|
||||
cmd.Flags().IntVar(&listProviderShareAssetsReq.NotebookMaxResults, "notebook-max-results", listProviderShareAssetsReq.NotebookMaxResults, `Maximum number of notebooks to return.`)
|
||||
cmd.Flags().IntVar(&listProviderShareAssetsReq.TableMaxResults, "table-max-results", listProviderShareAssetsReq.TableMaxResults, `Maximum number of tables to return.`)
|
||||
cmd.Flags().IntVar(&listProviderShareAssetsReq.VolumeMaxResults, "volume-max-results", listProviderShareAssetsReq.VolumeMaxResults, `Maximum number of volumes to return.`)
|
||||
|
||||
cmd.Use = "list-provider-share-assets PROVIDER_NAME SHARE_NAME"
|
||||
cmd.Short = `List assets by provider share.`
|
||||
cmd.Long = `List assets by provider share.
|
||||
|
||||
Get arrays of assets associated with a specified provider's share. The caller
|
||||
is the recipient of the share.
|
||||
|
||||
Arguments:
|
||||
PROVIDER_NAME: The name of the provider who owns the share.
|
||||
SHARE_NAME: The name of the share.`
|
||||
|
||||
cmd.Annotations = make(map[string]string)
|
||||
|
||||
cmd.Args = func(cmd *cobra.Command, args []string) error {
|
||||
check := root.ExactArgs(2)
|
||||
return check(cmd, args)
|
||||
}
|
||||
|
||||
cmd.PreRunE = root.MustWorkspaceClient
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
||||
listProviderShareAssetsReq.ProviderName = args[0]
|
||||
listProviderShareAssetsReq.ShareName = args[1]
|
||||
|
||||
response, err := w.Providers.ListProviderShareAssets(ctx, listProviderShareAssetsReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmdio.Render(ctx, response)
|
||||
}
|
||||
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
cmd.ValidArgsFunction = cobra.NoFileCompletions
|
||||
|
||||
// Apply optional overrides to this command.
|
||||
for _, fn := range listProviderShareAssetsOverrides {
|
||||
fn(cmd, &listProviderShareAssetsReq)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// start list-shares command
|
||||
|
||||
// Slice with functions to override default command behavior.
|
||||
|
|
|
@@ -484,8 +484,6 @@ func newUpdatePermissions() *cobra.Command {
	cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`)

	// TODO: array: changes
	cmd.Flags().IntVar(&updatePermissionsReq.MaxResults, "max-results", updatePermissionsReq.MaxResults, `Maximum number of permissions to return.`)
	cmd.Flags().StringVar(&updatePermissionsReq.PageToken, "page-token", updatePermissionsReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)

	cmd.Use = "update-permissions NAME"
	cmd.Short = `Update permissions.`

@@ -494,8 +492,8 @@ func newUpdatePermissions() *cobra.Command {
  Updates the permissions for a data share in the metastore. The caller must be
  a metastore admin or an owner of the share.

  For new recipient grants, the user must also be the owner of the recipients.
  recipient revocations do not require additional privileges.
  For new recipient grants, the user must also be the recipient owner or
  metastore admin. recipient revocations do not require additional privileges.

  Arguments:
    NAME: The name of the share.`

@@ -526,11 +524,11 @@ func newUpdatePermissions() *cobra.Command {
		}
		updatePermissionsReq.Name = args[0]

		err = w.Shares.UpdatePermissions(ctx, updatePermissionsReq)
		response, err := w.Shares.UpdatePermissions(ctx, updatePermissionsReq)
		if err != nil {
			return err
		}
		return nil
		return cmdio.Render(ctx, response)
	}

	// Disable completions since they are not applicable.
@@ -426,6 +426,7 @@ func newQueryIndex() *cobra.Command {
	// TODO: short flags
	cmd.Flags().Var(&queryIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`)

	// TODO: array: columns_to_rerank
	cmd.Flags().StringVar(&queryIndexReq.FiltersJson, "filters-json", queryIndexReq.FiltersJson, `JSON string representing query filters.`)
	cmd.Flags().IntVar(&queryIndexReq.NumResults, "num-results", queryIndexReq.NumResults, `Number of results to return.`)
	cmd.Flags().StringVar(&queryIndexReq.QueryText, "query-text", queryIndexReq.QueryText, `Query text.`)
@@ -177,6 +177,7 @@ func newExport() *cobra.Command {
  DBC,
  HTML,
  JUPYTER,
  RAW,
  R_MARKDOWN,
  SOURCE,
]`)

@@ -539,7 +540,7 @@ func newList() *cobra.Command {

	// TODO: short flags

	cmd.Flags().IntVar(&listReq.NotebooksModifiedAfter, "notebooks-modified-after", listReq.NotebooksModifiedAfter, `UTC timestamp in milliseconds.`)
	cmd.Flags().Int64Var(&listReq.NotebooksModifiedAfter, "notebooks-modified-after", listReq.NotebooksModifiedAfter, `UTC timestamp in milliseconds.`)

	cmd.Use = "list PATH"
	cmd.Short = `List contents.`
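The `notebooks-modified-after` flag switches from `IntVar` to `Int64Var` because the value is a millisecond Unix timestamp, which the SDK now models as an int64 field. A quick check of the magnitudes involved shows why a 32-bit value would not do:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	ms := time.Now().UnixMilli() // roughly 1.7e12 for dates in the 2020s

	// Millisecond timestamps overflow a 32-bit integer, so the request field is
	// int64 and the flag is registered with Int64Var to match it.
	fmt.Println("now (ms):", ms)
	fmt.Println("fits in int32:", ms <= math.MaxInt32)
}
```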
go.mod
@@ -9,7 +9,7 @@ require (
	github.com/BurntSushi/toml v1.4.0 // MIT
	github.com/Masterminds/semver/v3 v3.3.1 // MIT
	github.com/briandowns/spinner v1.23.1 // Apache 2.0
	github.com/databricks/databricks-sdk-go v0.58.1 // Apache 2.0
	github.com/databricks/databricks-sdk-go v0.59.0 // Apache 2.0
	github.com/fatih/color v1.18.0 // MIT
	github.com/google/uuid v1.6.0 // BSD-3-Clause
	github.com/gorilla/mux v1.8.1 // BSD 3-Clause
@@ -34,8 +34,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/databricks/databricks-sdk-go v0.58.1 h1:dUs9ZmFi7hYiL3NwLSAbxqQu66E3BzwM8EU/wcCTJ10=
github.com/databricks/databricks-sdk-go v0.58.1/go.mod h1:JpLizplEs+up9/Z4Xf2x++o3sM9eTTWFGzIXAptKJzI=
github.com/databricks/databricks-sdk-go v0.59.0 h1:m87rbnoeO7A6+QKo4QzwyPE5AzEeGvopEaavn3F5y/o=
github.com/databricks/databricks-sdk-go v0.59.0/go.mod h1:JpLizplEs+up9/Z4Xf2x++o3sM9eTTWFGzIXAptKJzI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -257,8 +257,9 @@ func (w *FilesClient) deleteDirectory(ctx context.Context, name string) error {
	// The directory delete API returns a 400 if the directory is not empty
	if aerr.StatusCode == http.StatusBadRequest {
		reasons := []string{}
		for _, detail := range aerr.Details {
			reasons = append(reasons, detail.Reason)
		details := aerr.ErrorDetails()
		if details.ErrorInfo != nil {
			reasons = append(reasons, details.ErrorInfo.Reason)
		}
		// Error code 400 is generic and can be returned for other reasons. Make
		// sure one of the reasons for the error is that the directory is not empty.
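This filer change swaps iterating `aerr.Details` for the SDK 0.59 `ErrorDetails()` accessor and its `ErrorInfo` field. Below is a hedged, self-contained sketch of how the surrounding check reads with the new accessor; the stand-in types mirror only the shape used above (the real ones live in the SDK's apierr package), and the "DIRECTORY_NOT_EMPTY" reason string is a placeholder, not taken from this diff.

```go
package main

import (
	"fmt"
	"net/http"
	"slices"
)

// Local stand-ins shaped like the SDK's error-details accessor used in the hunk.
type errorInfo struct{ Reason string }

type errorDetails struct{ ErrorInfo *errorInfo }

type apiError struct {
	StatusCode int
	details    errorDetails
}

func (e *apiError) ErrorDetails() errorDetails { return e.details }

// isDirectoryNotEmpty mirrors the deleteDirectory check: a 400 only counts as
// "directory not empty" if one of the error-detail reasons says so.
func isDirectoryNotEmpty(aerr *apiError) bool {
	if aerr.StatusCode != http.StatusBadRequest {
		return false
	}
	reasons := []string{}
	details := aerr.ErrorDetails()
	if details.ErrorInfo != nil {
		reasons = append(reasons, details.ErrorInfo.Reason)
	}
	return slices.Contains(reasons, "DIRECTORY_NOT_EMPTY") // placeholder reason
}

func main() {
	err := &apiError{
		StatusCode: http.StatusBadRequest,
		details:    errorDetails{ErrorInfo: &errorInfo{Reason: "DIRECTORY_NOT_EMPTY"}},
	}
	fmt.Println(isDirectoryNotEmpty(err))
}
```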
@@ -91,6 +91,7 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri
	})

	b.Tagging = tags.ForCloud(w.Config)
	b.SetWorkpaceClient(w)
	b.WorkspaceClient()

	diags = phases.Initialize(ctx, b)