mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin' into bundle-exec
commit de5a348b4e
@@ -1 +1 @@
-99f644e72261ef5ecf8d74db20f4b7a1e09723cc
+e5c870006a536121442cfd2441bdc8a5fb76ae1e
@@ -17,5 +17,8 @@
     "python.envFile": "${workspaceRoot}/.env",
     "python.analysis.stubPath": ".vscode",
     "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\<codecell\\>|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])",
-    "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------"
+    "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------",
+    "files.associations": {
+        "script": "shellscript"
+    }
 }
@@ -35,6 +35,7 @@ Usage:
   databricks apps update NAME [flags]
 
 Flags:
+      --budget-policy-id string
       --description string     The description of the app.
   -h, --help                   help for update
       --json JSON              either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes))
@@ -2,7 +2,7 @@ terraform {
   required_providers {
     databricks = {
       source  = "databricks/databricks"
-      version = "1.65.1"
+      version = "1.68.0"
     }
   }
 
@@ -4,9 +4,9 @@
 Initializing the backend...
 
 Initializing provider plugins...
-- Finding databricks/databricks versions matching "1.65.1"...
-- Installing databricks/databricks v1.65.1...
-- Installed databricks/databricks v1.65.1 (unauthenticated)
+- Finding databricks/databricks versions matching "1.68.0"...
+- Installing databricks/databricks v1.68.0...
+- Installed databricks/databricks v1.68.0 (unauthenticated)
 
 Terraform has created a lock file .terraform.lock.hcl to record the provider
 selections it made above. Include this file in your version control repository
@@ -5,6 +5,7 @@ github.com/databricks/cli/bundle/config/resources.App:
       The active deployment of the app. A deployment is considered active when it has been deployed
       to the app compute.
   "app_status": {}
+  "budget_policy_id": {}
   "compute_status": {}
   "create_time":
     "description": |-
@@ -19,6 +20,7 @@ github.com/databricks/cli/bundle/config/resources.App:
   "description":
     "description": |-
       The description of the app.
+  "effective_budget_policy_id": {}
   "id":
     "description": |-
       The unique identifier of the app.
@@ -118,7 +120,7 @@ github.com/databricks/cli/bundle/config/resources.Cluster:
       The optional ID of the instance pool to which the cluster belongs.
   "is_single_node":
     "description": |
-      This field can only be used with `kind`.
+      This field can only be used when `kind = CLASSIC_PREVIEW`.
 
       When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`
   "kind": {}
@@ -175,7 +177,7 @@ github.com/databricks/cli/bundle/config/resources.Cluster:
       Up to 10 keys can be specified.
   "use_ml_runtime":
     "description": |
-      This field can only be used with `kind`.
+      This field can only be used when `kind = CLASSIC_PREVIEW`.
 
       `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
   "workload_type": {}
@@ -311,6 +313,9 @@ github.com/databricks/cli/bundle/config/resources.Job:
     "description": |-
       A collection of system notification IDs to notify when runs of this job begin or complete.
 github.com/databricks/cli/bundle/config/resources.MlflowExperiment:
+  "_":
+    "description": |-
+      An experiment and its metadata.
   "artifact_location":
     "description": |-
       Location where artifacts for the experiment are stored.
@@ -1089,7 +1094,7 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec:
       The optional ID of the instance pool to which the cluster belongs.
   "is_single_node":
     "description": |
-      This field can only be used with `kind`.
+      This field can only be used when `kind = CLASSIC_PREVIEW`.
 
       When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`
   "kind": {}
@@ -1146,7 +1151,7 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec:
       Up to 10 keys can be specified.
   "use_ml_runtime":
     "description": |
-      This field can only be used with `kind`.
+      This field can only be used when `kind = CLASSIC_PREVIEW`.
 
       `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
   "workload_type": {}
@@ -1156,7 +1161,7 @@ github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode:
       Data security mode decides what data governance model to use when accessing data
       from a cluster.
 
-      The following modes can only be used with `kind`.
+      The following modes can only be used when `kind = CLASSIC_PREVIEW`.
       * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
       * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.
       * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
@@ -1465,6 +1470,19 @@ github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask:
   "notebook_name":
     "description": |-
       Name of the notebook being run.
+github.com/databricks/databricks-sdk-go/service/jobs.ComputeConfig:
+  "_":
+    "description": |-
+      Next field: 4
+  "gpu_node_pool_id":
+    "description": |-
+      ID of the GPU pool to use.
+  "gpu_type":
+    "description": |-
+      GPU type.
+  "num_gpus":
+    "description": |-
+      Number of GPUs.
 github.com/databricks/databricks-sdk-go/service/jobs.Condition:
   "_":
     "enum":
@@ -1579,6 +1597,37 @@ github.com/databricks/databricks-sdk-go/service/jobs.Format:
       SINGLE_TASK
     - |-
       MULTI_TASK
+github.com/databricks/databricks-sdk-go/service/jobs.GenAiComputeTask:
+  "_":
+    "description": |-
+      Next field: 9
+  "command":
+    "description": |-
+      Command launcher to run the actual script, e.g. bash, python etc.
+  "compute": {}
+  "dl_runtime_image":
+    "description": |-
+      Runtime image
+  "mlflow_experiment_name":
+    "description": |-
+      Optional string containing the name of the MLflow experiment to log the run to. If name is not
+      found, backend will create the mlflow experiment using the name.
+  "source":
+    "description": |-
+      Optional location type of the training script. When set to `WORKSPACE`, the script will be retrieved from the local Databricks workspace. When set to `GIT`, the script will be retrieved from a Git repository
+      defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.
+      * `WORKSPACE`: Script is located in Databricks workspace.
+      * `GIT`: Script is located in cloud Git provider.
+  "training_script_path":
+    "description": |-
+      The training script file path to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.
+  "yaml_parameters":
+    "description": |-
+      Optional string containing model parameters passed to the training script in yaml format.
+      If present, then the content in yaml_parameters_file_path will be ignored.
+  "yaml_parameters_file_path":
+    "description": |-
+      Optional path to a YAML file containing model parameters passed to the training script.
 github.com/databricks/databricks-sdk-go/service/jobs.GitProvider:
   "_":
     "enum":
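Not part of the commit: a minimal Go sketch of how a job task using the new gen_ai_compute_task might be assembled with the databricks-sdk-go jobs package. The Go struct and field names (GenAiComputeTask, ComputeConfig, DlRuntimeImage, GpuNodePoolId, NumGpus, and the Compute member) are inferred from the JSON keys documented above, and every value is a placeholder.

package main

import "github.com/databricks/databricks-sdk-go/service/jobs"

// Sketch only: field names are assumptions derived from the annotation keys
// above; the image, pool ID, and paths are invented examples.
func exampleGenAiComputeTask() jobs.Task {
    return jobs.Task{
        TaskKey: "finetune",
        GenAiComputeTask: &jobs.GenAiComputeTask{
            DlRuntimeImage:         "example/dl-runtime:latest", // required
            Command:                "python",
            TrainingScriptPath:     "/Workspace/Users/someone@example.com/train.py",
            MlflowExperimentName:   "/Users/someone@example.com/finetune",
            YamlParametersFilePath: "/Workspace/Users/someone@example.com/params.yaml",
            Compute: &jobs.ComputeConfig{
                GpuNodePoolId: "pool-abc123", // required
                GpuType:       "A10",
                NumGpus:       4, // required
            },
        },
    }
}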
@@ -2144,6 +2193,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.Task:
   "for_each_task":
     "description": |-
       The task executes a nested task for every input provided when the `for_each_task` field is present.
+  "gen_ai_compute_task": {}
   "health": {}
   "job_cluster_key":
     "description": |-
|
@ -2296,6 +2346,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.WebhookNotifications:
|
||||||
"description": |-
|
"description": |-
|
||||||
An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.
|
An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.
|
||||||
github.com/databricks/databricks-sdk-go/service/ml.ExperimentTag:
|
github.com/databricks/databricks-sdk-go/service/ml.ExperimentTag:
|
||||||
|
"_":
|
||||||
|
"description": |-
|
||||||
|
A tag for an experiment.
|
||||||
"key":
|
"key":
|
||||||
"description": |-
|
"description": |-
|
||||||
The tag key.
|
The tag key.
|
||||||
|
@@ -2864,6 +2917,12 @@ github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfig:
     "description": |-
       The underlying provider in Amazon Bedrock. Supported values (case
       insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.
+  "instance_profile_arn":
+    "description": |-
+      ARN of the instance profile that the external model will use to access AWS resources.
+      You must authenticate using an instance profile or access keys.
+      If you prefer to authenticate using access keys, see `aws_access_key_id`,
+      `aws_access_key_id_plaintext`, `aws_secret_access_key` and `aws_secret_access_key_plaintext`.
 github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider:
   "_":
     "enum":
@@ -5,6 +5,9 @@ github.com/databricks/cli/bundle/config/resources.App:
   "app_status":
     "description": |-
       PLACEHOLDER
+  "budget_policy_id":
+    "description": |-
+      PLACEHOLDER
   "compute_status":
     "description": |-
       PLACEHOLDER
@@ -23,6 +26,9 @@ github.com/databricks/cli/bundle/config/resources.App:
   "description":
     "description": |-
       PLACEHOLDER
+  "effective_budget_policy_id":
+    "description": |-
+      PLACEHOLDER
   "name":
     "description": |-
       PLACEHOLDER
@@ -506,6 +512,10 @@ github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes:
   "availability":
     "description": |-
       PLACEHOLDER
+github.com/databricks/databricks-sdk-go/service/jobs.GenAiComputeTask:
+  "compute":
+    "description": |-
+      PLACEHOLDER
 github.com/databricks/databricks-sdk-go/service/jobs.GitSource:
   "git_snapshot":
     "description": |-
@@ -530,6 +540,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask:
     "description": |-
       PLACEHOLDER
 github.com/databricks/databricks-sdk-go/service/jobs.Task:
+  "gen_ai_compute_task":
+    "description": |-
+      PLACEHOLDER
   "health":
     "description": |-
       PLACEHOLDER
@@ -1,3 +1,3 @@
 package schema
 
-const ProviderVersion = "1.65.1"
+const ProviderVersion = "1.68.0"
@@ -90,6 +90,7 @@ type DataSourceAppApp struct {
     Creator string `json:"creator,omitempty"`
     DefaultSourceCodePath string `json:"default_source_code_path,omitempty"`
     Description string `json:"description,omitempty"`
+    Id string `json:"id,omitempty"`
     Name string `json:"name"`
     PendingDeployment *DataSourceAppAppPendingDeployment `json:"pending_deployment,omitempty"`
     Resources []DataSourceAppAppResources `json:"resources,omitempty"`
@@ -90,6 +90,7 @@ type DataSourceAppsApp struct {
     Creator string `json:"creator,omitempty"`
     DefaultSourceCodePath string `json:"default_source_code_path,omitempty"`
     Description string `json:"description,omitempty"`
+    Id string `json:"id,omitempty"`
     Name string `json:"name"`
     PendingDeployment *DataSourceAppsAppPendingDeployment `json:"pending_deployment,omitempty"`
     Resources []DataSourceAppsAppResources `json:"resources,omitempty"`
@@ -46,9 +46,14 @@ type DataSourceClusterClusterInfoClusterLogConfS3 struct {
     Region string `json:"region,omitempty"`
 }
 
+type DataSourceClusterClusterInfoClusterLogConfVolumes struct {
+    Destination string `json:"destination"`
+}
+
 type DataSourceClusterClusterInfoClusterLogConf struct {
     Dbfs *DataSourceClusterClusterInfoClusterLogConfDbfs `json:"dbfs,omitempty"`
     S3 *DataSourceClusterClusterInfoClusterLogConfS3 `json:"s3,omitempty"`
+    Volumes *DataSourceClusterClusterInfoClusterLogConfVolumes `json:"volumes,omitempty"`
 }
 
 type DataSourceClusterClusterInfoClusterLogStatus struct {
@@ -191,9 +196,14 @@ type DataSourceClusterClusterInfoSpecClusterLogConfS3 struct {
     Region string `json:"region,omitempty"`
 }
 
+type DataSourceClusterClusterInfoSpecClusterLogConfVolumes struct {
+    Destination string `json:"destination"`
+}
+
 type DataSourceClusterClusterInfoSpecClusterLogConf struct {
     Dbfs *DataSourceClusterClusterInfoSpecClusterLogConfDbfs `json:"dbfs,omitempty"`
     S3 *DataSourceClusterClusterInfoSpecClusterLogConfS3 `json:"s3,omitempty"`
+    Volumes *DataSourceClusterClusterInfoSpecClusterLogConfVolumes `json:"volumes,omitempty"`
 }
 
 type DataSourceClusterClusterInfoSpecClusterMountInfoNetworkFilesystemInfo struct {
@@ -0,0 +1,21 @@
+// Generated from Databricks Terraform provider schema. DO NOT EDIT.
+
+package schema
+
+type DataSourceDashboardsDashboards struct {
+    CreateTime string `json:"create_time,omitempty"`
+    DashboardId string `json:"dashboard_id,omitempty"`
+    DisplayName string `json:"display_name,omitempty"`
+    Etag string `json:"etag,omitempty"`
+    LifecycleState string `json:"lifecycle_state,omitempty"`
+    ParentPath string `json:"parent_path,omitempty"`
+    Path string `json:"path,omitempty"`
+    SerializedDashboard string `json:"serialized_dashboard,omitempty"`
+    UpdateTime string `json:"update_time,omitempty"`
+    WarehouseId string `json:"warehouse_id,omitempty"`
+}
+
+type DataSourceDashboards struct {
+    DashboardNameContains string `json:"dashboard_name_contains,omitempty"`
+    Dashboards []DataSourceDashboardsDashboards `json:"dashboards,omitempty"`
+}
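Not part of the commit: a small sketch showing the JSON shape the new dashboards data source struct serializes to. It assumes the code lives inside the CLI module (the generated schema package is internal) and the name filter is a made-up value.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/databricks/cli/bundle/internal/tf/schema"
)

func main() {
    // "sales" is a hypothetical dashboard-name filter.
    ds := schema.DataSourceDashboards{DashboardNameContains: "sales"}
    b, err := json.Marshal(ds)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"dashboard_name_contains":"sales"}
}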
@@ -18,6 +18,7 @@ type DataSources struct {
     CurrentConfig map[string]any `json:"databricks_current_config,omitempty"`
     CurrentMetastore map[string]any `json:"databricks_current_metastore,omitempty"`
     CurrentUser map[string]any `json:"databricks_current_user,omitempty"`
+    Dashboards map[string]any `json:"databricks_dashboards,omitempty"`
     DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"`
     DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"`
     Directory map[string]any `json:"databricks_directory,omitempty"`
@@ -83,6 +84,7 @@ func NewDataSources() *DataSources {
         CurrentConfig: make(map[string]any),
         CurrentMetastore: make(map[string]any),
         CurrentUser: make(map[string]any),
+        Dashboards: make(map[string]any),
         DbfsFile: make(map[string]any),
         DbfsFilePaths: make(map[string]any),
         Directory: make(map[string]any),
@@ -90,6 +90,7 @@ type ResourceApp struct {
     Creator string `json:"creator,omitempty"`
     DefaultSourceCodePath string `json:"default_source_code_path,omitempty"`
     Description string `json:"description,omitempty"`
+    Id string `json:"id,omitempty"`
     Name string `json:"name"`
     NoCompute bool `json:"no_compute,omitempty"`
     PendingDeployment *ResourceAppPendingDeployment `json:"pending_deployment,omitempty"`
@@ -46,9 +46,14 @@ type ResourceClusterClusterLogConfS3 struct {
     Region string `json:"region,omitempty"`
 }
 
+type ResourceClusterClusterLogConfVolumes struct {
+    Destination string `json:"destination"`
+}
+
 type ResourceClusterClusterLogConf struct {
     Dbfs *ResourceClusterClusterLogConfDbfs `json:"dbfs,omitempty"`
     S3 *ResourceClusterClusterLogConfS3 `json:"s3,omitempty"`
+    Volumes *ResourceClusterClusterLogConfVolumes `json:"volumes,omitempty"`
 }
 
 type ResourceClusterClusterMountInfoNetworkFilesystemInfo struct {
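Not part of the commit: a brief sketch of how the newly added volumes block can be populated on the generated cluster resource. The Unity Catalog volume path is a made-up example, and the import path assumes the CLI's internal schema package.

package main

import "github.com/databricks/cli/bundle/internal/tf/schema"

// Sketch: routing cluster log delivery to a Unity Catalog volume via the new
// Volumes block; the destination is a placeholder path.
func volumesLogConf() schema.ResourceClusterClusterLogConf {
    return schema.ResourceClusterClusterLogConf{
        Volumes: &schema.ResourceClusterClusterLogConfVolumes{
            Destination: "/Volumes/main/default/cluster_logs",
        },
    }
}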
@@ -4,12 +4,22 @@ package schema
 
 type ResourceConnection struct {
     Comment string `json:"comment,omitempty"`
-    ConnectionType string `json:"connection_type"`
+    ConnectionId string `json:"connection_id,omitempty"`
+    ConnectionType string `json:"connection_type,omitempty"`
+    CreatedAt int `json:"created_at,omitempty"`
+    CreatedBy string `json:"created_by,omitempty"`
+    CredentialType string `json:"credential_type,omitempty"`
+    FullName string `json:"full_name,omitempty"`
     Id string `json:"id,omitempty"`
     MetastoreId string `json:"metastore_id,omitempty"`
-    Name string `json:"name"`
-    Options map[string]string `json:"options"`
+    Name string `json:"name,omitempty"`
+    Options map[string]string `json:"options,omitempty"`
     Owner string `json:"owner,omitempty"`
     Properties map[string]string `json:"properties,omitempty"`
+    ProvisioningInfo []any `json:"provisioning_info,omitempty"`
     ReadOnly bool `json:"read_only,omitempty"`
+    SecurableType string `json:"securable_type,omitempty"`
+    UpdatedAt int `json:"updated_at,omitempty"`
+    UpdatedBy string `json:"updated_by,omitempty"`
+    Url string `json:"url,omitempty"`
 }
@@ -114,9 +114,14 @@ type ResourceJobJobClusterNewClusterClusterLogConfS3 struct {
     Region string `json:"region,omitempty"`
 }
 
+type ResourceJobJobClusterNewClusterClusterLogConfVolumes struct {
+    Destination string `json:"destination"`
+}
+
 type ResourceJobJobClusterNewClusterClusterLogConf struct {
     Dbfs *ResourceJobJobClusterNewClusterClusterLogConfDbfs `json:"dbfs,omitempty"`
     S3 *ResourceJobJobClusterNewClusterClusterLogConfS3 `json:"s3,omitempty"`
+    Volumes *ResourceJobJobClusterNewClusterClusterLogConfVolumes `json:"volumes,omitempty"`
 }
 
 type ResourceJobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfo struct {
@@ -339,9 +344,14 @@ type ResourceJobNewClusterClusterLogConfS3 struct {
     Region string `json:"region,omitempty"`
 }
 
+type ResourceJobNewClusterClusterLogConfVolumes struct {
+    Destination string `json:"destination"`
+}
+
 type ResourceJobNewClusterClusterLogConf struct {
     Dbfs *ResourceJobNewClusterClusterLogConfDbfs `json:"dbfs,omitempty"`
     S3 *ResourceJobNewClusterClusterLogConfS3 `json:"s3,omitempty"`
+    Volumes *ResourceJobNewClusterClusterLogConfVolumes `json:"volumes,omitempty"`
 }
 
 type ResourceJobNewClusterClusterMountInfoNetworkFilesystemInfo struct {
@@ -708,9 +718,14 @@ type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfS3 struct {
     Region string `json:"region,omitempty"`
 }
 
+type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfVolumes struct {
+    Destination string `json:"destination"`
+}
+
 type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConf struct {
     Dbfs *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfDbfs `json:"dbfs,omitempty"`
     S3 *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfS3 `json:"s3,omitempty"`
+    Volumes *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfVolumes `json:"volumes,omitempty"`
 }
 
 type ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo struct {
@@ -1104,9 +1119,14 @@ type ResourceJobTaskNewClusterClusterLogConfS3 struct {
     Region string `json:"region,omitempty"`
 }
 
+type ResourceJobTaskNewClusterClusterLogConfVolumes struct {
+    Destination string `json:"destination"`
+}
+
 type ResourceJobTaskNewClusterClusterLogConf struct {
     Dbfs *ResourceJobTaskNewClusterClusterLogConfDbfs `json:"dbfs,omitempty"`
     S3 *ResourceJobTaskNewClusterClusterLogConfS3 `json:"s3,omitempty"`
+    Volumes *ResourceJobTaskNewClusterClusterLogConfVolumes `json:"volumes,omitempty"`
 }
 
 type ResourceJobTaskNewClusterClusterMountInfoNetworkFilesystemInfo struct {
@@ -47,9 +47,14 @@ type ResourcePipelineClusterClusterLogConfS3 struct {
     Region string `json:"region,omitempty"`
 }
 
+type ResourcePipelineClusterClusterLogConfVolumes struct {
+    Destination string `json:"destination"`
+}
+
 type ResourcePipelineClusterClusterLogConf struct {
     Dbfs *ResourcePipelineClusterClusterLogConfDbfs `json:"dbfs,omitempty"`
     S3 *ResourcePipelineClusterClusterLogConfS3 `json:"s3,omitempty"`
+    Volumes *ResourcePipelineClusterClusterLogConfVolumes `json:"volumes,omitempty"`
 }
 
 type ResourcePipelineClusterGcpAttributes struct {
@@ -21,7 +21,7 @@ type Root struct {
 
 const ProviderHost = "registry.terraform.io"
 const ProviderSource = "databricks/databricks"
-const ProviderVersion = "1.65.1"
+const ProviderVersion = "1.68.0"
 
 func NewRoot() *Root {
     return &Root{
@@ -70,6 +70,9 @@
       "app_status": {
         "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.ApplicationStatus"
       },
+      "budget_policy_id": {
+        "$ref": "#/$defs/string"
+      },
       "compute_status": {
         "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.ComputeStatus"
       },
@@ -88,6 +91,9 @@
       "description": {
        "$ref": "#/$defs/string"
       },
+      "effective_budget_policy_id": {
+        "$ref": "#/$defs/string"
+      },
       "id": {
         "description": "The unique identifier of the app.",
         "$ref": "#/$defs/string"
@@ -210,7 +216,7 @@
         "$ref": "#/$defs/string"
       },
       "is_single_node": {
-        "description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
+        "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
         "$ref": "#/$defs/bool"
       },
       "kind": {
@@ -255,7 +261,7 @@
         "$ref": "#/$defs/slice/string"
       },
       "use_ml_runtime": {
-        "description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
+        "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
         "$ref": "#/$defs/bool"
       },
       "workload_type": {
@@ -465,6 +471,7 @@
     "oneOf": [
       {
         "type": "object",
+        "description": "An experiment and its metadata.",
         "properties": {
           "artifact_location": {
             "description": "Location where artifacts for the experiment are stored.",
@@ -2585,7 +2592,7 @@
         "$ref": "#/$defs/string"
       },
       "is_single_node": {
-        "description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
+        "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n",
         "$ref": "#/$defs/bool"
       },
       "kind": {
@@ -2627,7 +2634,7 @@
         "$ref": "#/$defs/slice/string"
       },
       "use_ml_runtime": {
-        "description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
+        "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n",
         "$ref": "#/$defs/bool"
       },
       "workload_type": {
@@ -2646,7 +2653,7 @@
     "oneOf": [
       {
         "type": "string",
-        "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used with `kind`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n",
+        "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n",
         "enum": [
           "DATA_SECURITY_MODE_AUTO",
           "DATA_SECURITY_MODE_STANDARD",
@@ -3230,6 +3237,37 @@
         }
       ]
     },
+    "jobs.ComputeConfig": {
+      "oneOf": [
+        {
+          "type": "object",
+          "description": "Next field: 4",
+          "properties": {
+            "gpu_node_pool_id": {
+              "description": "ID of the GPU pool to use.",
+              "$ref": "#/$defs/string"
+            },
+            "gpu_type": {
+              "description": "GPU type.",
+              "$ref": "#/$defs/string"
+            },
+            "num_gpus": {
+              "description": "Number of GPUs.",
+              "$ref": "#/$defs/int"
+            }
+          },
+          "additionalProperties": false,
+          "required": [
+            "gpu_node_pool_id",
+            "num_gpus"
+          ]
+        },
+        {
+          "type": "string",
+          "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+        }
+      ]
+    },
     "jobs.Condition": {
       "oneOf": [
         {
|
@ -3463,6 +3501,55 @@
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"jobs.GenAiComputeTask": {
|
||||||
|
"oneOf": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"description": "Next field: 9",
|
||||||
|
"properties": {
|
||||||
|
"command": {
|
||||||
|
"description": "Command launcher to run the actual script, e.g. bash, python etc.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
},
|
||||||
|
"compute": {
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ComputeConfig"
|
||||||
|
},
|
||||||
|
"dl_runtime_image": {
|
||||||
|
"description": "Runtime image",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
},
|
||||||
|
"mlflow_experiment_name": {
|
||||||
|
"description": "Optional string containing the name of the MLflow experiment to log the run to. If name is not\nfound, backend will create the mlflow experiment using the name.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
},
|
||||||
|
"source": {
|
||||||
|
"description": "Optional location type of the training script. When set to `WORKSPACE`, the script will be retrieved from the local Databricks workspace. When set to `GIT`, the script will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Script is located in Databricks workspace.\n* `GIT`: Script is located in cloud Git provider.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Source"
|
||||||
|
},
|
||||||
|
"training_script_path": {
|
||||||
|
"description": "The training script file path to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
},
|
||||||
|
"yaml_parameters": {
|
||||||
|
"description": "Optional string containing model parameters passed to the training script in yaml format.\nIf present, then the content in yaml_parameters_file_path will be ignored.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
},
|
||||||
|
"yaml_parameters_file_path": {
|
||||||
|
"description": "Optional path to a YAML file containing model parameters passed to the training script.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": false,
|
||||||
|
"required": [
|
||||||
|
"dl_runtime_image"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"jobs.GitProvider": {
|
"jobs.GitProvider": {
|
||||||
"oneOf": [
|
"oneOf": [
|
||||||
{
|
{
|
||||||
|
@@ -4504,6 +4591,9 @@
           "description": "The task executes a nested task for every input provided when the `for_each_task` field is present.",
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask"
         },
+        "gen_ai_compute_task": {
+          "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.GenAiComputeTask"
+        },
         "health": {
           "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRules"
         },
@@ -4775,6 +4865,7 @@
     "oneOf": [
       {
         "type": "object",
+        "description": "A tag for an experiment.",
         "properties": {
           "key": {
             "description": "The tag key.",
|
||||||
"bedrock_provider": {
|
"bedrock_provider": {
|
||||||
"description": "The underlying provider in Amazon Bedrock. Supported values (case\ninsensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
|
"description": "The underlying provider in Amazon Bedrock. Supported values (case\ninsensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
|
||||||
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider"
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider"
|
||||||
|
},
|
||||||
|
"instance_profile_arn": {
|
||||||
|
"description": "ARN of the instance profile that the external model will use to access AWS resources.\nYou must authenticate using an instance profile or access keys.\nIf you prefer to authenticate using access keys, see `aws_access_key_id`,\n`aws_access_key_id_plaintext`, `aws_secret_access_key` and `aws_secret_access_key_plaintext`.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"additionalProperties": false,
|
"additionalProperties": false,
|
||||||
|
|
|
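Not part of the commit: a hedged sketch of an Amazon Bedrock external-model config that authenticates with the newly documented instance profile instead of access keys. The Go field names and the provider enum constant are assumptions based on the JSON keys above; the region and ARN are placeholders.

package main

import "github.com/databricks/databricks-sdk-go/service/serving"

func bedrockWithInstanceProfile() serving.AmazonBedrockConfig {
    return serving.AmazonBedrockConfig{
        AwsRegion:          "us-east-1",                                                 // placeholder region
        BedrockProvider:    serving.AmazonBedrockConfigBedrockProviderAnthropic,         // assumed constant name
        InstanceProfileArn: "arn:aws:iam::123456789012:instance-profile/bedrock-access", // placeholder ARN
    }
}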
@@ -58,8 +58,7 @@ func newCreate() *cobra.Command {
     // TODO: short flags
     cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-    // TODO: array: custom_tags
-    cmd.Flags().StringVar(&createReq.PolicyName, "policy-name", createReq.PolicyName, `The name of the policy.`)
+    // TODO: complex arg: policy
     cmd.Flags().StringVar(&createReq.RequestId, "request-id", createReq.RequestId, `A unique identifier for this request.`)
 
     cmd.Use = "create"
@@ -7,6 +7,7 @@ import (
 
     "github.com/databricks/cli/cmd/root"
     "github.com/databricks/cli/libs/cmdio"
+    "github.com/databricks/cli/libs/flags"
     "github.com/databricks/databricks-sdk-go/service/oauth2"
     "github.com/spf13/cobra"
 )
@@ -64,8 +65,12 @@ func newCreate() *cobra.Command {
     cmd := &cobra.Command{}
 
     var createReq oauth2.CreateServicePrincipalSecretRequest
+    var createJson flags.JsonFlag
 
     // TODO: short flags
+    cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+    cmd.Flags().StringVar(&createReq.Lifetime, "lifetime", createReq.Lifetime, `The lifetime of the secret in seconds.`)
 
     cmd.Use = "create SERVICE_PRINCIPAL_ID"
     cmd.Short = `Create service principal secret.`
@@ -88,6 +93,18 @@ func newCreate() *cobra.Command {
         ctx := cmd.Context()
         a := root.AccountClient(ctx)
 
+        if cmd.Flags().Changed("json") {
+            diags := createJson.Unmarshal(&createReq)
+            if diags.HasError() {
+                return diags.Error()
+            }
+            if len(diags) > 0 {
+                err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+                if err != nil {
+                    return err
+                }
+            }
+        }
         _, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId)
         if err != nil {
             return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0])
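Not part of the commit: a sketch of the request that the new --json and --lifetime flags populate for the service principal secret create command. Field names come from the oauth2 SDK type referenced above; the ID and lifetime are invented values (an equivalent invocation could pass --json '{"lifetime": "7776000s"}').

package main

import "github.com/databricks/databricks-sdk-go/service/oauth2"

// Sketch: the positional SERVICE_PRINCIPAL_ID argument fills ServicePrincipalId;
// --lifetime (or a "lifetime" key supplied via --json) fills Lifetime.
func exampleCreateSecretRequest() oauth2.CreateServicePrincipalSecretRequest {
    return oauth2.CreateServicePrincipalSecretRequest{
        ServicePrincipalId: 1234567890, // placeholder ID
        Lifetime:           "7776000s", // placeholder: 90 days
    }
}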
@@ -81,6 +81,7 @@ func newCreate() *cobra.Command {
     cmd.Flags().BoolVar(&createReq.NoCompute, "no-compute", createReq.NoCompute, `If true, the app will not be started after creation.`)
     // TODO: complex arg: active_deployment
     // TODO: complex arg: app_status
+    cmd.Flags().StringVar(&createReq.App.BudgetPolicyId, "budget-policy-id", createReq.App.BudgetPolicyId, ``)
     // TODO: complex arg: compute_status
     cmd.Flags().StringVar(&createReq.App.Description, "description", createReq.App.Description, `The description of the app.`)
     // TODO: complex arg: pending_deployment
@@ -938,6 +939,7 @@ func newUpdate() *cobra.Command {
 
     // TODO: complex arg: active_deployment
     // TODO: complex arg: app_status
+    cmd.Flags().StringVar(&updateReq.App.BudgetPolicyId, "budget-policy-id", updateReq.App.BudgetPolicyId, ``)
     // TODO: complex arg: compute_status
     cmd.Flags().StringVar(&updateReq.App.Description, "description", updateReq.App.Description, `The description of the app.`)
     // TODO: complex arg: pending_deployment
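Not part of the commit: a hedged sketch of the update request that the new --budget-policy-id flag fills in. The nested App field and its BudgetPolicyId/Description members are taken from the flag wiring above, but the exact Go struct layout is an assumption; the app name and policy ID are placeholders.

package main

import "github.com/databricks/databricks-sdk-go/service/apps"

func exampleUpdateAppRequest() apps.UpdateAppRequest {
    return apps.UpdateAppRequest{
        Name: "my-app", // placeholder app name
        App: apps.App{
            BudgetPolicyId: "0123-456789-abcdef",            // placeholder policy ID
            Description:    "App pinned to a budget policy", // optional description
        },
    }
}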
@@ -223,7 +223,7 @@ func newCreate() *cobra.Command {
     // TODO: complex arg: gcp_attributes
     // TODO: array: init_scripts
     cmd.Flags().StringVar(&createReq.InstancePoolId, "instance-pool-id", createReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`)
-    cmd.Flags().BoolVar(&createReq.IsSingleNode, "is-single-node", createReq.IsSingleNode, `This field can only be used with kind.`)
+    cmd.Flags().BoolVar(&createReq.IsSingleNode, "is-single-node", createReq.IsSingleNode, `This field can only be used when kind = CLASSIC_PREVIEW.`)
     cmd.Flags().Var(&createReq.Kind, "kind", `The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]`)
     cmd.Flags().StringVar(&createReq.NodeTypeId, "node-type-id", createReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
     cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`)
@@ -233,7 +233,7 @@ func newCreate() *cobra.Command {
     // TODO: map via StringToStringVar: spark_conf
     // TODO: map via StringToStringVar: spark_env_vars
     // TODO: array: ssh_public_keys
-    cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used with kind.`)
+    cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used when kind = CLASSIC_PREVIEW.`)
     // TODO: complex arg: workload_type
 
     cmd.Use = "create SPARK_VERSION"
|
||||||
// TODO: complex arg: gcp_attributes
|
// TODO: complex arg: gcp_attributes
|
||||||
// TODO: array: init_scripts
|
// TODO: array: init_scripts
|
||||||
cmd.Flags().StringVar(&editReq.InstancePoolId, "instance-pool-id", editReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`)
|
cmd.Flags().StringVar(&editReq.InstancePoolId, "instance-pool-id", editReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`)
|
||||||
cmd.Flags().BoolVar(&editReq.IsSingleNode, "is-single-node", editReq.IsSingleNode, `This field can only be used with kind.`)
|
cmd.Flags().BoolVar(&editReq.IsSingleNode, "is-single-node", editReq.IsSingleNode, `This field can only be used when kind = CLASSIC_PREVIEW.`)
|
||||||
cmd.Flags().Var(&editReq.Kind, "kind", `The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]`)
|
cmd.Flags().Var(&editReq.Kind, "kind", `The kind of compute described by this compute specification. Supported values: [CLASSIC_PREVIEW]`)
|
||||||
cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
|
cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
|
||||||
cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`)
|
cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`)
|
||||||
|
@ -503,7 +503,7 @@ func newEdit() *cobra.Command {
|
||||||
// TODO: map via StringToStringVar: spark_conf
|
// TODO: map via StringToStringVar: spark_conf
|
||||||
// TODO: map via StringToStringVar: spark_env_vars
|
// TODO: map via StringToStringVar: spark_env_vars
|
||||||
// TODO: array: ssh_public_keys
|
// TODO: array: ssh_public_keys
|
||||||
cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used with kind.`)
|
cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used when kind = CLASSIC_PREVIEW.`)
|
||||||
// TODO: complex arg: workload_type
|
// TODO: complex arg: workload_type
|
||||||
|
|
||||||
cmd.Use = "edit CLUSTER_ID SPARK_VERSION"
|
cmd.Use = "edit CLUSTER_ID SPARK_VERSION"
|
||||||
|
|
|
@@ -522,6 +522,7 @@ func newValidateCredential() *cobra.Command {
     // TODO: complex arg: aws_iam_role
     // TODO: complex arg: azure_managed_identity
     cmd.Flags().StringVar(&validateCredentialReq.CredentialName, "credential-name", validateCredentialReq.CredentialName, `Required.`)
+    // TODO: complex arg: databricks_gcp_service_account
     cmd.Flags().StringVar(&validateCredentialReq.ExternalLocationName, "external-location-name", validateCredentialReq.ExternalLocationName, `The name of an existing external location to validate.`)
     cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. Supported values: [SERVICE, STORAGE]`)
     cmd.Flags().BoolVar(&validateCredentialReq.ReadOnly, "read-only", validateCredentialReq.ReadOnly, `Whether the credential is only usable for read operations.`)
@@ -105,7 +105,7 @@ func newCreateExperiment() *cobra.Command {
 already exist and fails if another experiment with the same name already
 exists.

-Throws RESOURCE_ALREADY_EXISTS if a experiment with the given name exists.
+Throws RESOURCE_ALREADY_EXISTS if an experiment with the given name exists.

 Arguments:
 NAME: Experiment name.`
@@ -183,6 +183,7 @@ func newCreateRun() *cobra.Command {
 cmd.Flags().Var(&createRunJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 cmd.Flags().StringVar(&createRunReq.ExperimentId, "experiment-id", createRunReq.ExperimentId, `ID of the associated experiment.`)
+cmd.Flags().StringVar(&createRunReq.RunName, "run-name", createRunReq.RunName, `The name of the run.`)
 cmd.Flags().Int64Var(&createRunReq.StartTime, "start-time", createRunReq.StartTime, `Unix timestamp in milliseconds of when the run started.`)
 // TODO: array: tags
 cmd.Flags().StringVar(&createRunReq.UserId, "user-id", createRunReq.UserId, `ID of the user executing the run.`)
@@ -193,7 +194,7 @@ func newCreateRun() *cobra.Command {

 Creates a new run within an experiment. A run is usually a single execution of
 a machine learning or data ETL pipeline. MLflow uses runs to track the
-mlflowParam, mlflowMetric and mlflowRunTag associated with a single
+mlflowParam, mlflowMetric, and mlflowRunTag associated with a single
 execution.`

 cmd.Annotations = make(map[string]string)
@@ -263,7 +264,7 @@ func newDeleteExperiment() *cobra.Command {
 cmd.Long = `Delete an experiment.

 Marks an experiment and associated metadata, runs, metrics, params, and tags
-for deletion. If the experiment uses FileStore, artifacts associated with
+for deletion. If the experiment uses FileStore, artifacts associated with the
 experiment are also deleted.

 Arguments:
@@ -431,7 +432,6 @@ func newDeleteRuns() *cobra.Command {
 Bulk delete runs in an experiment that were created prior to or at the
 specified timestamp. Deletes at most max_runs per request. To call this API
 from a Databricks Notebook in Python, you can use the client code snippet on
-https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete.

 Arguments:
 EXPERIMENT_ID: The ID of the experiment containing the runs to delete.
@@ -518,8 +518,8 @@ func newDeleteTag() *cobra.Command {
 cmd.Flags().Var(&deleteTagJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 cmd.Use = "delete-tag RUN_ID KEY"
-cmd.Short = `Delete a tag.`
-cmd.Long = `Delete a tag.
+cmd.Short = `Delete a tag on a run.`
+cmd.Long = `Delete a tag on a run.

 Deletes a tag on a run. Tags are run metadata that can be updated during a run
 and after a run completes.
@@ -602,8 +602,8 @@ func newGetByName() *cobra.Command {
 // TODO: short flags

 cmd.Use = "get-by-name EXPERIMENT_NAME"
-cmd.Short = `Get metadata.`
-cmd.Long = `Get metadata.
+cmd.Short = `Get an experiment by name.`
+cmd.Long = `Get an experiment by name.

 Gets metadata for an experiment.

@@ -731,8 +731,8 @@ func newGetHistory() *cobra.Command {
 cmd.Flags().StringVar(&getHistoryReq.RunUuid, "run-uuid", getHistoryReq.RunUuid, `[Deprecated, use run_id instead] ID of the run from which to fetch metric values.`)

 cmd.Use = "get-history METRIC_KEY"
-cmd.Short = `Get history of a given metric within a run.`
-cmd.Long = `Get history of a given metric within a run.
+cmd.Short = `Get metric history for a run.`
+cmd.Long = `Get metric history for a run.

 Gets a list of all values for the specified metric for a given run.

@@ -973,12 +973,11 @@ func newListArtifacts() *cobra.Command {
 cmd.Flags().StringVar(&listArtifactsReq.RunUuid, "run-uuid", listArtifactsReq.RunUuid, `[Deprecated, use run_id instead] ID of the run whose artifacts to list.`)

 cmd.Use = "list-artifacts"
-cmd.Short = `Get all artifacts.`
-cmd.Long = `Get all artifacts.
+cmd.Short = `List artifacts.`
+cmd.Long = `List artifacts.

-List artifacts for a run. Takes an optional artifact_path prefix. If it is
-specified, the response contains only artifacts with the specified prefix.
-This API does not support pagination when listing artifacts in UC Volumes. A
+List artifacts for a run. Takes an optional artifact_path prefix which if
+specified, the response contains only artifacts with the specified prefix. A
 maximum of 1000 artifacts will be retrieved for UC Volumes. Please call
 /api/2.0/fs/directories{directory_path} for listing artifacts in UC Volumes,
 which supports pagination. See [List directory contents | Files
@@ -1028,9 +1027,9 @@ func newListExperiments() *cobra.Command {

 // TODO: short flags

-cmd.Flags().IntVar(&listExperimentsReq.MaxResults, "max-results", listExperimentsReq.MaxResults, `Maximum number of experiments desired.`)
+cmd.Flags().Int64Var(&listExperimentsReq.MaxResults, "max-results", listExperimentsReq.MaxResults, `Maximum number of experiments desired.`)
 cmd.Flags().StringVar(&listExperimentsReq.PageToken, "page-token", listExperimentsReq.PageToken, `Token indicating the page of experiments to fetch.`)
-cmd.Flags().StringVar(&listExperimentsReq.ViewType, "view-type", listExperimentsReq.ViewType, `Qualifier for type of experiments to be returned.`)
+cmd.Flags().Var(&listExperimentsReq.ViewType, "view-type", `Qualifier for type of experiments to be returned. Supported values: [ACTIVE_ONLY, ALL, DELETED_ONLY]`)

 cmd.Use = "list-experiments"
 cmd.Short = `List experiments.`
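The swap above from StringVar to Var for --view-type is what produces the "Supported values: [...]" validation: pflag can bind any type that implements its Value interface (String, Set, Type), and the SDK's generated enum types do. A minimal self-contained sketch of that mechanism follows; the viewType type here is illustrative, not the SDK's generated ViewType.

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

// viewType is an illustrative enum flag; the generated command binds the
// SDK's ViewType, which implements the same three methods.
type viewType string

func (v *viewType) String() string { return string(*v) }

func (v *viewType) Set(s string) error {
	switch strings.ToUpper(s) {
	case "ACTIVE_ONLY", "ALL", "DELETED_ONLY":
		*v = viewType(strings.ToUpper(s))
		return nil
	}
	return fmt.Errorf("unsupported view type %q (expected ACTIVE_ONLY, ALL, or DELETED_ONLY)", s)
}

func (v *viewType) Type() string { return "ViewType" }

var _ pflag.Value = (*viewType)(nil) // compile-time check that viewType is a flag value

func main() {
	var vt viewType
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Var(&vt, "view-type", "Qualifier for type of experiments to be returned.")
	if err := fs.Parse([]string{"--view-type", "active_only"}); err != nil {
		panic(err)
	}
	fmt.Println(vt) // ACTIVE_ONLY
}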
@@ -1090,8 +1089,8 @@ func newLogBatch() *cobra.Command {
 // TODO: array: tags

 cmd.Use = "log-batch"
-cmd.Short = `Log a batch.`
-cmd.Long = `Log a batch.
+cmd.Short = `Log a batch of metrics/params/tags for a run.`
+cmd.Long = `Log a batch of metrics/params/tags for a run.

 Logs a batch of metrics, params, and tags for a run. If any data failed to be
 persisted, the server will respond with an error (non-200 status code).
@@ -1120,8 +1119,13 @@ func newLogBatch() *cobra.Command {
 Request Limits ------------------------------- A single JSON-serialized API
 request may be up to 1 MB in size and contain:

-* No more than 1000 metrics, params, and tags in total * Up to 1000 metrics *
-Up to 100 params * Up to 100 tags
+* No more than 1000 metrics, params, and tags in total
+
+* Up to 1000 metrics
+
+* Up to 100 params
+
+* Up to 100 tags

 For example, a valid request might contain 900 metrics, 50 params, and 50
 tags, but logging 900 metrics, 50 params, and 51 tags is invalid.
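The limits reformatted above combine a total cap with per-type caps, which is why the 900/50/51 example in the help text is rejected even though each individual cap is met. A small stand-alone sketch of that arithmetic (client-side illustration only; the authoritative checks happen on the server):

package main

import "fmt"

// validateBatchSize mirrors the documented count limits for log-batch;
// key/value length limits are omitted here for brevity.
func validateBatchSize(metrics, params, tags int) error {
	switch {
	case metrics+params+tags > 1000:
		return fmt.Errorf("no more than 1000 metrics, params, and tags in total (got %d)", metrics+params+tags)
	case metrics > 1000:
		return fmt.Errorf("up to 1000 metrics (got %d)", metrics)
	case params > 100:
		return fmt.Errorf("up to 100 params (got %d)", params)
	case tags > 100:
		return fmt.Errorf("up to 100 tags (got %d)", tags)
	}
	return nil
}

func main() {
	fmt.Println(validateBatchSize(900, 50, 50)) // <nil>: within every limit
	fmt.Println(validateBatchSize(900, 50, 51)) // error: 1001 items exceed the 1000-item total
}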
@@ -1129,6 +1133,7 @@ func newLogBatch() *cobra.Command {
 The following limits also apply to metric, param, and tag keys and values:

 * Metric keys, param keys, and tag keys can be up to 250 characters in length
+
 * Parameter and tag values can be up to 250 characters in length`

 cmd.Annotations = make(map[string]string)
@@ -1194,19 +1199,30 @@ func newLogInputs() *cobra.Command {
 cmd.Flags().Var(&logInputsJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 // TODO: array: datasets
-cmd.Flags().StringVar(&logInputsReq.RunId, "run-id", logInputsReq.RunId, `ID of the run to log under.`)

-cmd.Use = "log-inputs"
+cmd.Use = "log-inputs RUN_ID"
 cmd.Short = `Log inputs to a run.`
 cmd.Long = `Log inputs to a run.

 **NOTE:** Experimental: This API may change or be removed in a future release
-without warning.`
+without warning.
+
+Logs inputs, such as datasets and models, to an MLflow Run.
+
+Arguments:
+RUN_ID: ID of the run to log under`

 cmd.Annotations = make(map[string]string)

 cmd.Args = func(cmd *cobra.Command, args []string) error {
-check := root.ExactArgs(0)
+if cmd.Flags().Changed("json") {
+err := root.ExactArgs(0)(cmd, args)
+if err != nil {
+return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input")
+}
+return nil
+}
+check := root.ExactArgs(1)
 return check(cmd, args)
 }

@@ -1227,6 +1243,9 @@ func newLogInputs() *cobra.Command {
 }
 }
 }
+if !cmd.Flags().Changed("json") {
+logInputsReq.RunId = args[0]
+}

 err = w.Experiments.LogInputs(ctx, logInputsReq)
 if err != nil {
@@ -1270,11 +1289,11 @@ func newLogMetric() *cobra.Command {
 cmd.Flags().Int64Var(&logMetricReq.Step, "step", logMetricReq.Step, `Step at which to log the metric.`)

 cmd.Use = "log-metric KEY VALUE TIMESTAMP"
-cmd.Short = `Log a metric.`
-cmd.Long = `Log a metric.
+cmd.Short = `Log a metric for a run.`
+cmd.Long = `Log a metric for a run.

-Logs a metric for a run. A metric is a key-value pair (string key, float
-value) with an associated timestamp. Examples include the various metrics that
+Log a metric for a run. A metric is a key-value pair (string key, float value)
+with an associated timestamp. Examples include the various metrics that
 represent ML model accuracy. A metric can be logged multiple times.

 Arguments:
@@ -1442,8 +1461,8 @@ func newLogParam() *cobra.Command {
 cmd.Flags().StringVar(&logParamReq.RunUuid, "run-uuid", logParamReq.RunUuid, `[Deprecated, use run_id instead] ID of the run under which to log the param.`)

 cmd.Use = "log-param KEY VALUE"
-cmd.Short = `Log a param.`
-cmd.Long = `Log a param.
+cmd.Short = `Log a param for a run.`
+cmd.Long = `Log a param for a run.

 Logs a param used for a run. A param is a key-value pair (string key, string
 value). Examples include hyperparameters used for ML model training and
@@ -1530,8 +1549,8 @@ func newRestoreExperiment() *cobra.Command {
 cmd.Flags().Var(&restoreExperimentJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 cmd.Use = "restore-experiment EXPERIMENT_ID"
-cmd.Short = `Restores an experiment.`
-cmd.Long = `Restores an experiment.
+cmd.Short = `Restore an experiment.`
+cmd.Long = `Restore an experiment.

 Restore an experiment marked for deletion. This also restores associated
 metadata, runs, metrics, params, and tags. If experiment uses FileStore,
@@ -1619,7 +1638,11 @@ func newRestoreRun() *cobra.Command {
 cmd.Short = `Restore a run.`
 cmd.Long = `Restore a run.

-Restores a deleted run.
+Restores a deleted run. This also restores associated metadata, runs, metrics,
+params, and tags.
+
+Throws RESOURCE_DOES_NOT_EXIST if the run was never created or was
+permanently deleted.

 Arguments:
 RUN_ID: ID of the run to restore.`
@@ -1705,7 +1728,6 @@ func newRestoreRuns() *cobra.Command {
 Bulk restore runs in an experiment that were deleted no earlier than the
 specified timestamp. Restores at most max_runs per request. To call this API
 from a Databricks Notebook in Python, you can use the client code snippet on
-https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore.

 Arguments:
 EXPERIMENT_ID: The ID of the experiment containing the runs to restore.
@@ -1875,7 +1897,7 @@ func newSearchRuns() *cobra.Command {

 Searches for runs that satisfy expressions.

-Search expressions can use mlflowMetric and mlflowParam keys.",`
+Search expressions can use mlflowMetric and mlflowParam keys.`

 cmd.Annotations = make(map[string]string)

@@ -1937,18 +1959,16 @@ func newSetExperimentTag() *cobra.Command {
 cmd.Flags().Var(&setExperimentTagJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 cmd.Use = "set-experiment-tag EXPERIMENT_ID KEY VALUE"
-cmd.Short = `Set a tag.`
-cmd.Long = `Set a tag.
+cmd.Short = `Set a tag for an experiment.`
+cmd.Long = `Set a tag for an experiment.

 Sets a tag on an experiment. Experiment tags are metadata that can be updated.

 Arguments:
 EXPERIMENT_ID: ID of the experiment under which to log the tag. Must be provided.
-KEY: Name of the tag. Maximum size depends on storage backend. All storage
-backends are guaranteed to support key values up to 250 bytes in size.
-VALUE: String value of the tag being logged. Maximum size depends on storage
-backend. All storage backends are guaranteed to support key values up to
-5000 bytes in size.`
+KEY: Name of the tag. Keys up to 250 bytes in size are supported.
+VALUE: String value of the tag being logged. Values up to 64KB in size are
+supported.`

 cmd.Annotations = make(map[string]string)

@@ -2108,18 +2128,16 @@ func newSetTag() *cobra.Command {
 cmd.Flags().StringVar(&setTagReq.RunUuid, "run-uuid", setTagReq.RunUuid, `[Deprecated, use run_id instead] ID of the run under which to log the tag.`)

 cmd.Use = "set-tag KEY VALUE"
-cmd.Short = `Set a tag.`
-cmd.Long = `Set a tag.
+cmd.Short = `Set a tag for a run.`
+cmd.Long = `Set a tag for a run.

 Sets a tag on a run. Tags are run metadata that can be updated during a run
 and after a run completes.

 Arguments:
-KEY: Name of the tag. Maximum size depends on storage backend. All storage
-backends are guaranteed to support key values up to 250 bytes in size.
-VALUE: String value of the tag being logged. Maximum size depends on storage
-backend. All storage backends are guaranteed to support key values up to
-5000 bytes in size.`
+KEY: Name of the tag. Keys up to 250 bytes in size are supported.
+VALUE: String value of the tag being logged. Values up to 64KB in size are
+supported.`

 cmd.Annotations = make(map[string]string)

@@ -2356,6 +2374,7 @@ func newUpdateRun() *cobra.Command {

 cmd.Flags().Int64Var(&updateRunReq.EndTime, "end-time", updateRunReq.EndTime, `Unix timestamp in milliseconds of when the run ended.`)
 cmd.Flags().StringVar(&updateRunReq.RunId, "run-id", updateRunReq.RunId, `ID of the run to update.`)
+cmd.Flags().StringVar(&updateRunReq.RunName, "run-name", updateRunReq.RunName, `Updated name of the run.`)
 cmd.Flags().StringVar(&updateRunReq.RunUuid, "run-uuid", updateRunReq.RunUuid, `[Deprecated, use run_id instead] ID of the run to update.`)
 cmd.Flags().Var(&updateRunReq.Status, "status", `Updated status of the run. Supported values: [FAILED, FINISHED, KILLED, RUNNING, SCHEDULED]`)

@@ -41,6 +41,7 @@ func New() *cobra.Command {
 cmd.AddCommand(newGetMessage())
 cmd.AddCommand(newGetMessageQueryResult())
 cmd.AddCommand(newGetMessageQueryResultByAttachment())
+cmd.AddCommand(newGetSpace())
 cmd.AddCommand(newStartConversation())

 // Apply optional overrides to this command.
@@ -78,8 +79,9 @@ func newCreateMessage() *cobra.Command {
 cmd.Short = `Create conversation message.`
 cmd.Long = `Create conversation message.

-Create new message in [conversation](:method:genie/startconversation). The AI
-response uses all previously created messages in the conversation to respond.
+Create new message in a [conversation](:method:genie/startconversation). The
+AI response uses all previously created messages in the conversation to
+respond.

 Arguments:
 SPACE_ID: The ID associated with the Genie space where the conversation is started.
@@ -298,8 +300,8 @@ func newGetMessageQueryResult() *cobra.Command {
 // TODO: short flags

 cmd.Use = "get-message-query-result SPACE_ID CONVERSATION_ID MESSAGE_ID"
-cmd.Short = `Get conversation message SQL query result.`
-cmd.Long = `Get conversation message SQL query result.
+cmd.Short = `[Deprecated] Get conversation message SQL query result.`
+cmd.Long = `[Deprecated] Get conversation message SQL query result.

 Get the result of SQL query if the message has a query attachment. This is
 only available if a message has a query attachment and the message status is
@@ -362,11 +364,12 @@ func newGetMessageQueryResultByAttachment() *cobra.Command {
 // TODO: short flags

 cmd.Use = "get-message-query-result-by-attachment SPACE_ID CONVERSATION_ID MESSAGE_ID ATTACHMENT_ID"
-cmd.Short = `Get conversation message SQL query result by attachment id.`
-cmd.Long = `Get conversation message SQL query result by attachment id.
+cmd.Short = `Get conversation message SQL query result.`
+cmd.Long = `Get conversation message SQL query result.

-Get the result of SQL query by attachment id This is only available if a
-message has a query attachment and the message status is EXECUTING_QUERY.
+Get the result of SQL query if the message has a query attachment. This is
+only available if a message has a query attachment and the message status is
+EXECUTING_QUERY OR COMPLETED.

 Arguments:
 SPACE_ID: Genie space ID
@@ -410,6 +413,64 @@ func newGetMessageQueryResultByAttachment() *cobra.Command {
 return cmd
 }

+// start get-space command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var getSpaceOverrides []func(
+*cobra.Command,
+*dashboards.GenieGetSpaceRequest,
+)
+
+func newGetSpace() *cobra.Command {
+cmd := &cobra.Command{}
+
+var getSpaceReq dashboards.GenieGetSpaceRequest
+
+// TODO: short flags
+
+cmd.Use = "get-space SPACE_ID"
+cmd.Short = `Get details of a Genie Space.`
+cmd.Long = `Get details of a Genie Space.
+
+Get a Genie Space.
+
+Arguments:
+SPACE_ID: The ID associated with the Genie space`
+
+cmd.Annotations = make(map[string]string)
+
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+check := root.ExactArgs(1)
+return check(cmd, args)
+}
+
+cmd.PreRunE = root.MustWorkspaceClient
+cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+ctx := cmd.Context()
+w := root.WorkspaceClient(ctx)
+
+getSpaceReq.SpaceId = args[0]
+
+response, err := w.Genie.GetSpace(ctx, getSpaceReq)
+if err != nil {
+return err
+}
+return cmdio.Render(ctx, response)
+}
+
+// Disable completions since they are not applicable.
+// Can be overridden by manual implementation in `override.go`.
+cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+// Apply optional overrides to this command.
+for _, fn := range getSpaceOverrides {
+fn(cmd, &getSpaceReq)
+}
+
+return cmd
+}
+
 // start start-conversation command

 // Slice with functions to override default command behavior.
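For orientation, the generated get-space command above is a thin wrapper over a single SDK call. A minimal sketch of issuing the same call directly with databricks-sdk-go (assuming the v0.59.0 SDK this change pins; the space ID is a placeholder, and the request type and field names are taken from the generated code above):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	// Picks up workspace credentials from the environment or ~/.databrickscfg.
	w := databricks.Must(databricks.NewWorkspaceClient())

	// "01ef-example-space" is a placeholder space ID.
	space, err := w.Genie.GetSpace(ctx, dashboards.GenieGetSpaceRequest{
		SpaceId: "01ef-example-space",
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", space)
}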
@@ -34,6 +34,7 @@ func New() *cobra.Command {
 cmd.AddCommand(newDelete())
 cmd.AddCommand(newGet())
 cmd.AddCommand(newList())
+cmd.AddCommand(newListProviderShareAssets())
 cmd.AddCommand(newListShares())
 cmd.AddCommand(newUpdate())

@@ -337,6 +338,72 @@ func newList() *cobra.Command {
 return cmd
 }

+// start list-provider-share-assets command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var listProviderShareAssetsOverrides []func(
+*cobra.Command,
+*sharing.ListProviderShareAssetsRequest,
+)
+
+func newListProviderShareAssets() *cobra.Command {
+cmd := &cobra.Command{}
+
+var listProviderShareAssetsReq sharing.ListProviderShareAssetsRequest
+
+// TODO: short flags
+
+cmd.Flags().IntVar(&listProviderShareAssetsReq.FunctionMaxResults, "function-max-results", listProviderShareAssetsReq.FunctionMaxResults, `Maximum number of functions to return.`)
+cmd.Flags().IntVar(&listProviderShareAssetsReq.NotebookMaxResults, "notebook-max-results", listProviderShareAssetsReq.NotebookMaxResults, `Maximum number of notebooks to return.`)
+cmd.Flags().IntVar(&listProviderShareAssetsReq.TableMaxResults, "table-max-results", listProviderShareAssetsReq.TableMaxResults, `Maximum number of tables to return.`)
+cmd.Flags().IntVar(&listProviderShareAssetsReq.VolumeMaxResults, "volume-max-results", listProviderShareAssetsReq.VolumeMaxResults, `Maximum number of volumes to return.`)
+
+cmd.Use = "list-provider-share-assets PROVIDER_NAME SHARE_NAME"
+cmd.Short = `List assets by provider share.`
+cmd.Long = `List assets by provider share.
+
+Get arrays of assets associated with a specified provider's share. The caller
+is the recipient of the share.
+
+Arguments:
+PROVIDER_NAME: The name of the provider who owns the share.
+SHARE_NAME: The name of the share.`
+
+cmd.Annotations = make(map[string]string)
+
+cmd.Args = func(cmd *cobra.Command, args []string) error {
+check := root.ExactArgs(2)
+return check(cmd, args)
+}
+
+cmd.PreRunE = root.MustWorkspaceClient
+cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+ctx := cmd.Context()
+w := root.WorkspaceClient(ctx)
+
+listProviderShareAssetsReq.ProviderName = args[0]
+listProviderShareAssetsReq.ShareName = args[1]
+
+response, err := w.Providers.ListProviderShareAssets(ctx, listProviderShareAssetsReq)
+if err != nil {
+return err
+}
+return cmdio.Render(ctx, response)
+}
+
+// Disable completions since they are not applicable.
+// Can be overridden by manual implementation in `override.go`.
+cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+// Apply optional overrides to this command.
+for _, fn := range listProviderShareAssetsOverrides {
+fn(cmd, &listProviderShareAssetsReq)
+}
+
+return cmd
+}
+
 // start list-shares command

 // Slice with functions to override default command behavior.
@@ -484,8 +484,6 @@ func newUpdatePermissions() *cobra.Command {
 cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 // TODO: array: changes
-cmd.Flags().IntVar(&updatePermissionsReq.MaxResults, "max-results", updatePermissionsReq.MaxResults, `Maximum number of permissions to return.`)
-cmd.Flags().StringVar(&updatePermissionsReq.PageToken, "page-token", updatePermissionsReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)

 cmd.Use = "update-permissions NAME"
 cmd.Short = `Update permissions.`
@@ -494,8 +492,8 @@ func newUpdatePermissions() *cobra.Command {
 Updates the permissions for a data share in the metastore. The caller must be
 a metastore admin or an owner of the share.

-For new recipient grants, the user must also be the owner of the recipients.
-recipient revocations do not require additional privileges.
+For new recipient grants, the user must also be the recipient owner or
+metastore admin. recipient revocations do not require additional privileges.

 Arguments:
 NAME: The name of the share.`
@@ -526,11 +524,11 @@ func newUpdatePermissions() *cobra.Command {
 }
 updatePermissionsReq.Name = args[0]

-err = w.Shares.UpdatePermissions(ctx, updatePermissionsReq)
+response, err := w.Shares.UpdatePermissions(ctx, updatePermissionsReq)
 if err != nil {
 return err
 }
-return nil
+return cmdio.Render(ctx, response)
 }

 // Disable completions since they are not applicable.
@@ -426,6 +426,7 @@ func newQueryIndex() *cobra.Command {
 // TODO: short flags
 cmd.Flags().Var(&queryIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`)

+// TODO: array: columns_to_rerank
 cmd.Flags().StringVar(&queryIndexReq.FiltersJson, "filters-json", queryIndexReq.FiltersJson, `JSON string representing query filters.`)
 cmd.Flags().IntVar(&queryIndexReq.NumResults, "num-results", queryIndexReq.NumResults, `Number of results to return.`)
 cmd.Flags().StringVar(&queryIndexReq.QueryText, "query-text", queryIndexReq.QueryText, `Query text.`)
@@ -177,6 +177,7 @@ func newExport() *cobra.Command {
 DBC,
 HTML,
 JUPYTER,
+RAW,
 R_MARKDOWN,
 SOURCE,
 ]`)
@@ -539,7 +540,7 @@ func newList() *cobra.Command {

 // TODO: short flags

-cmd.Flags().IntVar(&listReq.NotebooksModifiedAfter, "notebooks-modified-after", listReq.NotebooksModifiedAfter, `UTC timestamp in milliseconds.`)
+cmd.Flags().Int64Var(&listReq.NotebooksModifiedAfter, "notebooks-modified-after", listReq.NotebooksModifiedAfter, `UTC timestamp in milliseconds.`)

 cmd.Use = "list PATH"
 cmd.Short = `List contents.`
go.mod
@@ -9,7 +9,7 @@ require (
 github.com/BurntSushi/toml v1.4.0 // MIT
 github.com/Masterminds/semver/v3 v3.3.1 // MIT
 github.com/briandowns/spinner v1.23.1 // Apache 2.0
-github.com/databricks/databricks-sdk-go v0.58.1 // Apache 2.0
+github.com/databricks/databricks-sdk-go v0.59.0 // Apache 2.0
 github.com/fatih/color v1.18.0 // MIT
 github.com/google/uuid v1.6.0 // BSD-3-Clause
 github.com/gorilla/mux v1.8.1 // BSD 3-Clause
@@ -34,8 +34,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
 github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.58.1 h1:dUs9ZmFi7hYiL3NwLSAbxqQu66E3BzwM8EU/wcCTJ10=
-github.com/databricks/databricks-sdk-go v0.58.1/go.mod h1:JpLizplEs+up9/Z4Xf2x++o3sM9eTTWFGzIXAptKJzI=
+github.com/databricks/databricks-sdk-go v0.59.0 h1:m87rbnoeO7A6+QKo4QzwyPE5AzEeGvopEaavn3F5y/o=
+github.com/databricks/databricks-sdk-go v0.59.0/go.mod h1:JpLizplEs+up9/Z4Xf2x++o3sM9eTTWFGzIXAptKJzI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -257,8 +257,9 @@ func (w *FilesClient) deleteDirectory(ctx context.Context, name string) error {
 // The directory delete API returns a 400 if the directory is not empty
 if aerr.StatusCode == http.StatusBadRequest {
 reasons := []string{}
-for _, detail := range aerr.Details {
-reasons = append(reasons, detail.Reason)
+details := aerr.ErrorDetails()
+if details.ErrorInfo != nil {
+reasons = append(reasons, details.ErrorInfo.Reason)
 }
 // Error code 400 is generic and can be returned for other reasons. Make
 // sure one of the reasons for the error is that the directory is not empty.
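The hunk above replaces iteration over the deprecated Details slice with the newer ErrorDetails() accessor. A minimal sketch of the same pattern in isolation, assuming the apierr package from the SDK version this change pins and the ErrorDetails()/ErrorInfo shape used in the hunk:

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/databricks/databricks-sdk-go/apierr"
)

// badRequestReasons collects the ErrorInfo reason from a 400 response, the
// same way the updated deleteDirectory code does before deciding whether the
// failure means "directory not empty".
func badRequestReasons(err error) []string {
	var aerr *apierr.APIError
	if !errors.As(err, &aerr) || aerr.StatusCode != http.StatusBadRequest {
		return nil
	}
	reasons := []string{}
	details := aerr.ErrorDetails()
	if details.ErrorInfo != nil {
		reasons = append(reasons, details.ErrorInfo.Reason)
	}
	return reasons
}

func main() {
	// A plain error is not an API error, so no reasons are extracted.
	fmt.Println(badRequestReasons(errors.New("not an API error"))) // []
}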
@@ -91,6 +91,7 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri
 })

 b.Tagging = tags.ForCloud(w.Config)
+b.SetWorkpaceClient(w)
 b.WorkspaceClient()

 diags = phases.Initialize(ctx, b)