# Regenerate the CLI using the same OpenAPI spec as the SDK (#1205)

## Changes
The OpenAPI spec used to generate the CLI doesn't match the one used for the
SDK version that the CLI currently depends on. This PR regenerates the CLI
from the same version of the OpenAPI spec that the SDK uses as of v0.30.1.

## Tests
<!-- How is this tested? -->


@ -1 +1 @@
-a7a9dc025bb80303e676bf3708942c6aa06689f1
+e05401ed5dd4974c5333d737ec308a7d451f749f

.gitattributes

@ -50,6 +50,7 @@ cmd/workspace/instance-pools/instance-pools.go linguist-generated=true
cmd/workspace/instance-profiles/instance-profiles.go linguist-generated=true
cmd/workspace/ip-access-lists/ip-access-lists.go linguist-generated=true
cmd/workspace/jobs/jobs.go linguist-generated=true
cmd/workspace/lakehouse-monitors/lakehouse-monitors.go linguist-generated=true
cmd/workspace/lakeview/lakeview.go linguist-generated=true
cmd/workspace/libraries/libraries.go linguist-generated=true
cmd/workspace/metastores/metastores.go linguist-generated=true


@ -9,6 +9,9 @@
"build": {
"description": ""
},
"executable": {
"description": ""
},
"files": {
"description": "",
"items": {
@ -35,6 +38,14 @@
"compute_id": {
"description": ""
},
"deployment": {
"description": "",
"properties": {
"fail_on_active_runs": {
"description": ""
}
}
},
"git": {
"description": "",
"properties": {
@ -803,7 +814,7 @@
}
},
"existing_cluster_id": {
"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability."
"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability."
},
"health": {
"description": "",
@ -1210,7 +1221,7 @@
"description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
},
"source": {
"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
}
}
},
@ -1312,7 +1323,7 @@
"description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
},
"source": {
"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
}
}
},
@ -2093,6 +2104,72 @@
}
}
},
"init_scripts": {
"description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.",
"items": {
"description": "",
"properties": {
"dbfs": {
"description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`",
"properties": {
"destination": {
"description": "dbfs destination, e.g. `dbfs:/my/path`"
}
}
},
"file": {
"description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`",
"properties": {
"destination": {
"description": "local file destination, e.g. `file:/my/local/file.sh`"
}
}
},
"s3": {
"description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.",
"properties": {
"canned_acl": {
"description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs."
},
"destination": {
"description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs."
},
"enable_encryption": {
"description": "(Optional) Flag to enable server side encryption, `false` by default."
},
"encryption_type": {
"description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`."
},
"endpoint": {
"description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used."
},
"kms_key": {
"description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`."
},
"region": {
"description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used."
}
}
},
"volumes": {
"description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`",
"properties": {
"destination": {
"description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`"
}
}
},
"workspace": {
"description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`",
"properties": {
"destination": {
"description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`"
}
}
}
}
}
},
"instance_pool_id": {
"description": "The optional ID of the instance pool to which the cluster belongs."
},
@ -2368,6 +2445,9 @@
"build": {
"description": ""
},
"executable": {
"description": ""
},
"files": {
"description": "",
"items": {
@ -2394,6 +2474,14 @@
"compute_id": {
"description": ""
},
"deployment": {
"description": "",
"properties": {
"fail_on_active_runs": {
"description": ""
}
}
},
"git": {
"description": "",
"properties": {
@ -3162,7 +3250,7 @@
}
},
"existing_cluster_id": {
"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability."
"description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability."
},
"health": {
"description": "",
@ -3569,7 +3657,7 @@
"description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
},
"source": {
"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
}
}
},
@ -3671,7 +3759,7 @@
"description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
},
"source": {
"description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
}
}
},
@ -4452,6 +4540,72 @@
}
}
},
"init_scripts": {
"description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.",
"items": {
"description": "",
"properties": {
"dbfs": {
"description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`",
"properties": {
"destination": {
"description": "dbfs destination, e.g. `dbfs:/my/path`"
}
}
},
"file": {
"description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`",
"properties": {
"destination": {
"description": "local file destination, e.g. `file:/my/local/file.sh`"
}
}
},
"s3": {
"description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.",
"properties": {
"canned_acl": {
"description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs."
},
"destination": {
"description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs."
},
"enable_encryption": {
"description": "(Optional) Flag to enable server side encryption, `false` by default."
},
"encryption_type": {
"description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`."
},
"endpoint": {
"description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used."
},
"kms_key": {
"description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`."
},
"region": {
"description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used."
}
}
},
"volumes": {
"description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`",
"properties": {
"destination": {
"description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`"
}
}
},
"workspace": {
"description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`",
"properties": {
"destination": {
"description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`"
}
}
}
}
}
},
"instance_pool_id": {
"description": "The optional ID of the instance pool to which the cluster belongs."
},
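For orientation, the init script destinations documented above (dbfs, file, s3, volumes, workspace) correspond to structured fields in the Go SDK. The sketch below is illustrative only; the `compute.InitScriptInfo`, `compute.WorkspaceStorageInfo`, and `compute.VolumesStorageInfo` names are assumptions taken from databricks-sdk-go and do not appear in this diff.

```go
package example

import "github.com/databricks/databricks-sdk-go/service/compute"

// exampleInitScripts builds an init_scripts list with two destinations: a
// workspace file and a Unity Catalog volume. Per the schema text above, the
// scripts run sequentially in the order they are listed.
// NOTE: type and field names are assumptions, not part of this diff.
func exampleInitScripts() []compute.InitScriptInfo {
	return []compute.InitScriptInfo{
		{Workspace: &compute.WorkspaceStorageInfo{
			Destination: "/Users/user1@databricks.com/my-init.sh",
		}},
		{Volumes: &compute.VolumesStorageInfo{
			Destination: "/Volumes/my-init.sh",
		}},
	}
}
```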


@ -26,10 +26,11 @@ func New() *cobra.Command {
limit their use to specific users and groups.
With cluster policies, you can: - Auto-install cluster libraries on the next
restart by listing them in the policy's "libraries" field. - Limit users to
creating clusters with the prescribed settings. - Simplify the user interface,
enabling more users to create clusters, by fixing and hiding some fields. -
Manage costs by setting limits on attributes that impact the hourly rate.
restart by listing them in the policy's "libraries" field (Public Preview). -
Limit users to creating clusters with the prescribed settings. - Simplify the
user interface, enabling more users to create clusters, by fixing and hiding
some fields. - Manage costs by setting limits on attributes that impact the
hourly rate.
Cluster policy permissions limit which policies a user can select in the
Policy drop-down when the user creates a cluster: - A user who has


@ -84,8 +84,9 @@ func newChangeOwner() *cobra.Command {
cmd.Short = `Change cluster owner.`
cmd.Long = `Change cluster owner.
Change the owner of the cluster. You must be an admin to perform this
operation.
Change the owner of the cluster. You must be an admin and the cluster must be
terminated to perform this operation. The service principal application ID can
be supplied as an argument to owner_username.
Arguments:
CLUSTER_ID: <needs content added>
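The updated help text adds two requirements: the cluster must be terminated, and a service principal application ID may be passed as owner_username. A minimal sketch of the equivalent SDK call, assuming the `compute.ChangeClusterOwner` request type from databricks-sdk-go (the type name is not visible in this hunk):

```go
package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

// changeOwner reassigns a terminated cluster to a service principal by
// supplying its application ID as the new owner username.
// The IDs below are hypothetical placeholders.
func changeOwner(ctx context.Context, w *databricks.WorkspaceClient) error {
	return w.Clusters.ChangeOwner(ctx, compute.ChangeClusterOwner{
		ClusterId:     "0213-160000-example",
		OwnerUsername: "11111111-2222-3333-4444-555555555555",
	})
}
```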

cmd/workspace/cmd.go

@ -27,6 +27,7 @@ import (
instance_profiles "github.com/databricks/cli/cmd/workspace/instance-profiles"
ip_access_lists "github.com/databricks/cli/cmd/workspace/ip-access-lists"
jobs "github.com/databricks/cli/cmd/workspace/jobs"
lakehouse_monitors "github.com/databricks/cli/cmd/workspace/lakehouse-monitors"
lakeview "github.com/databricks/cli/cmd/workspace/lakeview"
libraries "github.com/databricks/cli/cmd/workspace/libraries"
metastores "github.com/databricks/cli/cmd/workspace/metastores"
@ -93,6 +94,7 @@ func All() []*cobra.Command {
out = append(out, instance_profiles.New())
out = append(out, ip_access_lists.New())
out = append(out, jobs.New())
out = append(out, lakehouse_monitors.New())
out = append(out, lakeview.New())
out = append(out, libraries.New())
out = append(out, metastores.New())


@ -122,7 +122,7 @@ func newDelete() *cobra.Command {
cmd.Use = "delete DASHBOARD_ID"
cmd.Short = `Remove a dashboard.`
cmd.Long = `Remove a dashboard.
Moves a dashboard to the trash. Trashed dashboards do not appear in list views
or searches, and cannot be shared.`
@ -196,7 +196,7 @@ func newGet() *cobra.Command {
cmd.Use = "get DASHBOARD_ID"
cmd.Short = `Retrieve a definition.`
cmd.Long = `Retrieve a definition.
Returns a JSON representation of a dashboard object, including its
visualization and query objects.`
@ -275,7 +275,7 @@ func newList() *cobra.Command {
cmd.Use = "list"
cmd.Short = `Get dashboard objects.`
cmd.Long = `Get dashboard objects.
Fetch a paginated list of dashboard objects.`
cmd.Annotations = make(map[string]string)
@ -334,7 +334,7 @@ func newRestore() *cobra.Command {
cmd.Use = "restore DASHBOARD_ID"
cmd.Short = `Restore a dashboard.`
cmd.Long = `Restore a dashboard.
A restored dashboard appears in list views and searches and can be shared.`
cmd.Annotations = make(map[string]string)
@ -388,4 +388,91 @@ func init() {
})
}
// start update command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var updateOverrides []func(
*cobra.Command,
*sql.DashboardEditContent,
)
func newUpdate() *cobra.Command {
cmd := &cobra.Command{}
var updateReq sql.DashboardEditContent
var updateJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this dashboard that appears in list views and at the top of the dashboard page.`)
cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`)
cmd.Use = "update DASHBOARD_ID"
cmd.Short = `Change a dashboard definition.`
cmd.Long = `Change a dashboard definition.
Modify this dashboard definition. This operation only affects attributes of
the dashboard object. It does not add, modify, or remove widgets.
**Note**: You cannot undo this operation.`
cmd.Annotations = make(map[string]string)
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = updateJson.Unmarshal(&updateReq)
if err != nil {
return err
}
}
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No DASHBOARD_ID argument specified. Loading names for Dashboards drop-down."
names, err := w.Dashboards.DashboardNameToIdMap(ctx, sql.ListDashboardsRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Dashboards drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have ")
}
updateReq.DashboardId = args[0]
response, err := w.Dashboards.Update(ctx, updateReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range updateOverrides {
fn(cmd, &updateReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newUpdate())
})
}
// end service Dashboards
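As the generated code above shows, `update` unmarshals the optional `--json` payload into a `sql.DashboardEditContent`, prompts for the dashboard ID if it is missing, and then calls `w.Dashboards.Update`. A minimal sketch of the same call made directly with the SDK, using placeholder values:

```go
package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sql"
)

// renameDashboard changes only the dashboard title; as the command help
// notes, widgets are not added, modified, or removed.
func renameDashboard(ctx context.Context, w *databricks.WorkspaceClient) error {
	_, err := w.Dashboards.Update(ctx, sql.DashboardEditContent{
		DashboardId: "abc123",            // hypothetical dashboard ID
		Name:        "Quarterly revenue", // new title
	})
	return err
}
```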


@ -397,7 +397,9 @@ func newDeleteRuns() *cobra.Command {
cmd.Long = `Delete runs by creation time.
Bulk delete runs in an experiment that were created prior to or at the
specified timestamp. Deletes at most max_runs per request.
specified timestamp. Deletes at most max_runs per request. To call this API
from a Databricks Notebook in Python, you can use the client code snippet on
https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete.
Arguments:
EXPERIMENT_ID: The ID of the experiment containing the runs to delete.
@ -1721,7 +1723,9 @@ func newRestoreRuns() *cobra.Command {
cmd.Long = `Restore runs by deletion time.
Bulk restore runs in an experiment that were deleted no earlier than the
specified timestamp. Restores at most max_runs per request.
specified timestamp. Restores at most max_runs per request. To call this API
from a Databricks Notebook in Python, you can use the client code snippet on
https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore.
Arguments:
EXPERIMENT_ID: The ID of the experiment containing the runs to restore.


@ -301,7 +301,7 @@ func newList() *cobra.Command {
Get a list of all global init scripts for this workspace. This returns all
properties for each script but **not** the script contents. To retrieve the
contents of a script, use the [get a global init
script](#operation/get-script) operation.`
script](:method:globalinitscripts/get) operation.`
cmd.Annotations = make(map[string]string)


@ -0,0 +1,414 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package lakehouse_monitors
import (
"fmt"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "lakehouse-monitors",
Short: `A monitor computes and monitors data or model quality metrics for a table over time.`,
Long: `A monitor computes and monitors data or model quality metrics for a table over
time. It generates metrics tables and a dashboard that you can use to monitor
table health and set alerts.
Most write operations require the user to be the owner of the table (or its
parent schema or parent catalog). Viewing the dashboard, computed metrics, or
monitor configuration only requires the user to have **SELECT** privileges on
the table (along with **USE_SCHEMA** and **USE_CATALOG**).`,
GroupID: "catalog",
Annotations: map[string]string{
"package": "catalog",
},
}
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start create command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var createOverrides []func(
*cobra.Command,
*catalog.CreateMonitor,
)
func newCreate() *cobra.Command {
cmd := &cobra.Command{}
var createReq catalog.CreateMonitor
var createJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&createReq.BaselineTableName, "baseline-table-name", createReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`)
// TODO: array: custom_metrics
// TODO: complex arg: data_classification_config
// TODO: complex arg: inference_log
// TODO: array: notifications
// TODO: complex arg: schedule
cmd.Flags().BoolVar(&createReq.SkipBuiltinDashboard, "skip-builtin-dashboard", createReq.SkipBuiltinDashboard, `Whether to skip creating a default dashboard summarizing data quality metrics.`)
// TODO: array: slicing_exprs
// TODO: output-only field
// TODO: complex arg: time_series
cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `Optional argument to specify the warehouse for dashboard creation.`)
cmd.Use = "create FULL_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME"
cmd.Short = `Create a table monitor.`
cmd.Long = `Create a table monitor.
Creates a new monitor for the specified table.
The caller must either: 1. be an owner of the table's parent catalog, have
**USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the
table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of
the table's parent schema, and have **SELECT** access on the table. 3. have
the following permissions: - **USE_CATALOG** on the table's parent catalog -
**USE_SCHEMA** on the table's parent schema - be an owner of the table.
Workspace assets, such as the dashboard, will be created in the workspace
where this call was made.
Arguments:
FULL_NAME: Full name of the table.
ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables).
OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := cobra.ExactArgs(1)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. Provide 'assets_dir', 'output_schema_name' in your JSON input")
}
return nil
}
check := cobra.ExactArgs(3)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = createJson.Unmarshal(&createReq)
if err != nil {
return err
}
}
createReq.FullName = args[0]
if !cmd.Flags().Changed("json") {
createReq.AssetsDir = args[1]
}
if !cmd.Flags().Changed("json") {
createReq.OutputSchemaName = args[2]
}
response, err := w.LakehouseMonitors.Create(ctx, createReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range createOverrides {
fn(cmd, &createReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newCreate())
})
}
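The `create` command maps its three positional arguments onto `catalog.CreateMonitor` and calls `w.LakehouseMonitors.Create`, both visible above. A minimal sketch of the equivalent direct SDK call, with hypothetical names; fields such as `time_series` or `inference_log` (see the TODO flags above) are omitted here:

```go
package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

// createMonitor creates a table monitor; monitoring assets such as the
// dashboard are created in the workspace where this call is made.
func createMonitor(ctx context.Context, w *databricks.WorkspaceClient) error {
	_, err := w.LakehouseMonitors.Create(ctx, catalog.CreateMonitor{
		FullName:         "main.sales.orders",            // hypothetical table
		AssetsDir:        "/Workspace/Users/me/monitors", // hypothetical assets dir
		OutputSchemaName: "main.monitoring",              // hypothetical output schema
	})
	return err
}
```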
// start delete command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var deleteOverrides []func(
*cobra.Command,
*catalog.DeleteLakehouseMonitorRequest,
)
func newDelete() *cobra.Command {
cmd := &cobra.Command{}
var deleteReq catalog.DeleteLakehouseMonitorRequest
// TODO: short flags
cmd.Use = "delete FULL_NAME"
cmd.Short = `Delete a table monitor.`
cmd.Long = `Delete a table monitor.
Deletes a monitor for the specified table.
The caller must either: 1. be an owner of the table's parent catalog 2. have
**USE_CATALOG** on the table's parent catalog and be an owner of the table's
parent schema 3. have the following permissions: - **USE_CATALOG** on the
table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
owner of the table.
Additionally, the call must be made from the workspace where the monitor was
created.
Note that the metric tables and dashboard will not be deleted as part of this
call; those assets must be manually cleaned up (if desired).
Arguments:
FULL_NAME: Full name of the table.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
deleteReq.FullName = args[0]
err = w.LakehouseMonitors.Delete(ctx, deleteReq)
if err != nil {
return err
}
return nil
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range deleteOverrides {
fn(cmd, &deleteReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newDelete())
})
}
// start get command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getOverrides []func(
*cobra.Command,
*catalog.GetLakehouseMonitorRequest,
)
func newGet() *cobra.Command {
cmd := &cobra.Command{}
var getReq catalog.GetLakehouseMonitorRequest
// TODO: short flags
cmd.Use = "get FULL_NAME"
cmd.Short = `Get a table monitor.`
cmd.Long = `Get a table monitor.
Gets a monitor for the specified table.
The caller must either: 1. be an owner of the table's parent catalog 2. have
**USE_CATALOG** on the table's parent catalog and be an owner of the table's
parent schema. 3. have the following permissions: - **USE_CATALOG** on the
table's parent catalog - **USE_SCHEMA** on the table's parent schema -
**SELECT** privilege on the table.
The returned information includes configuration values, as well as information
on assets created by the monitor. Some information (e.g., dashboard) may be
filtered out if the caller is in a different workspace than where the monitor
was created.
Arguments:
FULL_NAME: Full name of the table.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
getReq.FullName = args[0]
response, err := w.LakehouseMonitors.Get(ctx, getReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range getOverrides {
fn(cmd, &getReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newGet())
})
}
// start update command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var updateOverrides []func(
*cobra.Command,
*catalog.UpdateMonitor,
)
func newUpdate() *cobra.Command {
cmd := &cobra.Command{}
var updateReq catalog.UpdateMonitor
var updateJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&updateReq.BaselineTableName, "baseline-table-name", updateReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`)
// TODO: array: custom_metrics
// TODO: complex arg: data_classification_config
// TODO: complex arg: inference_log
// TODO: array: notifications
// TODO: complex arg: schedule
// TODO: array: slicing_exprs
// TODO: output-only field
// TODO: complex arg: time_series
cmd.Use = "update FULL_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME"
cmd.Short = `Update a table monitor.`
cmd.Long = `Update a table monitor.
Updates a monitor for the specified table.
The caller must either: 1. be an owner of the table's parent catalog 2. have
**USE_CATALOG** on the table's parent catalog and be an owner of the table's
parent schema 3. have the following permissions: - **USE_CATALOG** on the
table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
owner of the table.
Additionally, the call must be made from the workspace where the monitor was
created, and the caller must be the original creator of the monitor.
Certain configuration fields, such as output asset identifiers, cannot be
updated.
Arguments:
FULL_NAME: Full name of the table.
ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables).
OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := cobra.ExactArgs(1)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. Provide 'assets_dir', 'output_schema_name' in your JSON input")
}
return nil
}
check := cobra.ExactArgs(3)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = updateJson.Unmarshal(&updateReq)
if err != nil {
return err
}
}
updateReq.FullName = args[0]
if !cmd.Flags().Changed("json") {
updateReq.AssetsDir = args[1]
}
if !cmd.Flags().Changed("json") {
updateReq.OutputSchemaName = args[2]
}
response, err := w.LakehouseMonitors.Update(ctx, updateReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range updateOverrides {
fn(cmd, &updateReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newUpdate())
})
}
// end service LakehouseMonitors


@ -911,6 +911,7 @@ func newStartUpdate() *cobra.Command {
cmd.Flags().BoolVar(&startUpdateReq.FullRefresh, "full-refresh", startUpdateReq.FullRefresh, `If true, this update will reset all tables before running.`)
// TODO: array: full_refresh_selection
// TODO: array: refresh_selection
cmd.Flags().BoolVar(&startUpdateReq.ValidateOnly, "validate-only", startUpdateReq.ValidateOnly, `If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets.`)
cmd.Use = "start-update PIPELINE_ID"
cmd.Short = `Start a pipeline.`
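This hunk adds a `--validate-only` flag to `start-update`. A minimal sketch of what the flag maps to at the SDK level; the `pipelines.StartUpdate` type and `PipelineId` field are assumptions, since only `ValidateOnly` and `FullRefresh` appear in this hunk:

```go
package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

// validatePipeline starts a validate-only update: pipeline source code is
// checked for correctness, but no datasets are materialized or published.
func validatePipeline(ctx context.Context, w *databricks.WorkspaceClient) error {
	_, err := w.Pipelines.StartUpdate(ctx, pipelines.StartUpdate{
		PipelineId:   "1234-5678-abcd", // hypothetical pipeline ID
		ValidateOnly: true,
	})
	return err
}
```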


@ -286,7 +286,10 @@ func newList() *cobra.Command {
cmd.Long = `Get a list of queries.
Gets a list of queries. Optionally, this list can be filtered by a search
term.`
term.
### **Warning: Calling this API concurrently 10 or more times could result in
throttling, service degradation, or a temporary ban.**`
cmd.Annotations = make(map[string]string)
@ -422,6 +425,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this query that appears in list views, widget headings, and on the query page.`)
// TODO: any: options
cmd.Flags().StringVar(&updateReq.Query, "query", updateReq.Query, `The text of the query to be run.`)
cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`)
cmd.Use = "update QUERY_ID"
cmd.Short = `Change a query definition.`


@ -123,6 +123,89 @@ func init() {
})
}
// start exists command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var existsOverrides []func(
*cobra.Command,
*catalog.ExistsRequest,
)
func newExists() *cobra.Command {
cmd := &cobra.Command{}
var existsReq catalog.ExistsRequest
// TODO: short flags
cmd.Use = "exists FULL_NAME"
cmd.Short = `Get boolean reflecting if table exists.`
cmd.Long = `Get boolean reflecting if table exists.
Gets if a table exists in the metastore for a specific catalog and schema. The
caller must satisfy one of the following requirements: * Be a metastore admin
* Be the owner of the parent catalog * Be the owner of the parent schema and
have the USE_CATALOG privilege on the parent catalog * Have the
**USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
privilege on the parent schema, and either be the table owner or have the
SELECT privilege on the table. * Have BROWSE privilege on the parent catalog *
Have BROWSE privilege on the parent schema.
Arguments:
FULL_NAME: Full name of the table.`
cmd.Annotations = make(map[string]string)
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No FULL_NAME argument specified. Loading names for Tables drop-down."
names, err := w.Tables.TableInfoNameToTableIdMap(ctx, catalog.ListTablesRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Tables drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Full name of the table")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have full name of the table")
}
existsReq.FullName = args[0]
response, err := w.Tables.Exists(ctx, existsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range existsOverrides {
fn(cmd, &existsReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newExists())
})
}
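The new `exists` command builds a `catalog.ExistsRequest` from the table's full name and calls `w.Tables.Exists`, as shown above. A minimal sketch of the same check via the SDK; the `TableExists` field on the response is an assumption, since the command above only renders the response as JSON:

```go
package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

// tableExists reports whether the given table is present in the metastore.
func tableExists(ctx context.Context, w *databricks.WorkspaceClient, fullName string) (bool, error) {
	resp, err := w.Tables.Exists(ctx, catalog.ExistsRequest{FullName: fullName})
	if err != nil {
		return false, err
	}
	return resp.TableExists, nil // field name assumed
}
```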
// start get command
// Slice with functions to override default command behavior.
@ -146,10 +229,12 @@ func newGet() *cobra.Command {
cmd.Long = `Get a table.
Gets a table from the metastore for a specific catalog and schema. The caller
must be a metastore admin, be the owner of the table and have the
**USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
privilege on the parent schema, or be the owner of the table and have the
**SELECT** privilege on it as well.
must satisfy one of the following requirements: * Be a metastore admin * Be
the owner of the parent catalog * Be the owner of the parent schema and have
the USE_CATALOG privilege on the parent catalog * Have the **USE_CATALOG**
privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
schema, and either be the table owner or have the SELECT privilege on the
table.
Arguments:
FULL_NAME: Full name of the table.`


@ -56,16 +56,16 @@ func newCreateOboToken() *cobra.Command {
cmd.Flags().Var(&createOboTokenJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&createOboTokenReq.Comment, "comment", createOboTokenReq.Comment, `Comment that describes the purpose of the token.`)
cmd.Flags().Int64Var(&createOboTokenReq.LifetimeSeconds, "lifetime-seconds", createOboTokenReq.LifetimeSeconds, `The number of seconds before the token expires.`)
cmd.Use = "create-obo-token APPLICATION_ID LIFETIME_SECONDS"
cmd.Use = "create-obo-token APPLICATION_ID"
cmd.Short = `Create on-behalf token.`
cmd.Long = `Create on-behalf token.
Creates a token on behalf of a service principal.
Arguments:
APPLICATION_ID: Application ID of the service principal.
LIFETIME_SECONDS: The number of seconds before the token expires.`
APPLICATION_ID: Application ID of the service principal.`
cmd.Annotations = make(map[string]string)
@ -73,12 +73,11 @@ func newCreateOboToken() *cobra.Command {
if cmd.Flags().Changed("json") {
err := cobra.ExactArgs(0)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'application_id', 'lifetime_seconds' in your JSON input")
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'application_id' in your JSON input")
}
return nil
}
check := cobra.ExactArgs(2)
return check(cmd, args)
return nil
}
cmd.PreRunE = root.MustWorkspaceClient
@ -91,15 +90,25 @@ func newCreateOboToken() *cobra.Command {
if err != nil {
return err
}
}
if !cmd.Flags().Changed("json") {
createOboTokenReq.ApplicationId = args[0]
}
if !cmd.Flags().Changed("json") {
_, err = fmt.Sscan(args[1], &createOboTokenReq.LifetimeSeconds)
if err != nil {
return fmt.Errorf("invalid LIFETIME_SECONDS: %s", args[1])
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No APPLICATION_ID argument specified. Loading names for Token Management drop-down."
names, err := w.TokenManagement.TokenInfoCommentToTokenIdMap(ctx, settings.ListTokenManagementRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Token Management drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Application ID of the service principal")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have application id of the service principal")
}
createOboTokenReq.ApplicationId = args[0]
}
response, err := w.TokenManagement.CreateOboToken(ctx, createOboTokenReq)
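After this change, `LIFETIME_SECONDS` is no longer a positional argument; the lifetime is supplied through the `--lifetime-seconds` flag or the JSON body, and the command can prompt for the application ID. A minimal sketch of the underlying SDK call; the `settings.CreateOboTokenRequest` type name and the `TokenValue` response field are assumptions not visible in this hunk:

```go
package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

// createOboToken mints a token on behalf of a service principal. Only the
// application ID is required; lifetime and comment are optional.
func createOboToken(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
	resp, err := w.TokenManagement.CreateOboToken(ctx, settings.CreateOboTokenRequest{
		ApplicationId:   "11111111-2222-3333-4444-555555555555", // hypothetical SP ID
		LifetimeSeconds: 3600,
		Comment:         "example on-behalf-of token",
	})
	if err != nil {
		return "", err
	}
	return resp.TokenValue, nil // field name assumed
}
```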
@ -146,7 +155,7 @@ func newDelete() *cobra.Command {
cmd.Use = "delete TOKEN_ID"
cmd.Short = `Delete a token.`
cmd.Long = `Delete a token.
Deletes a token, specified by its ID.
Arguments:
@ -222,7 +231,7 @@ func newGet() *cobra.Command {
cmd.Use = "get TOKEN_ID"
cmd.Short = `Get token info.`
cmd.Long = `Get token info.
Gets information about a token, specified by its ID.
Arguments:
@ -293,7 +302,7 @@ func newGetPermissionLevels() *cobra.Command {
cmd.Use = "get-permission-levels"
cmd.Short = `Get token permission levels.`
cmd.Long = `Get token permission levels.
Gets the permission levels that a user can have on an object.`
cmd.Annotations = make(map[string]string)
@ -341,7 +350,7 @@ func newGetPermissions() *cobra.Command {
cmd.Use = "get-permissions"
cmd.Short = `Get token permissions.`
cmd.Long = `Get token permissions.
Gets the permissions of all tokens. Tokens can inherit permissions from their
root object.`
@ -398,7 +407,7 @@ func newList() *cobra.Command {
cmd.Use = "list"
cmd.Short = `List all tokens.`
cmd.Long = `List all tokens.
Lists all tokens associated with the specified workspace or user.`
cmd.Annotations = make(map[string]string)
@ -461,7 +470,7 @@ func newSetPermissions() *cobra.Command {
cmd.Use = "set-permissions"
cmd.Short = `Set token permissions.`
cmd.Long = `Set token permissions.
Sets permissions on all tokens. Tokens can inherit permissions from their root
object.`
@ -532,7 +541,7 @@ func newUpdatePermissions() *cobra.Command {
cmd.Use = "update-permissions"
cmd.Short = `Update token permissions.`
cmd.Long = `Update token permissions.
Updates the permissions on all tokens. Tokens can inherit permissions from
their root object.`