Bump Go SDK to v0.12.0 (#540)

## Changes

* Regenerate CLI commands
* Ignore `account-access-control-proxy` (see #505)

## Tests

Unit and integration tests pass.
Pieter Noordhuis authored on 2023-07-03 11:46:45 +02:00 (committed by GitHub)
parent f42279fe47
commit ad8183d7a9
21 changed files with 269 additions and 77 deletions


@@ -2,7 +2,7 @@
package workspace
{{$excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions"}}
{{ $excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions" "account-access-control-proxy" }}
import (
"github.com/databricks/cli/cmd/root"


@@ -10,7 +10,8 @@ import (
"github.com/spf13/cobra"
)
{{- $excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions" -}}
{{ $excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions" "account-access-control-proxy" }}
{{if not (in $excludes .KebabName) }}
{{template "service" .}}
{{else}}

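For context on how the exclusion above takes effect: the generator template builds a list of kebab-case service names and skips any service found in it. A minimal, self-contained sketch of that mechanism, with hand-rolled `list` and `in` helpers standing in for whatever the real codegen registers (an assumption; they are not reproduced from this repo):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Stand-ins for the generator's "list" and "in" helpers (assumption: the
	// real codegen registers equivalents with these semantics).
	funcs := template.FuncMap{
		"list": func(items ...string) []string { return items },
		"in": func(items []string, s string) bool {
			for _, it := range items {
				if it == s {
					return true
				}
			}
			return false
		},
	}
	const src = `{{ $excludes := list "dbfs" "account-access-control-proxy" }}` +
		`{{ if not (in $excludes .KebabName) }}generate {{ .KebabName }}{{ else }}skip {{ .KebabName }}{{ end }}`
	tmpl := template.Must(template.New("svc").Funcs(funcs).Parse(src))
	for _, svc := range []struct{ KebabName string }{{"clusters"}, {"account-access-control-proxy"}} {
		_ = tmpl.Execute(os.Stdout, svc) // "generate clusters", then "skip account-access-control-proxy"
		os.Stdout.WriteString("\n")
	}
}
```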

@@ -105,7 +105,7 @@ func (m *translatePaths) translateFilePath(literal, localPath, remotePath string
return remotePath, nil
}
func (m *translatePaths) translateJobTask(dir string, b *bundle.Bundle, task *jobs.JobTaskSettings) error {
func (m *translatePaths) translateJobTask(dir string, b *bundle.Bundle, task *jobs.Task) error {
var err error
if task.NotebookTask != nil {

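Everything in this file and the test file that follows is mechanical fallout from one v0.12.0 rename: `jobs.JobTaskSettings` is now `jobs.Task`. A minimal sketch of the updated shape, using only fields that appear in the hunks below:

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// With databricks-sdk-go v0.12.0, task lists are []jobs.Task
	// (previously []jobs.JobTaskSettings); the field names are unchanged.
	settings := jobs.JobSettings{
		Name: "my job",
		Tasks: []jobs.Task{
			{
				TaskKey: "main",
				NotebookTask: &jobs.NotebookTask{
					NotebookPath: "./my_job_notebook.py",
				},
			},
		},
	}
	fmt.Println(settings.Tasks[0].NotebookTask.NotebookPath)
}
```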

@@ -54,7 +54,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
GitTag: "sometag",
GitUrl: "https://github.com/someuser/somerepo",
},
Tasks: []jobs.JobTaskSettings{
Tasks: []jobs.Task{
{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "my_job_notebook.py",
@@ -117,7 +117,7 @@ func TestTranslatePaths(t *testing.T) {
ConfigFilePath: filepath.Join(dir, "resource.yml"),
},
JobSettings: &jobs.JobSettings{
Tasks: []jobs.JobTaskSettings{
Tasks: []jobs.Task{
{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "./my_job_notebook.py",
@@ -251,7 +251,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
ConfigFilePath: filepath.Join(dir, "job/resource.yml"),
},
JobSettings: &jobs.JobSettings{
Tasks: []jobs.JobTaskSettings{
Tasks: []jobs.Task{
{
SparkPythonTask: &jobs.SparkPythonTask{
PythonFile: "./my_python_file.py",
@@ -314,7 +314,7 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
ConfigFilePath: filepath.Join(dir, "../resource.yml"),
},
JobSettings: &jobs.JobSettings{
Tasks: []jobs.JobTaskSettings{
Tasks: []jobs.Task{
{
SparkPythonTask: &jobs.SparkPythonTask{
PythonFile: "./my_python_file.py",
@@ -345,7 +345,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
ConfigFilePath: filepath.Join(dir, "fake.yml"),
},
JobSettings: &jobs.JobSettings{
Tasks: []jobs.JobTaskSettings{
Tasks: []jobs.Task{
{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "./doesnt_exist.py",
@@ -376,7 +376,7 @@ func TestJobFileDoesNotExistError(t *testing.T) {
ConfigFilePath: filepath.Join(dir, "fake.yml"),
},
JobSettings: &jobs.JobSettings{
Tasks: []jobs.JobTaskSettings{
Tasks: []jobs.Task{
{
SparkPythonTask: &jobs.SparkPythonTask{
PythonFile: "./doesnt_exist.py",


@@ -20,13 +20,13 @@ func TestConvertJob(t *testing.T) {
JobClusters: []jobs.JobCluster{
{
JobClusterKey: "key",
NewCluster: &compute.BaseClusterInfo{
NewCluster: &compute.ClusterSpec{
SparkVersion: "10.4.x-scala2.12",
},
},
},
GitSource: &jobs.GitSource{
GitProvider: jobs.GitSourceGitProviderGithub,
GitProvider: jobs.GitProviderGithub,
GitUrl: "https://github.com/foo/bar",
},
},
@@ -78,7 +78,7 @@ func TestConvertJobTaskLibraries(t *testing.T) {
var src = resources.Job{
JobSettings: &jobs.JobSettings{
Name: "my job",
Tasks: []jobs.JobTaskSettings{
Tasks: []jobs.Task{
{
TaskKey: "key",
Libraries: []compute.Library{

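Two more SDK renames surface in this converter test: `compute.BaseClusterInfo` became `compute.ClusterSpec`, and the Git provider enum dropped its `GitSource` prefix (`jobs.GitSourceGitProviderGithub` → `jobs.GitProviderGithub`). A sketch assembling the same literals outside the test:

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	settings := jobs.JobSettings{
		JobClusters: []jobs.JobCluster{
			{
				JobClusterKey: "key",
				// v0.12.0: compute.ClusterSpec replaces compute.BaseClusterInfo.
				NewCluster: &compute.ClusterSpec{
					SparkVersion: "10.4.x-scala2.12",
				},
			},
		},
		GitSource: &jobs.GitSource{
			// v0.12.0: the enum prefix shrank from GitSourceGitProvider to GitProvider.
			GitProvider: jobs.GitProviderGithub,
			GitUrl:      "https://github.com/foo/bar",
		},
	}
	fmt.Println(settings.JobClusters[0].NewCluster.SparkVersion)
}
```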

@@ -40,7 +40,12 @@ var downloadCmd = &cobra.Command{
Returns billable usage logs in CSV format for the specified account and date
range. For the data schema, see [CSV file schema]. Note that this method might
take multiple seconds to complete.
take multiple minutes to complete.
**Warning**: Depending on the queried date range, the number of workspaces in
the account, the size of the response and the internet speed of the caller,
this API may hit a timeout after a few minutes. If you experience this, try to
mitigate by calling the API with narrower date ranges.
[CSV file schema]: https://docs.databricks.com/administration-guide/account-settings/usage-analysis.html#schema`,

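The new warning suggests retrying with narrower date ranges when the download times out. A sketch of that mitigation, splitting a range into month-sized windows; the per-window download call is a hypothetical placeholder, not an SDK signature:

```go
package main

import (
	"fmt"
	"time"
)

// monthWindows splits [start, end) into month-sized [from, to) pairs so that
// each billable-usage download covers a narrow range and is less likely to
// hit the server-side timeout described above.
func monthWindows(start, end time.Time) [][2]time.Time {
	var windows [][2]time.Time
	for from := start; from.Before(end); {
		to := from.AddDate(0, 1, 0)
		if to.After(end) {
			to = end
		}
		windows = append(windows, [2]time.Time{from, to})
		from = to
	}
	return windows
}

func main() {
	start := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(2023, 7, 1, 0, 0, 0, 0, time.UTC)
	for _, w := range monthWindows(start, end) {
		// downloadUsage(w[0], w[1]) would go here: a hypothetical helper that
		// fetches one window of billable usage, not an SDK call.
		fmt.Printf("download %s..%s\n", w[0].Format("2006-01"), w[1].Format("2006-01"))
	}
}
```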

@@ -43,6 +43,7 @@ func init() {
// TODO: array: groups
createCmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Databricks group ID.`)
// TODO: array: members
// TODO: complex arg: meta
// TODO: array: roles
}
@@ -345,6 +346,7 @@ func init() {
// TODO: array: groups
updateCmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Databricks group ID.`)
// TODO: array: members
// TODO: complex arg: meta
// TODO: array: roles
}


@@ -17,6 +17,61 @@ var Cmd = &cobra.Command{
Annotations: map[string]string{
"package": "settings",
},
// This service is being previewed; hide from help output.
Hidden: true,
}
// start delete-personal-compute-setting command
var deletePersonalComputeSettingReq settings.DeletePersonalComputeSettingRequest
var deletePersonalComputeSettingJson flags.JsonFlag
func init() {
Cmd.AddCommand(deletePersonalComputeSettingCmd)
// TODO: short flags
deletePersonalComputeSettingCmd.Flags().Var(&deletePersonalComputeSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`)
deletePersonalComputeSettingCmd.Flags().StringVar(&deletePersonalComputeSettingReq.Etag, "etag", deletePersonalComputeSettingReq.Etag, `TBD.`)
}
var deletePersonalComputeSettingCmd = &cobra.Command{
Use: "delete-personal-compute-setting",
Short: `Delete Personal Compute setting.`,
Long: `Delete Personal Compute setting.
TBD`,
Annotations: map[string]string{},
Args: func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(0)
if cmd.Flags().Changed("json") {
check = cobra.ExactArgs(0)
}
return check(cmd, args)
},
PreRunE: root.MustAccountClient,
RunE: func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
if cmd.Flags().Changed("json") {
err = deletePersonalComputeSettingJson.Unmarshal(&deletePersonalComputeSettingReq)
if err != nil {
return err
}
} else {
}
response, err := a.Settings.DeletePersonalComputeSetting(ctx, deletePersonalComputeSettingReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
},
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
ValidArgsFunction: cobra.NoFileCompletions,
}
// start read-personal-compute-setting command
@@ -71,4 +126,57 @@ var readPersonalComputeSettingCmd = &cobra.Command{
ValidArgsFunction: cobra.NoFileCompletions,
}
// start update-personal-compute-setting command
var updatePersonalComputeSettingReq settings.UpdatePersonalComputeSettingRequest
var updatePersonalComputeSettingJson flags.JsonFlag
func init() {
Cmd.AddCommand(updatePersonalComputeSettingCmd)
// TODO: short flags
updatePersonalComputeSettingCmd.Flags().Var(&updatePersonalComputeSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`)
updatePersonalComputeSettingCmd.Flags().BoolVar(&updatePersonalComputeSettingReq.AllowMissing, "allow-missing", updatePersonalComputeSettingReq.AllowMissing, `TBD.`)
// TODO: complex arg: setting
}
var updatePersonalComputeSettingCmd = &cobra.Command{
Use: "update-personal-compute-setting",
Short: `Update Personal Compute setting.`,
Long: `Update Personal Compute setting.
TBD`,
Annotations: map[string]string{},
Args: func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(0)
if cmd.Flags().Changed("json") {
check = cobra.ExactArgs(0)
}
return check(cmd, args)
},
PreRunE: root.MustAccountClient,
RunE: func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
if cmd.Flags().Changed("json") {
err = updatePersonalComputeSettingJson.Unmarshal(&updatePersonalComputeSettingReq)
if err != nil {
return err
}
} else {
}
response, err := a.Settings.UpdatePersonalComputeSetting(ctx, updatePersonalComputeSettingReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
},
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
ValidArgsFunction: cobra.NoFileCompletions,
}
// end service AccountSettings

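Every generated command follows the pattern visible above: flags mutate a typed request struct, and `--json` (inline or `@path/to/file.json`) replaces the request wholesale. A self-contained approximation of that flow with plain cobra and encoding/json; the CLI's actual `flags.JsonFlag` helper is not reproduced here, and the request shape is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"
)

// updateRequest approximates a generated request struct such as
// settings.UpdatePersonalComputeSettingRequest (hypothetical shape).
type updateRequest struct {
	AllowMissing bool   `json:"allow_missing,omitempty"`
	Etag         string `json:"etag,omitempty"`
}

func main() {
	var req updateRequest
	var jsonArg string

	cmd := &cobra.Command{
		Use: "update-personal-compute-setting",
		RunE: func(cmd *cobra.Command, args []string) error {
			// As in the generated commands, --json overrides individual flags.
			if cmd.Flags().Changed("json") {
				raw := []byte(jsonArg)
				if strings.HasPrefix(jsonArg, "@") { // @path/to/file.json form
					var err error
					if raw, err = os.ReadFile(jsonArg[1:]); err != nil {
						return err
					}
				}
				if err := json.Unmarshal(raw, &req); err != nil {
					return err
				}
			}
			fmt.Printf("%+v\n", req)
			return nil
		},
	}
	cmd.Flags().BoolVar(&req.AllowMissing, "allow-missing", false, "allow missing")
	cmd.Flags().StringVar(&jsonArg, "json", "", "inline JSON or @path/to/file.json")
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```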

@@ -66,7 +66,7 @@ var loginCmd = &cobra.Command{
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "Loading list of clusters to select from"
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load clusters list. Original error: %w", err)


@@ -149,14 +149,9 @@ var createCmd = &cobra.Command{
Long: `Create new cluster.
Creates a new Spark cluster. This method will acquire new instances from the
cloud provider if necessary. This method is asynchronous; the returned
cluster_id can be used to poll the cluster status. When this method returns,
the cluster will be in a PENDING state. The cluster will be usable once it
enters a RUNNING state.
Note: Databricks may not be able to acquire some of the requested nodes, due
to cloud provider limitations (account limits, spot price, etc.) or transient
network issues.
cloud provider if necessary. Note: Databricks may not be able to acquire some
of the requested nodes, due to cloud provider limitations (account limits,
spot price, etc.) or transient network issues.
If Databricks acquires at least 85% of the requested on-demand nodes, cluster
creation will succeed. Otherwise the cluster will terminate with an
@@ -191,7 +186,7 @@ var createCmd = &cobra.Command{
return cmdio.Render(ctx, wait.Response)
}
spinner := cmdio.Spinner(ctx)
info, err := wait.OnProgress(func(i *compute.ClusterInfo) {
info, err := wait.OnProgress(func(i *compute.ClusterDetails) {
statusMessage := i.StateMessage
spinner <- statusMessage
}).GetWithTimeout(createTimeout)
@@ -247,7 +242,7 @@ var deleteCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -272,7 +267,7 @@ var deleteCmd = &cobra.Command{
return nil
}
spinner := cmdio.Spinner(ctx)
info, err := wait.OnProgress(func(i *compute.ClusterInfo) {
info, err := wait.OnProgress(func(i *compute.ClusterDetails) {
statusMessage := i.StateMessage
spinner <- statusMessage
}).GetWithTimeout(deleteTimeout)
@@ -380,7 +375,7 @@ var editCmd = &cobra.Command{
return nil
}
spinner := cmdio.Spinner(ctx)
info, err := wait.OnProgress(func(i *compute.ClusterInfo) {
info, err := wait.OnProgress(func(i *compute.ClusterDetails) {
statusMessage := i.StateMessage
spinner <- statusMessage
}).GetWithTimeout(editTimeout)
@@ -437,7 +432,7 @@ var eventsCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -504,7 +499,7 @@ var getCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -692,7 +687,7 @@ var permanentDeleteCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -755,7 +750,7 @@ var pinCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -825,7 +820,7 @@ var resizeCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -850,7 +845,7 @@ var resizeCmd = &cobra.Command{
return nil
}
spinner := cmdio.Spinner(ctx)
info, err := wait.OnProgress(func(i *compute.ClusterInfo) {
info, err := wait.OnProgress(func(i *compute.ClusterDetails) {
statusMessage := i.StateMessage
spinner <- statusMessage
}).GetWithTimeout(resizeTimeout)
@@ -906,7 +901,7 @@ var restartCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -931,7 +926,7 @@ var restartCmd = &cobra.Command{
return nil
}
spinner := cmdio.Spinner(ctx)
info, err := wait.OnProgress(func(i *compute.ClusterInfo) {
info, err := wait.OnProgress(func(i *compute.ClusterDetails) {
statusMessage := i.StateMessage
spinner <- statusMessage
}).GetWithTimeout(restartTimeout)
@@ -1022,7 +1017,7 @@ var startCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
@@ -1047,7 +1042,7 @@ var startCmd = &cobra.Command{
return nil
}
spinner := cmdio.Spinner(ctx)
info, err := wait.OnProgress(func(i *compute.ClusterInfo) {
info, err := wait.OnProgress(func(i *compute.ClusterDetails) {
statusMessage := i.StateMessage
spinner <- statusMessage
}).GetWithTimeout(startTimeout)
@@ -1097,7 +1092,7 @@ var unpinCmd = &cobra.Command{
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)

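All of the edits in this file trace back to one rename: `compute.ClusterInfo` is now `compute.ClusterDetails`, which also renames the generated `ClusterInfoClusterNameToClusterIdMap` helper. A sketch of both call sites as they look after the bump, assuming a configured workspace; the helper and waiter signatures are taken from the hunks above:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// v0.12.0 rename: ClusterDetailsClusterNameToClusterIdMap
	// (previously ClusterInfoClusterNameToClusterIdMap).
	names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
	if err != nil {
		panic(err)
	}

	// Kick off a start and poll it; progress callbacks now receive
	// *compute.ClusterDetails instead of *compute.ClusterInfo.
	wait, err := w.Clusters.Start(ctx, compute.StartCluster{ClusterId: names["my-cluster"]})
	if err != nil {
		panic(err)
	}
	info, err := wait.OnProgress(func(i *compute.ClusterDetails) {
		fmt.Println(i.StateMessage)
	}).GetWithTimeout(20 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.State)
}
```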

@@ -36,6 +36,11 @@ func init() {
// TODO: short flags
createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
createCmd.Flags().BoolVar(&createReq.IsFavorite, "is-favorite", createReq.IsFavorite, `Indicates whether this query object should appear in the current user's favorites list.`)
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `The title of this dashboard that appears in list views and at the top of the dashboard page.`)
createCmd.Flags().StringVar(&createReq.Parent, "parent", createReq.Parent, `The identifier of the workspace folder containing the dashboard.`)
// TODO: array: tags
}
var createCmd = &cobra.Command{
@@ -61,7 +66,6 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Dashboards.Create(ctx, createReq)


@@ -43,6 +43,7 @@ func init() {
// TODO: array: groups
createCmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Databricks group ID.`)
// TODO: array: members
// TODO: complex arg: meta
// TODO: array: roles
}
@@ -345,6 +346,7 @@ func init() {
// TODO: array: groups
updateCmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Databricks group ID.`)
// TODO: array: members
// TODO: complex arg: meta
// TODO: array: roles
}


@@ -53,6 +53,7 @@ func init() {
// TODO: map via StringToStringVar: custom_tags
// TODO: complex arg: disk_spec
createCmd.Flags().BoolVar(&createReq.EnableElasticDisk, "enable-elastic-disk", createReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.`)
// TODO: complex arg: gcp_attributes
createCmd.Flags().IntVar(&createReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", createReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`)
// TODO: complex arg: instance_pool_fleet_attributes
createCmd.Flags().IntVar(&createReq.MaxCapacity, "max-capacity", createReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`)
@@ -179,6 +180,7 @@ func init() {
// TODO: map via StringToStringVar: custom_tags
// TODO: complex arg: disk_spec
editCmd.Flags().BoolVar(&editReq.EnableElasticDisk, "enable-elastic-disk", editReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.`)
// TODO: complex arg: gcp_attributes
editCmd.Flags().IntVar(&editReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", editReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`)
// TODO: complex arg: instance_pool_fleet_attributes
editCmd.Flags().IntVar(&editReq.MaxCapacity, "max-capacity", editReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`)


@@ -203,6 +203,25 @@ func init() {
// TODO: short flags
createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: array: access_control_list
// TODO: array: compute
// TODO: complex arg: continuous
// TODO: complex arg: email_notifications
createCmd.Flags().Var(&createReq.Format, "format", `Used to tell what is the format of the job.`)
// TODO: complex arg: git_source
// TODO: array: job_clusters
createCmd.Flags().IntVar(&createReq.MaxConcurrentRuns, "max-concurrent-runs", createReq.MaxConcurrentRuns, `An optional maximum allowed number of concurrent runs of the job.`)
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `An optional name for the job.`)
// TODO: complex arg: notification_settings
// TODO: array: parameters
// TODO: complex arg: run_as
// TODO: complex arg: schedule
// TODO: map via StringToStringVar: tags
// TODO: array: tasks
createCmd.Flags().IntVar(&createReq.TimeoutSeconds, "timeout-seconds", createReq.TimeoutSeconds, `An optional timeout applied to each run of this job.`)
// TODO: complex arg: trigger
// TODO: complex arg: webhook_notifications
}
var createCmd = &cobra.Command{
@@ -230,7 +249,6 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Jobs.Create(ctx, createReq)
@@ -666,8 +684,8 @@ func init() {
var listCmd = &cobra.Command{
Use: "list",
Short: `List all jobs.`,
Long: `List all jobs.
Short: `List jobs.`,
Long: `List jobs.
Retrieves a list of jobs.`,
@@ -727,8 +745,8 @@ func init() {
var listRunsCmd = &cobra.Command{
Use: "list-runs",
Short: `List runs for a job.`,
Long: `List runs for a job.
Short: `List job runs.`,
Long: `List job runs.
List runs in descending order by start time.`,
@@ -786,6 +804,7 @@ func init() {
// TODO: map via StringToStringVar: python_named_params
// TODO: array: python_params
repairRunCmd.Flags().BoolVar(&repairRunReq.RerunAllFailedTasks, "rerun-all-failed-tasks", repairRunReq.RerunAllFailedTasks, `If true, repair all failed tasks.`)
repairRunCmd.Flags().BoolVar(&repairRunReq.RerunDependentTasks, "rerun-dependent-tasks", repairRunReq.RerunDependentTasks, `If true, repair all tasks that depend on the tasks in rerun_tasks, even if they were previously successful.`)
// TODO: array: rerun_tasks
// TODO: array: spark_submit_params
// TODO: map via StringToStringVar: sql_params
@@ -928,6 +947,7 @@ func init() {
// TODO: array: dbt_commands
runNowCmd.Flags().StringVar(&runNowReq.IdempotencyToken, "idempotency-token", runNowReq.IdempotencyToken, `An optional token to guarantee the idempotency of job run requests.`)
// TODO: array: jar_params
// TODO: array: job_parameters
// TODO: map via StringToStringVar: notebook_params
// TODO: complex arg: pipeline_params
// TODO: map via StringToStringVar: python_named_params


@@ -45,6 +45,25 @@ func init() {
// TODO: short flags
createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
createCmd.Flags().BoolVar(&createReq.AllowDuplicateNames, "allow-duplicate-names", createReq.AllowDuplicateNames, `If false, deployment will fail if name conflicts with that of another pipeline.`)
createCmd.Flags().StringVar(&createReq.Catalog, "catalog", createReq.Catalog, `A catalog in Unity Catalog to publish data from this pipeline to.`)
createCmd.Flags().StringVar(&createReq.Channel, "channel", createReq.Channel, `DLT Release Channel that specifies which version to use.`)
// TODO: array: clusters
// TODO: map via StringToStringVar: configuration
createCmd.Flags().BoolVar(&createReq.Continuous, "continuous", createReq.Continuous, `Whether the pipeline is continuous or triggered.`)
createCmd.Flags().BoolVar(&createReq.Development, "development", createReq.Development, `Whether the pipeline is in Development mode.`)
createCmd.Flags().BoolVar(&createReq.DryRun, "dry-run", createReq.DryRun, ``)
createCmd.Flags().StringVar(&createReq.Edition, "edition", createReq.Edition, `Pipeline product edition.`)
// TODO: complex arg: filters
createCmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Unique identifier for this pipeline.`)
// TODO: array: libraries
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Friendly identifier for this pipeline.`)
createCmd.Flags().BoolVar(&createReq.Photon, "photon", createReq.Photon, `Whether Photon is enabled for this pipeline.`)
createCmd.Flags().BoolVar(&createReq.Serverless, "serverless", createReq.Serverless, `Whether serverless compute is enabled for this pipeline.`)
createCmd.Flags().StringVar(&createReq.Storage, "storage", createReq.Storage, `DBFS root directory for storing checkpoints and tables.`)
createCmd.Flags().StringVar(&createReq.Target, "target", createReq.Target, `Target schema (database) to add tables in this pipeline to.`)
// TODO: complex arg: trigger
}
var createCmd = &cobra.Command{
@@ -73,7 +92,6 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Pipelines.Create(ctx, createReq)


@@ -34,6 +34,13 @@ func init() {
// TODO: short flags
createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
createCmd.Flags().StringVar(&createReq.DataSourceId, "data-source-id", createReq.DataSourceId, `The ID of the data source / SQL warehouse where this query will run.`)
createCmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `General description that can convey additional information about this query such as usage notes.`)
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `The name or title of this query to display in list views.`)
// TODO: any: options
createCmd.Flags().StringVar(&createReq.Parent, "parent", createReq.Parent, `The identifier of the workspace folder containing the query.`)
createCmd.Flags().StringVar(&createReq.Query, "query", createReq.Query, `The text of the query.`)
}
var createCmd = &cobra.Command{
@@ -69,7 +76,6 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Queries.Create(ctx, createReq)


@@ -42,8 +42,8 @@ func init() {
// TODO: short flags
createScopeCmd.Flags().Var(&createScopeJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: complex arg: backend_azure_keyvault
createScopeCmd.Flags().StringVar(&createScopeReq.InitialManagePrincipal, "initial-manage-principal", createScopeReq.InitialManagePrincipal, `The principal that is initially granted MANAGE permission to the created scope.`)
// TODO: complex arg: keyvault_metadata
createScopeCmd.Flags().Var(&createScopeReq.ScopeBackendType, "scope-backend-type", `The backend type the scope will be created with.`)
}
@@ -502,9 +502,9 @@ var putAclCmd = &cobra.Command{
Throws RESOURCE_DOES_NOT_EXIST if no such secret scope exists. Throws
RESOURCE_ALREADY_EXISTS if a permission for the principal already exists.
Throws INVALID_PARAMETER_VALUE if the permission is invalid. Throws
PERMISSION_DENIED if the user does not have permission to make this API
call.`,
Throws INVALID_PARAMETER_VALUE if the permission or principal is invalid.
Throws PERMISSION_DENIED if the user does not have permission to make this
API call.`,
Annotations: map[string]string{},
Args: func(cmd *cobra.Command, args []string) error {


@@ -20,15 +20,15 @@ var Cmd = &cobra.Command{
serving endpoints.
You can use a serving endpoint to serve models from the Databricks Model
Registry. Endpoints expose the underlying models as scalable REST API
endpoints using serverless compute. This means the endpoints and associated
compute resources are fully managed by Databricks and will not appear in your
cloud account. A serving endpoint can consist of one or more MLflow models
from the Databricks Model Registry, called served models. A serving endpoint
can have at most ten served models. You can configure traffic settings to
define how requests should be routed to your served models behind an endpoint.
Additionally, you can configure the scale of resources that should be applied
to each served model.`,
Registry or from Unity Catalog. Endpoints expose the underlying models as
scalable REST API endpoints using serverless compute. This means the endpoints
and associated compute resources are fully managed by Databricks and will not
appear in your cloud account. A serving endpoint can consist of one or more
MLflow models from the Databricks Model Registry, called served models. A
serving endpoint can have at most ten served models. You can configure traffic
settings to define how requests should be routed to your served models behind
an endpoint. Additionally, you can configure the scale of resources that
should be applied to each served model.`,
Annotations: map[string]string{
"package": "serving",
},
@@ -210,9 +210,8 @@ func init() {
var exportMetricsCmd = &cobra.Command{
Use: "export-metrics NAME",
Short: `Retrieve the metrics corresponding to a serving endpoint for the current time in Prometheus or OpenMetrics exposition format.`,
Long: `Retrieve the metrics corresponding to a serving endpoint for the current time
in Prometheus or OpenMetrics exposition format.
Short: `Retrieve the metrics associated with a serving endpoint.`,
Long: `Retrieve the metrics associated with a serving endpoint.
Retrieves the metrics associated with the provided serving endpoint in either
Prometheus or OpenMetrics exposition format.`,


@@ -3,6 +3,8 @@
package system_schemas
import (
"fmt"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
@@ -63,7 +65,10 @@ var disableCmd = &cobra.Command{
}
} else {
disableReq.MetastoreId = args[0]
disableReq.SchemaName = args[1]
_, err = fmt.Sscan(args[1], &disableReq.SchemaName)
if err != nil {
return fmt.Errorf("invalid SCHEMA_NAME: %s", args[1])
}
}
err = w.SystemSchemas.Disable(ctx, disableReq)
@@ -79,13 +84,18 @@ var disableCmd = &cobra.Command{
// start enable command
var enableReq catalog.EnableRequest
var enableJson flags.JsonFlag
func init() {
Cmd.AddCommand(enableCmd)
// TODO: short flags
enableCmd.Flags().Var(&enableJson, "json", `either inline JSON string or @path/to/file.json with request body`)
}
var enableCmd = &cobra.Command{
Use: "enable",
Use: "enable METASTORE_ID SCHEMA_NAME",
Short: `Enable a system schema.`,
Long: `Enable a system schema.
@@ -93,11 +103,31 @@ var enableCmd = &cobra.Command{
be an account admin or a metastore admin.`,
Annotations: map[string]string{},
PreRunE: root.MustWorkspaceClient,
Args: func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(2)
if cmd.Flags().Changed("json") {
check = cobra.ExactArgs(0)
}
return check(cmd, args)
},
PreRunE: root.MustWorkspaceClient,
RunE: func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
err = w.SystemSchemas.Enable(ctx)
if cmd.Flags().Changed("json") {
err = enableJson.Unmarshal(&enableReq)
if err != nil {
return err
}
} else {
enableReq.MetastoreId = args[0]
_, err = fmt.Sscan(args[1], &enableReq.SchemaName)
if err != nil {
return fmt.Errorf("invalid SCHEMA_NAME: %s", args[1])
}
}
err = w.SystemSchemas.Enable(ctx, enableReq)
if err != nil {
return err
}

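One subtlety in this file: `SCHEMA_NAME` is now parsed with `fmt.Sscan` instead of a plain assignment, because the request field is a string-backed enum type rather than `string`. A tiny sketch of why that works; the local `SchemaName` type stands in for the SDK's catalog enum (an assumption about its exact shape):

```go
package main

import "fmt"

// SchemaName stands in for a string-backed SDK enum; the real field types
// live in the catalog package (assumption about their exact names).
type SchemaName string

func main() {
	var s SchemaName
	// fmt.Sscan can fill any string-kind value via its reflection fallback,
	// which is why the generated code uses it for enum-typed arguments.
	if _, err := fmt.Sscan("billing", &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // billing
}
```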
go.mod

@@ -4,7 +4,7 @@ go 1.18
require (
github.com/briandowns/spinner v1.23.0 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.10.1 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.12.0 // Apache 2.0
github.com/fatih/color v1.15.0 // MIT
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
github.com/google/uuid v1.3.0 // BSD-3-Clause
@@ -54,11 +54,11 @@ require (
golang.org/x/net v0.11.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/api v0.128.0 // indirect
google.golang.org/api v0.129.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/grpc v1.56.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
google.golang.org/grpc v1.56.1 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum

@@ -34,8 +34,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/databricks/databricks-sdk-go v0.10.1 h1:aaIGurgo7PXFyXCkDy/LButElTOdIBgmVAtdNQtFMsw=
github.com/databricks/databricks-sdk-go v0.10.1/go.mod h1:FHsME5YoKTTrti7UC2y/7ZjpsuHalLag0Etf93c/Bnk=
github.com/databricks/databricks-sdk-go v0.12.0 h1:VgMJpvEiyRRrJ0mQx22Rkc73zjxUe125Ou9c5C99phM=
github.com/databricks/databricks-sdk-go v0.12.0/go.mod h1:h/oWnnfWcJQAotAhZS/GMnlcaE/8WhuZ5Vj7el/6Gn8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -243,8 +243,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg=
google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750=
google.golang.org/api v0.129.0 h1:2XbdjjNfFPXQyufzQVwPf1RRnHH8Den2pfNE2jw7L8w=
google.golang.org/api v0.129.0/go.mod h1:dFjiXlanKwWE3612X97llhsoI36FAoIiRj3aTl5b/zE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
@@ -263,8 +263,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.56.0 h1:+y7Bs8rtMd07LeXmL3NxcTLn7mUkbKZqEpPhMNkwJEE=
google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ=
google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -276,8 +276,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=