Update Go SDK to 0.23.0 and use custom marshaller (#772)

## Changes
Update the Go SDK to 0.23.0 and route (un)marshalling of SDK structs through the SDK's custom marshaller, so that fields explicitly set to their zero value (e.g. `num_workers: 0`) are preserved and sent to the API.
## Tests
* Run unit tests

* Run nightly

* Manual test:
```
./cli jobs create --json @myjob.json
```
with the following payload:
```
{
    "name": "my-job-marshal-test-go",
    "tasks": [{
        "task_key": "testgomarshaltask",
        "new_cluster": {
            "num_workers": 0,
            "spark_version": "10.4.x-scala2.12",
            "node_type_id": "Standard_DS3_v2"
        },
        "libraries": [
            {
                "jar": "dbfs:/max/jars/exampleJarTask.jar"
            }
        ],
        "spark_jar_task": {
            "main_class_name":  "com.databricks.quickstart.exampleTask"
        }
    }]
}
```
Main branch:
```
Error: Cluster validation error: Missing required field: settings.cluster_spec.new_cluster.size
```
This branch:
```
{
  "job_id":<jobid>
}
```
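
The root cause is `omitempty`: with the standard library's `encoding/json`, an explicitly-set `"num_workers": 0` is dropped on re-marshal, so the API never receives the field. The SDK's `marshal` package (new in 0.23.0) tracks explicitly-set zero-valued fields in each struct's `ForceSendFields` list during unmarshalling and emits them again during marshalling, which is why the same `MarshalJSON`/`UnmarshalJSON` pair is added to every bundle resource wrapper in the diffs below. A minimal sketch of the pattern, assuming the `marshal` and `compute` packages from databricks-sdk-go v0.23.0 (the wrapper type and sample payload are illustrative only, not part of this PR):

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/marshal"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

// clusterWrapper is a hypothetical stand-in for the resource wrappers in
// this PR (Job, Pipeline, ...) that embed an SDK struct.
type clusterWrapper struct {
	*compute.ClusterSpec
}

// Route both directions through the SDK marshaller so that
// ForceSendFields on the embedded struct is populated and honored.
func (s *clusterWrapper) UnmarshalJSON(b []byte) error {
	return marshal.Unmarshal(b, s)
}

func (s clusterWrapper) MarshalJSON() ([]byte, error) {
	return marshal.Marshal(s)
}

func main() {
	c := clusterWrapper{ClusterSpec: &compute.ClusterSpec{}}

	// num_workers is explicitly 0; plain encoding/json would drop it on
	// re-marshal because the field is tagged omitempty.
	in := []byte(`{"num_workers": 0, "spark_version": "10.4.x-scala2.12"}`)
	if err := c.UnmarshalJSON(in); err != nil {
		panic(err)
	}

	out, err := c.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // should retain "num_workers":0
}
```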

---------

Co-authored-by: Miles Yucht <miles@databricks.com>

hectorcast-db committed 2023-10-16 08:56:06 +02:00 (via GitHub)
parent ff01898b61, commit 36f30c8b47
25 changed files with 307 additions and 298 deletions


@@ -1 +1 @@
-bcbf6e851e3d82fd910940910dd31c10c059746c
+493a76554afd3afdd15dc858773d01643f80352a

.gitattributes

@@ -10,7 +10,6 @@ cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true
 cmd/account/log-delivery/log-delivery.go linguist-generated=true
 cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true
 cmd/account/metastores/metastores.go linguist-generated=true
-cmd/account/network-policy/network-policy.go linguist-generated=true
 cmd/account/networks/networks.go linguist-generated=true
 cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true
 cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true


@@ -2,6 +2,7 @@ package resources
 import (
 	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/imdario/mergo"
 )
@@ -15,6 +16,14 @@ type Job struct {
 	*jobs.JobSettings
 }

+func (s *Job) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Job) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
 // MergeJobClusters merges job clusters with the same key.
 // The job clusters field is a slice, and as such, overrides are appended to it.
 // We can identify a job cluster by its key, however, so we can use this key


@@ -2,6 +2,7 @@ package resources
 import (
 	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
 	"github.com/databricks/databricks-sdk-go/service/ml"
 )
@@ -12,3 +13,11 @@ type MlflowExperiment struct {
 	*ml.Experiment
 }
+
+func (s *MlflowExperiment) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s MlflowExperiment) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}


@@ -2,6 +2,7 @@ package resources
 import (
 	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
 	"github.com/databricks/databricks-sdk-go/service/ml"
 )
@@ -12,3 +13,11 @@ type MlflowModel struct {
 	*ml.Model
 }
+
+func (s *MlflowModel) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s MlflowModel) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}


@@ -2,6 +2,7 @@ package resources
 import (
 	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
 	"github.com/databricks/databricks-sdk-go/service/serving"
 )
@@ -22,3 +23,11 @@ type ModelServingEndpoint struct {
 	// Implementation could be different based on the resource type.
 	Permissions []Permission `json:"permissions,omitempty"`
 }
+
+func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ModelServingEndpoint) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}


@@ -4,6 +4,7 @@ import (
 	"strings"

 	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
 	"github.com/imdario/mergo"
 )
@@ -17,6 +18,14 @@ type Pipeline struct {
 	*pipelines.PipelineSpec
 }

+func (s *Pipeline) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Pipeline) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
 // MergeClusters merges cluster definitions with same label.
 // The clusters field is a slice, and as such, overrides are appended to it.
 // We can identify a cluster by its label, however, so we can use this label


@@ -7,6 +7,7 @@ import (
 	"github.com/databricks/cli/libs/databrickscfg"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/config"
+	"github.com/databricks/databricks-sdk-go/marshal"
 	"github.com/databricks/databricks-sdk-go/service/iam"
 )
@@ -69,6 +70,14 @@ type User struct {
 	*iam.User
 }

+func (s *User) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s User) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
 func (w *Workspace) Client() (*databricks.WorkspaceClient, error) {
 	cfg := databricks.Config{
 		// Generic


@@ -4,6 +4,7 @@ package billable_usage
 import (
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/databricks-sdk-go/service/billing"
 	"github.com/spf13/cobra"
 )
@@ -80,11 +81,11 @@ func newDownload() *cobra.Command {
 		downloadReq.StartMonth = args[0]
 		downloadReq.EndMonth = args[1]

-		err = a.BillableUsage.Download(ctx, downloadReq)
+		response, err := a.BillableUsage.Download(ctx, downloadReq)
 		if err != nil {
 			return err
 		}
-		return nil
+		return cmdio.Render(ctx, response)
 	}

 	// Disable completions since they are not applicable.

cmd/account/cmd.go (generated)

@@ -16,7 +16,6 @@ import (
 	log_delivery "github.com/databricks/cli/cmd/account/log-delivery"
 	account_metastore_assignments "github.com/databricks/cli/cmd/account/metastore-assignments"
 	account_metastores "github.com/databricks/cli/cmd/account/metastores"
-	account_network_policy "github.com/databricks/cli/cmd/account/network-policy"
 	networks "github.com/databricks/cli/cmd/account/networks"
 	o_auth_enrollment "github.com/databricks/cli/cmd/account/o-auth-enrollment"
 	o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps"
@@ -50,7 +49,6 @@ func New() *cobra.Command {
 	cmd.AddCommand(log_delivery.New())
 	cmd.AddCommand(account_metastore_assignments.New())
 	cmd.AddCommand(account_metastores.New())
-	cmd.AddCommand(account_network_policy.New())
 	cmd.AddCommand(networks.New())
 	cmd.AddCommand(o_auth_enrollment.New())
 	cmd.AddCommand(o_auth_published_apps.New())


cmd/account/network-policy/network-policy.go (deleted)

@@ -1,243 +0,0 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package network_policy
import (
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/settings"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "network-policy",
Short: `Network policy is a set of rules that defines what can be accessed from your Databricks network.`,
Long: `Network policy is a set of rules that defines what can be accessed from your
Databricks network. E.g.: You can choose to block your SQL UDF to access
internet from your Databricks serverless clusters.
There is only one instance of this setting per account. Since this setting has
a default value, this setting is present on all accounts even though it's
never set on a given account. Deletion reverts the value of the setting back
to the default value.`,
GroupID: "settings",
Annotations: map[string]string{
"package": "settings",
},
// This service is being previewed; hide from help output.
Hidden: true,
}
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start delete-account-network-policy command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var deleteAccountNetworkPolicyOverrides []func(
*cobra.Command,
*settings.DeleteAccountNetworkPolicyRequest,
)
func newDeleteAccountNetworkPolicy() *cobra.Command {
cmd := &cobra.Command{}
var deleteAccountNetworkPolicyReq settings.DeleteAccountNetworkPolicyRequest
// TODO: short flags
cmd.Use = "delete-account-network-policy ETAG"
cmd.Short = `Delete Account Network Policy.`
cmd.Long = `Delete Account Network Policy.
Reverts back all the account network policies back to default.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustAccountClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
deleteAccountNetworkPolicyReq.Etag = args[0]
response, err := a.NetworkPolicy.DeleteAccountNetworkPolicy(ctx, deleteAccountNetworkPolicyReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range deleteAccountNetworkPolicyOverrides {
fn(cmd, &deleteAccountNetworkPolicyReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newDeleteAccountNetworkPolicy())
})
}
// start read-account-network-policy command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var readAccountNetworkPolicyOverrides []func(
*cobra.Command,
*settings.ReadAccountNetworkPolicyRequest,
)
func newReadAccountNetworkPolicy() *cobra.Command {
cmd := &cobra.Command{}
var readAccountNetworkPolicyReq settings.ReadAccountNetworkPolicyRequest
// TODO: short flags
cmd.Use = "read-account-network-policy ETAG"
cmd.Short = `Get Account Network Policy.`
cmd.Long = `Get Account Network Policy.
Gets the value of Account level Network Policy.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustAccountClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
readAccountNetworkPolicyReq.Etag = args[0]
response, err := a.NetworkPolicy.ReadAccountNetworkPolicy(ctx, readAccountNetworkPolicyReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range readAccountNetworkPolicyOverrides {
fn(cmd, &readAccountNetworkPolicyReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newReadAccountNetworkPolicy())
})
}
// start update-account-network-policy command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var updateAccountNetworkPolicyOverrides []func(
*cobra.Command,
*settings.UpdateAccountNetworkPolicyRequest,
)
func newUpdateAccountNetworkPolicy() *cobra.Command {
cmd := &cobra.Command{}
var updateAccountNetworkPolicyReq settings.UpdateAccountNetworkPolicyRequest
var updateAccountNetworkPolicyJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&updateAccountNetworkPolicyJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().BoolVar(&updateAccountNetworkPolicyReq.AllowMissing, "allow-missing", updateAccountNetworkPolicyReq.AllowMissing, `This should always be set to true for Settings RPCs.`)
// TODO: complex arg: setting
cmd.Use = "update-account-network-policy"
cmd.Short = `Update Account Network Policy.`
cmd.Long = `Update Account Network Policy.
Updates the policy content of Account level Network Policy.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(0)
if cmd.Flags().Changed("json") {
check = cobra.ExactArgs(0)
}
return check(cmd, args)
}
cmd.PreRunE = root.MustAccountClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
if cmd.Flags().Changed("json") {
err = updateAccountNetworkPolicyJson.Unmarshal(&updateAccountNetworkPolicyReq)
if err != nil {
return err
}
} else {
}
response, err := a.NetworkPolicy.UpdateAccountNetworkPolicy(ctx, updateAccountNetworkPolicyReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range updateAccountNetworkPolicyOverrides {
fn(cmd, &updateAccountNetworkPolicyReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newUpdateAccountNetworkPolicy())
})
}
// end service AccountNetworkPolicy


@@ -128,7 +128,7 @@ func newDelete() *cobra.Command {
 	cmd.Flags().BoolVar(&deleteReq.Force, "force", deleteReq.Force, `Force deletion even if the Storage Credential is not empty.`)

-	cmd.Use = "delete METASTORE_ID NAME"
+	cmd.Use = "delete METASTORE_ID STORAGE_CREDENTIAL_NAME"
 	cmd.Short = `Delete a storage credential.`
 	cmd.Long = `Delete a storage credential.
@@ -148,7 +148,7 @@ func newDelete() *cobra.Command {
 		a := root.AccountClient(ctx)

 		deleteReq.MetastoreId = args[0]
-		deleteReq.Name = args[1]
+		deleteReq.StorageCredentialName = args[1]

 		err = a.StorageCredentials.Delete(ctx, deleteReq)
 		if err != nil {
@@ -191,7 +191,7 @@ func newGet() *cobra.Command {
 	// TODO: short flags

-	cmd.Use = "get METASTORE_ID NAME"
+	cmd.Use = "get METASTORE_ID STORAGE_CREDENTIAL_NAME"
 	cmd.Short = `Gets the named storage credential.`
 	cmd.Long = `Gets the named storage credential.
@@ -212,7 +212,7 @@ func newGet() *cobra.Command {
 		a := root.AccountClient(ctx)

 		getReq.MetastoreId = args[0]
-		getReq.Name = args[1]
+		getReq.StorageCredentialName = args[1]

 		response, err := a.StorageCredentials.Get(ctx, getReq)
 		if err != nil {
@@ -321,7 +321,7 @@ func newUpdate() *cobra.Command {
 	// TODO: complex arg: credential_info

-	cmd.Use = "update METASTORE_ID NAME"
+	cmd.Use = "update METASTORE_ID STORAGE_CREDENTIAL_NAME"
 	cmd.Short = `Updates a storage credential.`
 	cmd.Long = `Updates a storage credential.
@@ -348,7 +348,7 @@ func newUpdate() *cobra.Command {
 			}
 		}
 		updateReq.MetastoreId = args[0]
-		updateReq.Name = args[1]
+		updateReq.StorageCredentialName = args[1]

 		response, err := a.StorageCredentials.Update(ctx, updateReq)
 		if err != nil {


@@ -160,7 +160,7 @@ func newCreate() *cobra.Command {
 	// TODO: short flags
 	cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)

-	cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, `Note: This field won't be true for webapp requests.`)
+	cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, ``)
 	// TODO: complex arg: autoscale
 	cmd.Flags().IntVar(&createReq.AutoterminationMinutes, "autotermination-minutes", createReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`)
 	// TODO: complex arg: aws_attributes
@@ -387,7 +387,7 @@ func newEdit() *cobra.Command {
 	// TODO: short flags
 	cmd.Flags().Var(&editJson, "json", `either inline JSON string or @path/to/file.json with request body`)

-	cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, `Note: This field won't be true for webapp requests.`)
+	cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, ``)
 	// TODO: complex arg: autoscale
 	cmd.Flags().IntVar(&editReq.AutoterminationMinutes, "autotermination-minutes", editReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`)
 	// TODO: complex arg: aws_attributes


@@ -330,6 +330,8 @@ func newUpdate() *cobra.Command {
// TODO: short flags
cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the connection.`)
cmd.Use = "update"
cmd.Short = `Update a connection.`
cmd.Long = `Update a connection.


@@ -239,17 +239,10 @@ func newEdit() *cobra.Command {
// TODO: short flags
cmd.Flags().Var(&editJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: complex arg: aws_attributes
// TODO: complex arg: azure_attributes
// TODO: map via StringToStringVar: custom_tags
// TODO: complex arg: disk_spec
cmd.Flags().BoolVar(&editReq.EnableElasticDisk, "enable-elastic-disk", editReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.`)
// TODO: complex arg: gcp_attributes
cmd.Flags().IntVar(&editReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", editReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`)
cmd.Flags().IntVar(&editReq.MaxCapacity, "max-capacity", editReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`)
cmd.Flags().IntVar(&editReq.MinIdleInstances, "min-idle-instances", editReq.MinIdleInstances, `Minimum number of idle instances to keep in the instance pool.`)
// TODO: array: preloaded_docker_images
// TODO: array: preloaded_spark_versions
cmd.Use = "edit INSTANCE_POOL_ID INSTANCE_POOL_NAME NODE_TYPE_ID"
cmd.Short = `Edit an existing instance pool.`


@@ -1256,11 +1256,11 @@ func newReset() *cobra.Command {
 	cmd.Flags().Var(&resetJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 	cmd.Use = "reset"
-	cmd.Short = `Overwrites all settings for a job.`
-	cmd.Long = `Overwrites all settings for a job.
+	cmd.Short = `Overwrite all settings for a job.`
+	cmd.Long = `Overwrite all settings for a job.

-  Overwrites all the settings for a specific job. Use the Update endpoint to
-  update job settings partially.`
+  Overwrite all settings for the given job. Use the Update endpoint to update
+  job settings partially.`

 	cmd.Annotations = make(map[string]string)


@@ -919,10 +919,11 @@ func newStartUpdate() *cobra.Command {
 	// TODO: array: refresh_selection

 	cmd.Use = "start-update PIPELINE_ID"
-	cmd.Short = `Queue a pipeline update.`
-	cmd.Long = `Queue a pipeline update.
+	cmd.Short = `Start a pipeline.`
+	cmd.Long = `Start a pipeline.

-  Starts or queues a pipeline update.`
+  Starts a new update for the pipeline. If there is already an active update for
+  the pipeline, the request will fail and the active update will remain running.`

 	cmd.Annotations = make(map[string]string)
@@ -1006,7 +1007,8 @@ func newStop() *cobra.Command {
 	cmd.Short = `Stop a pipeline.`
 	cmd.Long = `Stop a pipeline.

-  Stops a pipeline.`
+  Stops the pipeline by canceling the active update. If there is no active
+  update for the pipeline, this request is a no-op.`

 	cmd.Annotations = make(map[string]string)


@@ -77,14 +77,7 @@ func newCreate() *cobra.Command {
 	cmd.Short = `Create a storage credential.`
 	cmd.Long = `Create a storage credential.

-  Creates a new storage credential. The request object is specific to the cloud:
-
-  * **AwsIamRole** for AWS credentials. * **AzureServicePrincipal** for Azure
-  credentials. * **AzureManagedIdentity** for Azure managed credentials. *
-  **DatabricksGcpServiceAccount** for GCP managed credentials.
-
-  The caller must be a metastore admin and have the
-  **CREATE_STORAGE_CREDENTIAL** privilege on the metastore.`
+  Creates a new storage credential.`

 	cmd.Annotations = make(map[string]string)
@@ -371,9 +364,7 @@ func newUpdate() *cobra.Command {
 	cmd.Short = `Update a credential.`
 	cmd.Long = `Update a credential.

-  Updates a storage credential on the metastore. The caller must be the owner of
-  the storage credential or a metastore admin. If the caller is a metastore
-  admin, only the __owner__ credential can be changed.`
+  Updates a storage credential on the metastore.`

 	cmd.Annotations = make(map[string]string)


@@ -17,13 +17,25 @@ var cmdOverrides []func(*cobra.Command)
 func New() *cobra.Command {
 	cmd := &cobra.Command{
 		Use: "workspace-bindings",
-		Short: `A catalog in Databricks can be configured as __OPEN__ or __ISOLATED__.`,
-		Long: `A catalog in Databricks can be configured as __OPEN__ or __ISOLATED__. An
-  __OPEN__ catalog can be accessed from any workspace, while an __ISOLATED__
-  catalog can only be access from a configured list of workspaces.
+		Short: `A securable in Databricks can be configured as __OPEN__ or __ISOLATED__.`,
+		Long: `A securable in Databricks can be configured as __OPEN__ or __ISOLATED__. An
+  __OPEN__ securable can be accessed from any workspace, while an __ISOLATED__
+  securable can only be accessed from a configured list of workspaces. This API
+  allows you to configure (bind) securables to workspaces.

-  A catalog's workspace bindings can be configured by a metastore admin or the
-  owner of the catalog.`,
+  NOTE: The __isolation_mode__ is configured for the securable itself (using its
+  Update method) and the workspace bindings are only consulted when the
+  securable's __isolation_mode__ is set to __ISOLATED__.
+
+  A securable's workspace bindings can be configured by a metastore admin or the
+  owner of the securable.
+
+  The original path (/api/2.1/unity-catalog/workspace-bindings/catalogs/{name})
+  is deprecated. Please use the new path
+  (/api/2.1/unity-catalog/bindings/{securable_type}/{securable_name}) which
+  introduces the ability to bind a securable in READ_ONLY mode (catalogs only).
+
+  Securables that support binding: - catalog`,
 		GroupID: "catalog",
 		Annotations: map[string]string{
 			"package": "catalog",
@@ -100,6 +112,69 @@ func init() {
 	})
 }
// start get-bindings command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getBindingsOverrides []func(
*cobra.Command,
*catalog.GetBindingsRequest,
)
func newGetBindings() *cobra.Command {
cmd := &cobra.Command{}
var getBindingsReq catalog.GetBindingsRequest
// TODO: short flags
cmd.Use = "get-bindings SECURABLE_TYPE SECURABLE_NAME"
cmd.Short = `Get securable workspace bindings.`
cmd.Long = `Get securable workspace bindings.
Gets workspace bindings of the securable. The caller must be a metastore admin
or an owner of the securable.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(2)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
getBindingsReq.SecurableType = args[0]
getBindingsReq.SecurableName = args[1]
response, err := w.WorkspaceBindings.GetBindings(ctx, getBindingsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range getBindingsOverrides {
fn(cmd, &getBindingsReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newGetBindings())
})
}
// start update command
// Slice with functions to override default command behavior.
@@ -173,4 +248,78 @@ func init() {
})
}
// start update-bindings command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var updateBindingsOverrides []func(
*cobra.Command,
*catalog.UpdateWorkspaceBindingsParameters,
)
func newUpdateBindings() *cobra.Command {
cmd := &cobra.Command{}
var updateBindingsReq catalog.UpdateWorkspaceBindingsParameters
var updateBindingsJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&updateBindingsJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: array: add
// TODO: array: remove
cmd.Use = "update-bindings SECURABLE_TYPE SECURABLE_NAME"
cmd.Short = `Update securable workspace bindings.`
cmd.Long = `Update securable workspace bindings.
Updates workspace bindings of the securable. The caller must be a metastore
admin or an owner of the securable.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(2)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = updateBindingsJson.Unmarshal(&updateBindingsReq)
if err != nil {
return err
}
}
updateBindingsReq.SecurableType = args[0]
updateBindingsReq.SecurableName = args[1]
response, err := w.WorkspaceBindings.UpdateBindings(ctx, updateBindingsReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range updateBindingsOverrides {
fn(cmd, &updateBindingsReq)
}
return cmd
}
func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(newUpdateBindings())
})
}
// end service WorkspaceBindings


@@ -428,8 +428,10 @@ func newImport() *cobra.Command {
   Imports a workspace object (for example, a notebook or file) or the contents
   of an entire directory. If path already exists and overwrite is set to
-  false, this call returns an error RESOURCE_ALREADY_EXISTS. One can only
-  use DBC format to import a directory.`
+  false, this call returns an error RESOURCE_ALREADY_EXISTS. To import a
+  directory, you can use either the DBC format or the SOURCE format with the
+  language field unset. To import a single file as SOURCE, you must set the
+  language field.`

 	cmd.Annotations = make(map[string]string)

go.mod

@@ -4,7 +4,7 @@ go 1.21

 require (
 	github.com/briandowns/spinner v1.23.0 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.22.0 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.23.0 // Apache 2.0
 	github.com/fatih/color v1.15.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.3.1 // BSD-3-Clause
@@ -54,7 +54,7 @@ require (
 	golang.org/x/net v0.17.0 // indirect
 	golang.org/x/sys v0.13.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	google.golang.org/api v0.143.0 // indirect
+	google.golang.org/api v0.146.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
 	google.golang.org/grpc v1.58.2 // indirect

go.sum

@@ -29,8 +29,8 @@ github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEM
 github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/databricks/databricks-sdk-go v0.22.0 h1:CIwNZcOV7wYZmRLl1NWA+07f2j6H9h5L6MhR5O/4dRw=
-github.com/databricks/databricks-sdk-go v0.22.0/go.mod h1:COiklTN3IdieazXcs4TnMou5GQFwIM7uhMGrz7nEAAk=
+github.com/databricks/databricks-sdk-go v0.23.0 h1:rdLMA7cDUPJiCSMyuUSufzDDmugqyp79SNiY/vc7kMI=
+github.com/databricks/databricks-sdk-go v0.23.0/go.mod h1:a6rErRNh5bz+IJbO07nwW70iGyvtWidy1p/S5thepXI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -246,8 +246,8 @@ golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA=
-google.golang.org/api v0.143.0/go.mod h1:FoX9DO9hT7DLNn97OuoZAGSDuNAXdJRuGK98rSUgurk=
+google.golang.org/api v0.146.0 h1:9aBYT4vQXt9dhCuLNfwfd3zpwu8atg0yPkjBymwSrOM=
+google.golang.org/api v0.146.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=


@@ -5,13 +5,14 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"strings"
 	"testing"
 )

 // Detects if test is run from "debug test" feature in VS Code.
 func isInDebug() bool {
 	ex, _ := os.Executable()
-	return path.Base(ex) == "__debug_bin"
+	return strings.HasPrefix(path.Base(ex), "__debug_bin")
 }
// Loads debug environment from ~/.databricks/debug-env.json.

internal/jobs_test.go (new file)

@@ -0,0 +1,25 @@
package internal

import (
	"encoding/json"
	"fmt"
	"testing"

	"github.com/databricks/cli/internal/acc"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAccCreateJob(t *testing.T) {
	acc.WorkspaceTest(t)
	env := GetEnvOrSkipTest(t, "CLOUD_ENV")
	if env != "azure" {
		t.Skipf("Not running test on cloud %s", env)
	}
	stdout, stderr := RequireSuccessfulRun(t, "jobs", "create", "--json", "@testjsons/create_job_without_workers.json", "--log-level=debug")
	assert.Empty(t, stderr.String())
	var output map[string]int
	err := json.Unmarshal(stdout.Bytes(), &output)
	require.NoError(t, err)
	RequireSuccessfulRun(t, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug")
}
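
To run this integration test locally, something like `CLOUD_ENV=azure go test ./internal -run TestAccCreateJob` should work, assuming workspace credentials (e.g. `DATABRICKS_HOST`/`DATABRICKS_TOKEN`) are configured in the environment; the test skips itself on any cloud other than Azure.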


@@ -0,0 +1,35 @@
{
    "name": "create-job-without-workers",
    "job_clusters": [{
        "job_cluster_key": "create-job-without-workers-cluster",
        "new_cluster": {
            "num_workers": 0,
            "spark_version": "10.4.x-scala2.12",
            "node_type_id": "Standard_DS3_v2"
        }
    }],
    "tasks": [{
        "job_cluster_key": "create-job-without-workers-cluster",
        "task_key": "create-job-without-workers-cluster1",
        "libraries": [
            {
                "jar": "dbfs:/max/jars/exampleJarTask.jar"
            }
        ],
        "spark_jar_task": {
            "main_class_name": "com.databricks.quickstart.exampleTask"
        }
    },
    {
        "job_cluster_key": "create-job-without-workers-cluster",
        "task_key": "create-job-without-workers-cluster2",
        "libraries": [
            {
                "jar": "dbfs:/max/jars/exampleJarTask.jar"
            }
        ],
        "spark_jar_task": {
            "main_class_name": "com.databricks.quickstart.exampleTask"
        }
    }]
}