Merge remote-tracking branch 'upstream/main' into add-debug-runs

Lennart Kats 2023-06-23 11:03:47 +02:00
commit b7c53b2039
50 changed files with 340 additions and 256 deletions

View File

@ -58,7 +58,8 @@ func init() {
{{if .Request}}// TODO: short flags
{{.CamelName}}Cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
{{$method := .}}
{{range .Request.Fields -}}
{{ if not .IsJsonOnly }}
{{range .Request.Fields -}}
{{- if not .Required -}}
{{if .Entity.IsObject }}// TODO: complex arg: {{.Name}}
{{else if .Entity.IsAny }}// TODO: any: {{.Name}}
@ -69,8 +70,9 @@ func init() {
{{else}}{{$method.CamelName}}Cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
{{end}}
{{- end -}}
{{- end}}
{{end}}
{{- end}}
{{- end}}
{{end}}
}
{{- $excludeFromPrompts := list "workspace get-status" -}}
{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
@ -133,7 +135,7 @@ var {{.CamelName}}Cmd = &cobra.Command{
}
{{- end -}}
{{$method := .}}
{{- if .Request.IsAllRequiredFieldsPrimitive -}}
{{- if and .Request.IsAllRequiredFieldsPrimitive (not .IsJsonOnly) -}}
{{- range $arg, $field := .Request.RequiredFields}}
{{if not $field.Entity.IsString -}}
_, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})
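The template change above gates per-field flag generation and positional-argument parsing on (not .IsJsonOnly), so commands whose request is marked JSON-only accept input exclusively through --json; this is why several create commands further down now return an error when --json is omitted. A hypothetical rendering of the generated init() for such a command (variable names follow the generated-code pattern shown below):

func init() {
	Cmd.AddCommand(createCmd)
	// TODO: short flags
	createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
	// No per-field flags are emitted: the request is marked IsJsonOnly.
}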

View File

@ -2,9 +2,9 @@
[![build](https://github.com/databricks/cli/workflows/build/badge.svg?branch=main)](https://github.com/databricks/cli/actions?query=workflow%3Abuild+branch%3Amain)
This project is in public preview.
This project is in Public Preview.
Documentation about the full REST API coverage is avaialbe in the [docs folder](docs/commands.md).
Documentation about the full REST API coverage is available in the [docs folder](docs/commands.md).
Documentation is available at https://docs.databricks.com/dev-tools/cli/databricks-cli.html.

View File

@ -48,7 +48,7 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
err = b.WorkspaceClient().Workspace.Import(ctx, workspace.Import{
Path: artifact.RemotePath,
Overwrite: true,
Format: workspace.ExportFormatSource,
Format: workspace.ImportFormatSource,
Language: artifact.Language,
Content: base64.StdEncoding.EncodeToString(raw),
})

View File

@ -91,8 +91,6 @@ func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient {
return b.client
}
var cacheDirName = filepath.Join(".databricks", "bundle")
// CacheDir returns directory to use for temporary files for this bundle.
// Scoped to the bundle's environment.
func (b *Bundle) CacheDir(paths ...string) (string, error) {
@ -100,11 +98,20 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) {
panic("environment not set")
}
cacheDirName, exists := os.LookupEnv("DATABRICKS_BUNDLE_TMP")
if !exists || cacheDirName == "" {
cacheDirName = filepath.Join(
// Anchor at bundle root directory.
b.Config.Path,
// Static cache directory.
".databricks",
"bundle",
)
}
// Fixed components of the result path.
parts := []string{
// Anchor at bundle root directory.
b.Config.Path,
// Static cache directory.
cacheDirName,
// Scope with environment name.
b.Config.Bundle.Environment,
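With this change the cache directory is no longer hard-coded under the bundle root: DATABRICKS_BUNDLE_TMP, when set and non-empty, replaces the <bundle root>/.databricks/bundle anchor, and the environment name is still appended. A minimal, self-contained sketch of the resolution order (function name hypothetical):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cacheDir mirrors the resolution order introduced above.
func cacheDir(bundleRoot, environment string) string {
	base, ok := os.LookupEnv("DATABRICKS_BUNDLE_TMP")
	if !ok || base == "" {
		// Default: anchor at the bundle root, static cache directory.
		base = filepath.Join(bundleRoot, ".databricks", "bundle")
	}
	// Scope with the environment name.
	return filepath.Join(base, environment)
}

func main() {
	// Prints /path/to/bundle/.databricks/bundle/default when the
	// environment variable is unset.
	fmt.Println(cacheDir("/path/to/bundle", "default"))
}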

View File

@ -3,7 +3,6 @@ package bundle
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@ -35,9 +34,38 @@ func TestBundleCacheDir(t *testing.T) {
// This is otherwise done by [mutators.SelectEnvironment].
bundle.Config.Bundle.Environment = "default"
// unset env variable in case it's set
t.Setenv("DATABRICKS_BUNDLE_TMP", "")
cacheDir, err := bundle.CacheDir()
// format is <CWD>/.databricks/bundle/<environment>
assert.NoError(t, err)
assert.True(t, strings.HasPrefix(cacheDir, projectDir))
assert.Equal(t, filepath.Join(projectDir, ".databricks", "bundle", "default"), cacheDir)
}
func TestBundleCacheDirOverride(t *testing.T) {
projectDir := t.TempDir()
bundleTmpDir := t.TempDir()
f1, err := os.Create(filepath.Join(projectDir, "bundle.yml"))
require.NoError(t, err)
f1.Close()
bundle, err := Load(projectDir)
require.NoError(t, err)
// Artificially set environment.
// This is otherwise done by [mutators.SelectEnvironment].
bundle.Config.Bundle.Environment = "default"
// now we expect to use 'bundleTmpDir' instead of CWD/.databricks/bundle
t.Setenv("DATABRICKS_BUNDLE_TMP", bundleTmpDir)
cacheDir, err := bundle.CacheDir()
// format is <DATABRICKS_BUNDLE_TMP>/<environment>
assert.NoError(t, err)
assert.Equal(t, filepath.Join(bundleTmpDir, "default"), cacheDir)
}
func TestBundleMustLoadSuccess(t *testing.T) {

View File

@ -3,8 +3,12 @@
package schema
type DataSourceClusterPolicy struct {
Definition string `json:"definition,omitempty"`
Id string `json:"id,omitempty"`
MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
Name string `json:"name,omitempty"`
Definition string `json:"definition,omitempty"`
Description string `json:"description,omitempty"`
Id string `json:"id,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
Name string `json:"name,omitempty"`
PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
PolicyFamilyId string `json:"policy_family_id,omitempty"`
}

View File

@ -388,6 +388,11 @@ type DataSourceJobJobSettingsSettingsPythonWheelTask struct {
type DataSourceJobJobSettingsSettingsQueue struct {
}
type DataSourceJobJobSettingsSettingsRunAs struct {
ServicePrincipalName string `json:"service_principal_name,omitempty"`
UserName string `json:"user_name,omitempty"`
}
type DataSourceJobJobSettingsSettingsSchedule struct {
PauseStatus string `json:"pause_status,omitempty"`
QuartzCronExpression string `json:"quartz_cron_expression"`
@ -740,6 +745,7 @@ type DataSourceJobJobSettingsSettings struct {
PipelineTask *DataSourceJobJobSettingsSettingsPipelineTask `json:"pipeline_task,omitempty"`
PythonWheelTask *DataSourceJobJobSettingsSettingsPythonWheelTask `json:"python_wheel_task,omitempty"`
Queue *DataSourceJobJobSettingsSettingsQueue `json:"queue,omitempty"`
RunAs *DataSourceJobJobSettingsSettingsRunAs `json:"run_as,omitempty"`
Schedule *DataSourceJobJobSettingsSettingsSchedule `json:"schedule,omitempty"`
SparkJarTask *DataSourceJobJobSettingsSettingsSparkJarTask `json:"spark_jar_task,omitempty"`
SparkPythonTask *DataSourceJobJobSettingsSettingsSparkPythonTask `json:"spark_python_task,omitempty"`
@ -753,6 +759,7 @@ type DataSourceJobJobSettings struct {
CreatedTime int `json:"created_time,omitempty"`
CreatorUserName string `json:"creator_user_name,omitempty"`
JobId int `json:"job_id,omitempty"`
RunAsUserName string `json:"run_as_user_name,omitempty"`
Settings *DataSourceJobJobSettingsSettings `json:"settings,omitempty"`
}

View File

@ -3,14 +3,15 @@
package schema
type ResourceCatalog struct {
Comment string `json:"comment,omitempty"`
ForceDestroy bool `json:"force_destroy,omitempty"`
Id string `json:"id,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name"`
Owner string `json:"owner,omitempty"`
Properties map[string]string `json:"properties,omitempty"`
ProviderName string `json:"provider_name,omitempty"`
ShareName string `json:"share_name,omitempty"`
StorageRoot string `json:"storage_root,omitempty"`
Comment string `json:"comment,omitempty"`
ForceDestroy bool `json:"force_destroy,omitempty"`
Id string `json:"id,omitempty"`
IsolationMode string `json:"isolation_mode,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name"`
Owner string `json:"owner,omitempty"`
Properties map[string]string `json:"properties,omitempty"`
ProviderName string `json:"provider_name,omitempty"`
ShareName string `json:"share_name,omitempty"`
StorageRoot string `json:"storage_root,omitempty"`
}

View File

@ -5,10 +5,12 @@ package schema
type ResourceExternalLocation struct {
Comment string `json:"comment,omitempty"`
CredentialName string `json:"credential_name"`
ForceDestroy bool `json:"force_destroy,omitempty"`
Id string `json:"id,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name"`
Owner string `json:"owner,omitempty"`
ReadOnly bool `json:"read_only,omitempty"`
SkipValidation bool `json:"skip_validation,omitempty"`
Url string `json:"url"`
}

View File

@ -388,6 +388,11 @@ type ResourceJobPythonWheelTask struct {
type ResourceJobQueue struct {
}
type ResourceJobRunAs struct {
ServicePrincipalName string `json:"service_principal_name,omitempty"`
UserName string `json:"user_name,omitempty"`
}
type ResourceJobSchedule struct {
PauseStatus string `json:"pause_status,omitempty"`
QuartzCronExpression string `json:"quartz_cron_expression"`
@ -743,6 +748,7 @@ type ResourceJob struct {
PipelineTask *ResourceJobPipelineTask `json:"pipeline_task,omitempty"`
PythonWheelTask *ResourceJobPythonWheelTask `json:"python_wheel_task,omitempty"`
Queue *ResourceJobQueue `json:"queue,omitempty"`
RunAs *ResourceJobRunAs `json:"run_as,omitempty"`
Schedule *ResourceJobSchedule `json:"schedule,omitempty"`
SparkJarTask *ResourceJobSparkJarTask `json:"spark_jar_task,omitempty"`
SparkPythonTask *ResourceJobSparkPythonTask `json:"spark_python_task,omitempty"`

View File

@ -25,7 +25,8 @@ type ResourceModelServingConfig struct {
}
type ResourceModelServing struct {
Id string `json:"id,omitempty"`
Name string `json:"name"`
Config *ResourceModelServingConfig `json:"config,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name"`
ServingEndpointId string `json:"serving_endpoint_id,omitempty"`
Config *ResourceModelServingConfig `json:"config,omitempty"`
}

View File

@ -26,6 +26,7 @@ type ResourcePermissions struct {
RegisteredModelId string `json:"registered_model_id,omitempty"`
RepoId string `json:"repo_id,omitempty"`
RepoPath string `json:"repo_path,omitempty"`
ServingEndpointId string `json:"serving_endpoint_id,omitempty"`
SqlAlertId string `json:"sql_alert_id,omitempty"`
SqlDashboardId string `json:"sql_dashboard_id,omitempty"`
SqlEndpointId string `json:"sql_endpoint_id,omitempty"`

View File

@ -32,6 +32,7 @@ type ResourceStorageCredential struct {
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name"`
Owner string `json:"owner,omitempty"`
ReadOnly bool `json:"read_only,omitempty"`
AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"`
AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"`
AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"`

View File

@ -0,0 +1,14 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceVolume struct {
CatalogName string `json:"catalog_name"`
Comment string `json:"comment,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name"`
Owner string `json:"owner,omitempty"`
SchemaName string `json:"schema_name"`
StorageLocation string `json:"storage_location,omitempty"`
VolumeType string `json:"volume_type"`
}
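The new ResourceVolume struct maps the databricks_volume Terraform resource; the required fields (catalog_name, name, schema_name, volume_type) carry no omitempty tag. A small sketch of what an instance marshals to (field values hypothetical; struct copied locally so the example is runnable):

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the generated struct, for illustration only.
type ResourceVolume struct {
	CatalogName     string `json:"catalog_name"`
	Comment         string `json:"comment,omitempty"`
	Id              string `json:"id,omitempty"`
	Name            string `json:"name"`
	Owner           string `json:"owner,omitempty"`
	SchemaName      string `json:"schema_name"`
	StorageLocation string `json:"storage_location,omitempty"`
	VolumeType      string `json:"volume_type"`
}

func main() {
	b, _ := json.MarshalIndent(ResourceVolume{
		CatalogName: "main",      // hypothetical
		Name:        "raw_files", // hypothetical
		SchemaName:  "default",   // hypothetical
		VolumeType:  "MANAGED",
	}, "", "  ")
	fmt.Println(string(b))
}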

View File

@ -74,6 +74,7 @@ type Resources struct {
User map[string]*ResourceUser `json:"databricks_user,omitempty"`
UserInstanceProfile map[string]*ResourceUserInstanceProfile `json:"databricks_user_instance_profile,omitempty"`
UserRole map[string]*ResourceUserRole `json:"databricks_user_role,omitempty"`
Volume map[string]*ResourceVolume `json:"databricks_volume,omitempty"`
WorkspaceConf map[string]*ResourceWorkspaceConf `json:"databricks_workspace_conf,omitempty"`
WorkspaceFile map[string]*ResourceWorkspaceFile `json:"databricks_workspace_file,omitempty"`
}
@ -151,6 +152,7 @@ func NewResources() *Resources {
User: make(map[string]*ResourceUser),
UserInstanceProfile: make(map[string]*ResourceUserInstanceProfile),
UserRole: make(map[string]*ResourceUserRole),
Volume: make(map[string]*ResourceVolume),
WorkspaceConf: make(map[string]*ResourceWorkspaceConf),
WorkspaceFile: make(map[string]*ResourceWorkspaceFile),
}

View File

@ -21,25 +21,75 @@ var Cmd = &cobra.Command{
Annotations: map[string]string{
"package": "iam",
},
// This service is being previewed; hide from help output.
Hidden: true,
}
// start get command
// start get-assignable-roles-for-resource command
var getReq iam.GetAccountAccessControlRequest
var getJson flags.JsonFlag
var getAssignableRolesForResourceReq iam.GetAssignableRolesForResourceRequest
var getAssignableRolesForResourceJson flags.JsonFlag
func init() {
Cmd.AddCommand(getCmd)
Cmd.AddCommand(getAssignableRolesForResourceCmd)
// TODO: short flags
getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)
getAssignableRolesForResourceCmd.Flags().Var(&getAssignableRolesForResourceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
}
var getCmd = &cobra.Command{
Use: "get NAME ETAG",
var getAssignableRolesForResourceCmd = &cobra.Command{
Use: "get-assignable-roles-for-resource RESOURCE",
Short: `Get assignable roles for a resource.`,
Long: `Get assignable roles for a resource.
Gets all the roles that can be granted on an account level resource. A role is
grantable if the rule set on the resource can contain an access rule of the
role.`,
Annotations: map[string]string{},
Args: func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
if cmd.Flags().Changed("json") {
check = cobra.ExactArgs(0)
}
return check(cmd, args)
},
PreRunE: root.MustAccountClient,
RunE: func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
if cmd.Flags().Changed("json") {
err = getAssignableRolesForResourceJson.Unmarshal(&getAssignableRolesForResourceReq)
if err != nil {
return err
}
} else {
getAssignableRolesForResourceReq.Resource = args[0]
}
response, err := a.AccessControl.GetAssignableRolesForResource(ctx, getAssignableRolesForResourceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
},
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
ValidArgsFunction: cobra.NoFileCompletions,
}
// start get-rule-set command
var getRuleSetReq iam.GetRuleSetRequest
var getRuleSetJson flags.JsonFlag
func init() {
Cmd.AddCommand(getRuleSetCmd)
// TODO: short flags
getRuleSetCmd.Flags().Var(&getRuleSetJson, "json", `either inline JSON string or @path/to/file.json with request body`)
}
var getRuleSetCmd = &cobra.Command{
Use: "get-rule-set NAME ETAG",
Short: `Get a rule set.`,
Long: `Get a rule set.
@ -60,16 +110,16 @@ var getCmd = &cobra.Command{
ctx := cmd.Context()
a := root.AccountClient(ctx)
if cmd.Flags().Changed("json") {
err = getJson.Unmarshal(&getReq)
err = getRuleSetJson.Unmarshal(&getRuleSetReq)
if err != nil {
return err
}
} else {
getReq.Name = args[0]
getReq.Etag = args[1]
getRuleSetReq.Name = args[0]
getRuleSetReq.Etag = args[1]
}
response, err := a.AccessControl.Get(ctx, getReq)
response, err := a.AccessControl.GetRuleSet(ctx, getRuleSetReq)
if err != nil {
return err
}
@ -80,73 +130,20 @@ var getCmd = &cobra.Command{
ValidArgsFunction: cobra.NoFileCompletions,
}
// start list command
// start update-rule-set command
var listReq iam.ListAccountAccessControlRequest
var listJson flags.JsonFlag
var updateRuleSetReq iam.UpdateRuleSetRequest
var updateRuleSetJson flags.JsonFlag
func init() {
Cmd.AddCommand(listCmd)
Cmd.AddCommand(updateRuleSetCmd)
// TODO: short flags
listCmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`)
updateRuleSetCmd.Flags().Var(&updateRuleSetJson, "json", `either inline JSON string or @path/to/file.json with request body`)
}
var listCmd = &cobra.Command{
Use: "list NAME",
Short: `List assignable roles on a resource.`,
Long: `List assignable roles on a resource.
Gets all the roles that can be granted on an account level resource. A role is
grantable if the rule set on the resource can contain an access rule of the
role.`,
Annotations: map[string]string{},
Args: func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
if cmd.Flags().Changed("json") {
check = cobra.ExactArgs(0)
}
return check(cmd, args)
},
PreRunE: root.MustAccountClient,
RunE: func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
a := root.AccountClient(ctx)
if cmd.Flags().Changed("json") {
err = listJson.Unmarshal(&listReq)
if err != nil {
return err
}
} else {
listReq.Name = args[0]
}
response, err := a.AccessControl.List(ctx, listReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
},
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
ValidArgsFunction: cobra.NoFileCompletions,
}
// start update command
var updateReq iam.UpdateRuleSetRequest
var updateJson flags.JsonFlag
func init() {
Cmd.AddCommand(updateCmd)
// TODO: short flags
updateCmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
}
var updateCmd = &cobra.Command{
Use: "update",
var updateRuleSetCmd = &cobra.Command{
Use: "update-rule-set",
Short: `Update a rule set.`,
Long: `Update a rule set.
@ -160,20 +157,15 @@ var updateCmd = &cobra.Command{
ctx := cmd.Context()
a := root.AccountClient(ctx)
if cmd.Flags().Changed("json") {
err = updateJson.Unmarshal(&updateReq)
err = updateRuleSetJson.Unmarshal(&updateRuleSetReq)
if err != nil {
return err
}
} else {
updateReq.Name = args[0]
_, err = fmt.Sscan(args[1], &updateReq.RuleSet)
if err != nil {
return fmt.Errorf("invalid RULE_SET: %s", args[1])
}
updateReq.Etag = args[2]
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := a.AccessControl.Update(ctx, updateReq)
response, err := a.AccessControl.UpdateRuleSet(ctx, updateRuleSetReq)
if err != nil {
return err
}
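The renamed commands above now mirror the SDK surface one-to-one (GetAssignableRolesForResource, GetRuleSet, UpdateRuleSet). A hedged sketch of calling the first of these directly through the Go SDK (the resource name format is an assumption; authentication is read from the environment):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/iam"
)

func main() {
	ctx := context.Background()
	a, err := databricks.NewAccountClient()
	if err != nil {
		panic(err)
	}
	resp, err := a.AccessControl.GetAssignableRolesForResource(ctx, iam.GetAssignableRolesForResourceRequest{
		Resource: "accounts/<account-id>", // hypothetical resource name
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp)
}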

View File

@ -55,7 +55,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := a.Budgets.Create(ctx, createReq)
@ -254,7 +254,7 @@ var updateCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = a.Budgets.Update(ctx, updateReq)

View File

@ -68,7 +68,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := a.Credentials.Create(ctx, createReq)

View File

@ -63,7 +63,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := a.CustomAppIntegration.Create(ctx, createReq)

View File

@ -85,7 +85,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := a.EncryptionKeys.Create(ctx, createReq)

View File

@ -85,7 +85,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := a.IpAccessLists.Create(ctx, createReq)
@ -292,7 +292,7 @@ var replaceCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = a.IpAccessLists.Replace(ctx, replaceReq)
@ -351,7 +351,7 @@ var updateCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = a.IpAccessLists.Update(ctx, updateReq)

View File

@ -295,11 +295,11 @@ var updateCmd = &cobra.Command{
updateReq.MetastoreId = args[1]
}
response, err := a.MetastoreAssignments.Update(ctx, updateReq)
err = a.MetastoreAssignments.Update(ctx, updateReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
return nil
},
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.

View File

@ -65,7 +65,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := a.Storage.Create(ctx, createReq)

View File

@ -221,7 +221,7 @@ var updateCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = a.WorkspaceAssignment.Update(ctx, updateReq)

View File

@ -2,13 +2,20 @@ package auth
import (
"context"
"fmt"
"time"
"github.com/databricks/cli/libs/auth"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/databrickscfg"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/spf13/cobra"
)
var loginTimeout time.Duration
var configureCluster bool
var loginCmd = &cobra.Command{
Use: "login [HOST]",
@ -17,10 +24,68 @@ var loginCmd = &cobra.Command{
if perisistentAuth.Host == "" && len(args) == 1 {
perisistentAuth.Host = args[0]
}
defer perisistentAuth.Close()
ctx, cancel := context.WithTimeout(cmd.Context(), loginTimeout)
defer cancel()
return perisistentAuth.Challenge(ctx)
var profileName string
profileFlag := cmd.Flag("profile")
if profileFlag != nil && profileFlag.Value.String() != "" {
profileName = profileFlag.Value.String()
} else {
prompt := cmdio.Prompt(ctx)
prompt.Label = "Databricks Profile Name"
prompt.Default = perisistentAuth.ProfileName()
prompt.AllowEdit = true
profile, err := prompt.Run()
if err != nil {
return err
}
profileName = profile
}
err := perisistentAuth.Challenge(ctx)
if err != nil {
return err
}
// We need the config without the profile before it's used to initialise new workspace client below.
// Otherwise it will complain about non existing profile because it was not yet saved.
cfg := config.Config{
Host: perisistentAuth.Host,
AccountID: perisistentAuth.AccountID,
AuthType: "databricks-cli",
}
if configureCluster {
w, err := databricks.NewWorkspaceClient((*databricks.Config)(&cfg))
if err != nil {
return err
}
ctx := cmd.Context()
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "Loading list of clusters to select from"
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load clusters list. Original error: %w", err)
}
clusterId, err := cmdio.Select(ctx, names, "Choose cluster")
if err != nil {
return err
}
cfg.ClusterID = clusterId
}
cfg.Profile = profileName
err = databrickscfg.SaveToProfile(ctx, &cfg)
if err != nil {
return err
}
cmdio.LogString(ctx, fmt.Sprintf("Profile %s was successfully saved", profileName))
return nil
},
}
@ -28,4 +93,7 @@ func init() {
authCmd.AddCommand(loginCmd)
loginCmd.Flags().DurationVar(&loginTimeout, "timeout", auth.DefaultTimeout,
"Timeout for completing login challenge in the browser")
loginCmd.Flags().BoolVar(&configureCluster, "configure-cluster", false,
"Prompts to configure cluster")
}
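After the challenge succeeds, the login command now persists the profile itself instead of relying on Challenge to do it (see the libs/auth change further down). A minimal sketch of the save step in isolation (host and profile name hypothetical):

package main

import (
	"context"

	"github.com/databricks/cli/libs/databrickscfg"
	"github.com/databricks/databricks-sdk-go/config"
)

func main() {
	ctx := context.Background()
	cfg := config.Config{
		Host:     "https://example.cloud.databricks.com", // hypothetical
		AuthType: "databricks-cli",
		Profile:  "my-profile", // written as the section header, not as a key
	}
	if err := databrickscfg.SaveToProfile(ctx, &cfg); err != nil {
		panic(err)
	}
}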

View File

@ -46,7 +46,7 @@ func textOutput(ctx context.Context, ch <-chan sync.Event, w io.Writer) {
// Sync events produce an empty string if nothing happened.
if str := e.String(); str != "" {
bw.WriteString(str)
bw.WriteString("\r\n")
bw.WriteString("\n")
bw.Flush()
}
}

View File

@ -60,7 +60,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Alerts.Create(ctx, createReq)
@ -260,7 +260,7 @@ var updateCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = w.Alerts.Update(ctx, updateReq)

View File

@ -18,26 +18,26 @@ var Cmd = &cobra.Command{
Short: `The Clusters API allows you to create, start, edit, list, terminate, and delete clusters.`,
Long: `The Clusters API allows you to create, start, edit, list, terminate, and
delete clusters.
Databricks maps cluster node instance types to compute units known as DBUs.
See the instance type pricing page for a list of the supported instance types
and their corresponding DBUs.
A Databricks cluster is a set of computation resources and configurations on
which you run data engineering, data science, and data analytics workloads,
such as production ETL pipelines, streaming analytics, ad-hoc analytics, and
machine learning.
You run these workloads as a set of commands in a notebook or as an automated
job. Databricks makes a distinction between all-purpose clusters and job
clusters. You use all-purpose clusters to analyze data collaboratively using
interactive notebooks. You use job clusters to run fast and robust automated
jobs.
You can create an all-purpose cluster using the UI, CLI, or REST API. You can
manually terminate and restart an all-purpose cluster. Multiple users can
share such clusters to do collaborative interactive analysis.
IMPORTANT: Databricks retains cluster configuration information for up to 200
all-purpose clusters terminated in the last 30 days and up to 30 job clusters
recently terminated by the job scheduler. To keep an all-purpose cluster
@ -64,7 +64,7 @@ var changeOwnerCmd = &cobra.Command{
Use: "change-owner CLUSTER_ID OWNER_USERNAME",
Short: `Change cluster owner.`,
Long: `Change cluster owner.
Change the owner of the cluster. You must be an admin to perform this
operation.`,
@ -147,17 +147,17 @@ var createCmd = &cobra.Command{
Use: "create SPARK_VERSION",
Short: `Create new cluster.`,
Long: `Create new cluster.
Creates a new Spark cluster. This method will acquire new instances from the
cloud provider if necessary. This method is asynchronous; the returned
cluster_id can be used to poll the cluster status. When this method returns,
the cluster will be in a PENDING state. The cluster will be usable once it
enters a RUNNING state.
Note: Databricks may not be able to acquire some of the requested nodes, due
to cloud provider limitations (account limits, spot price, etc.) or transient
network issues.
If Databricks acquires at least 85% of the requested on-demand nodes, cluster
creation will succeed. Otherwise the cluster will terminate with an
informative error message.`,
@ -227,7 +227,7 @@ var deleteCmd = &cobra.Command{
Use: "delete CLUSTER_ID",
Short: `Terminate cluster.`,
Long: `Terminate cluster.
Terminates the Spark cluster with the specified ID. The cluster is removed
asynchronously. Once the termination has completed, the cluster will be in a
TERMINATED state. If the cluster is already in a TERMINATING or
@ -336,18 +336,18 @@ var editCmd = &cobra.Command{
Use: "edit CLUSTER_ID SPARK_VERSION",
Short: `Update cluster configuration.`,
Long: `Update cluster configuration.
Updates the configuration of a cluster to match the provided attributes and
size. A cluster can be updated if it is in a RUNNING or TERMINATED state.
If a cluster is updated while in a RUNNING state, it will be restarted so
that the new attributes can take effect.
If a cluster is updated while in a TERMINATED state, it will remain
TERMINATED. The next time it is started using the clusters/start API, the
new attributes will take effect. Any attempt to update a cluster in any other
state will be rejected with an INVALID_STATE error code.
Clusters created by the Databricks Jobs service cannot be edited.`,
Annotations: map[string]string{},
@ -418,7 +418,7 @@ var eventsCmd = &cobra.Command{
Use: "events CLUSTER_ID",
Short: `List cluster activity events.`,
Long: `List cluster activity events.
Retrieves a list of events about the activity of a cluster. This API is
paginated. If there are more events to read, the response includes all the
parameters necessary to request the next page of events.`,
@ -486,7 +486,7 @@ var getCmd = &cobra.Command{
Use: "get CLUSTER_ID",
Short: `Get cluster info.`,
Long: `Get cluster info.
Retrieves the information for a cluster given its identifier. Clusters can be
described while they are running, or up to 60 days after they are terminated.`,
@ -550,11 +550,11 @@ var listCmd = &cobra.Command{
Use: "list",
Short: `List all clusters.`,
Long: `List all clusters.
Return information about all pinned clusters, active clusters, up to 200 of
the most recently terminated all-purpose clusters in the past 30 days, and up
to 30 of the most recently terminated job clusters in the past 30 days.
For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated
all-purpose clusters in the past 30 days, and 50 terminated job clusters in
the past 30 days, then this API returns the 1 pinned cluster, 4 active
@ -603,7 +603,7 @@ var listNodeTypesCmd = &cobra.Command{
Use: "list-node-types",
Short: `List node types.`,
Long: `List node types.
Returns a list of supported Spark node types. These node types can be used to
launch a cluster.`,
@ -634,7 +634,7 @@ var listZonesCmd = &cobra.Command{
Use: "list-zones",
Short: `List availability zones.`,
Long: `List availability zones.
Returns a list of availability zones where clusters can be created in (For
example, us-west-2a). These zones can be used to launch a cluster.`,
@ -670,10 +670,10 @@ var permanentDeleteCmd = &cobra.Command{
Use: "permanent-delete CLUSTER_ID",
Short: `Permanently delete cluster.`,
Long: `Permanently delete cluster.
Permanently deletes a Spark cluster. This cluster is terminated and resources
are asynchronously removed.
In addition, users will no longer see permanently deleted clusters in the
cluster list, and API users can no longer perform any action on permanently
deleted clusters.`,
@ -736,7 +736,7 @@ var pinCmd = &cobra.Command{
Use: "pin CLUSTER_ID",
Short: `Pin cluster.`,
Long: `Pin cluster.
Pinning a cluster ensures that the cluster will always be returned by the
ListClusters API. Pinning a cluster that is already pinned will have no
effect. This API can only be called by workspace admins.`,
@ -807,7 +807,7 @@ var resizeCmd = &cobra.Command{
Use: "resize CLUSTER_ID",
Short: `Resize cluster.`,
Long: `Resize cluster.
Resizes a cluster to have a desired number of workers. This will fail unless
the cluster is in a RUNNING state.`,
@ -888,7 +888,7 @@ var restartCmd = &cobra.Command{
Use: "restart CLUSTER_ID",
Short: `Restart cluster.`,
Long: `Restart cluster.
Restarts a Spark cluster with the supplied ID. If the cluster is not currently
in a RUNNING state, nothing will happen.`,
@ -957,7 +957,7 @@ var sparkVersionsCmd = &cobra.Command{
Use: "spark-versions",
Short: `List available Spark versions.`,
Long: `List available Spark versions.
Returns the list of available Spark versions. These versions can be used to
launch a cluster.`,
@ -998,10 +998,10 @@ var startCmd = &cobra.Command{
Use: "start CLUSTER_ID",
Short: `Start terminated cluster.`,
Long: `Start terminated cluster.
Starts a terminated Spark cluster with the supplied ID. This works similar to
createCluster except:
* The previous cluster id and attributes are preserved. * The cluster starts
with the last specified cluster size. * If the previous cluster was an
autoscaling cluster, the current cluster starts with the minimum number of
@ -1078,7 +1078,7 @@ var unpinCmd = &cobra.Command{
Use: "unpin CLUSTER_ID",
Short: `Unpin cluster.`,
Long: `Unpin cluster.
Unpinning a cluster will allow the cluster to eventually be removed from the
ListClusters API. Unpinning a cluster that is not pinned will have no effect.
This API can only be called by workspace admins.`,

View File

@ -73,7 +73,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Connections.Create(ctx, createReq)
@ -269,7 +269,7 @@ var updateCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Connections.Update(ctx, updateReq)

View File

@ -36,11 +36,6 @@ func init() {
// TODO: short flags
createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
createCmd.Flags().BoolVar(&createReq.IsFavorite, "is-favorite", createReq.IsFavorite, `Indicates whether this query object should appear in the current user's favorites list.`)
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `The title of this dashboard that appears in list views and at the top of the dashboard page.`)
createCmd.Flags().StringVar(&createReq.Parent, "parent", createReq.Parent, `The identifier of the workspace folder containing the dashboard.`)
// TODO: array: tags
}
var createCmd = &cobra.Command{
@ -66,6 +61,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Dashboards.Create(ctx, createReq)

View File

@ -66,7 +66,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Functions.Create(ctx, createReq)

View File

@ -53,7 +53,6 @@ func init() {
// TODO: map via StringToStringVar: custom_tags
// TODO: complex arg: disk_spec
createCmd.Flags().BoolVar(&createReq.EnableElasticDisk, "enable-elastic-disk", createReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.`)
// TODO: complex arg: gcp_attributes
createCmd.Flags().IntVar(&createReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", createReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`)
// TODO: complex arg: instance_pool_fleet_attributes
createCmd.Flags().IntVar(&createReq.MaxCapacity, "max-capacity", createReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`)
@ -180,7 +179,6 @@ func init() {
// TODO: map via StringToStringVar: custom_tags
// TODO: complex arg: disk_spec
editCmd.Flags().BoolVar(&editReq.EnableElasticDisk, "enable-elastic-disk", editReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.`)
// TODO: complex arg: gcp_attributes
editCmd.Flags().IntVar(&editReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", editReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`)
// TODO: complex arg: instance_pool_fleet_attributes
editCmd.Flags().IntVar(&editReq.MaxCapacity, "max-capacity", editReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`)

View File

@ -86,7 +86,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.IpAccessLists.Create(ctx, createReq)
@ -295,7 +295,7 @@ var replaceCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = w.IpAccessLists.Replace(ctx, replaceReq)
@ -356,7 +356,7 @@ var updateCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = w.IpAccessLists.Update(ctx, updateReq)

View File

@ -203,23 +203,6 @@ func init() {
// TODO: short flags
createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
// TODO: array: access_control_list
// TODO: complex arg: continuous
// TODO: complex arg: email_notifications
createCmd.Flags().Var(&createReq.Format, "format", `Used to tell what is the format of the job.`)
// TODO: complex arg: git_source
// TODO: array: job_clusters
createCmd.Flags().IntVar(&createReq.MaxConcurrentRuns, "max-concurrent-runs", createReq.MaxConcurrentRuns, `An optional maximum allowed number of concurrent runs of the job.`)
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `An optional name for the job.`)
// TODO: complex arg: notification_settings
// TODO: complex arg: run_as
// TODO: complex arg: schedule
// TODO: map via StringToStringVar: tags
// TODO: array: tasks
createCmd.Flags().IntVar(&createReq.TimeoutSeconds, "timeout-seconds", createReq.TimeoutSeconds, `An optional timeout applied to each run of this job.`)
// TODO: complex arg: trigger
// TODO: complex arg: webhook_notifications
}
var createCmd = &cobra.Command{
@ -247,6 +230,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Jobs.Create(ctx, createReq)
@ -912,7 +896,7 @@ var resetCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = w.Jobs.Reset(ctx, resetReq)

View File

@ -172,7 +172,7 @@ var installCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = w.Libraries.Install(ctx, installReq)
@ -218,7 +218,7 @@ var uninstallCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
err = w.Libraries.Uninstall(ctx, uninstallReq)

View File

@ -349,7 +349,7 @@ var createWebhookCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.ModelRegistry.CreateWebhook(ctx, createWebhookReq)

View File

@ -45,25 +45,6 @@ func init() {
// TODO: short flags
createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
createCmd.Flags().BoolVar(&createReq.AllowDuplicateNames, "allow-duplicate-names", createReq.AllowDuplicateNames, `If false, deployment will fail if name conflicts with that of another pipeline.`)
createCmd.Flags().StringVar(&createReq.Catalog, "catalog", createReq.Catalog, `A catalog in Unity Catalog to publish data from this pipeline to.`)
createCmd.Flags().StringVar(&createReq.Channel, "channel", createReq.Channel, `DLT Release Channel that specifies which version to use.`)
// TODO: array: clusters
// TODO: map via StringToStringVar: configuration
createCmd.Flags().BoolVar(&createReq.Continuous, "continuous", createReq.Continuous, `Whether the pipeline is continuous or triggered.`)
createCmd.Flags().BoolVar(&createReq.Development, "development", createReq.Development, `Whether the pipeline is in Development mode.`)
createCmd.Flags().BoolVar(&createReq.DryRun, "dry-run", createReq.DryRun, ``)
createCmd.Flags().StringVar(&createReq.Edition, "edition", createReq.Edition, `Pipeline product edition.`)
// TODO: complex arg: filters
createCmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Unique identifier for this pipeline.`)
// TODO: array: libraries
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Friendly identifier for this pipeline.`)
createCmd.Flags().BoolVar(&createReq.Photon, "photon", createReq.Photon, `Whether Photon is enabled for this pipeline.`)
createCmd.Flags().BoolVar(&createReq.Serverless, "serverless", createReq.Serverless, `Whether serverless compute is enabled for this pipeline.`)
createCmd.Flags().StringVar(&createReq.Storage, "storage", createReq.Storage, `DBFS root directory for storing checkpoints and tables.`)
createCmd.Flags().StringVar(&createReq.Target, "target", createReq.Target, `Target schema (database) to add tables in this pipeline to.`)
// TODO: complex arg: trigger
}
var createCmd = &cobra.Command{
@ -92,6 +73,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Pipelines.Create(ctx, createReq)

View File

@ -312,7 +312,7 @@ var listSharesCmd = &cobra.Command{
listSharesReq.Name = args[0]
}
response, err := w.Providers.ListShares(ctx, listSharesReq)
response, err := w.Providers.ListSharesAll(ctx, listSharesReq)
if err != nil {
return err
}
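ListShares returns a single response page, while the generated ListSharesAll variant drains every page into a flat slice, which is what cmdio.Render expects; the Tables.ListSummaries change further down follows the same pattern. A hedged sketch of the direct SDK call (provider name hypothetical; package and type names assumed from the SDK's sharing service):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sharing"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}
	shares, err := w.Providers.ListSharesAll(ctx, sharing.ListSharesRequest{
		Name: "my-provider", // hypothetical
	})
	if err != nil {
		panic(err)
	}
	for _, share := range shares {
		fmt.Println(share.Name)
	}
}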

View File

@ -34,13 +34,6 @@ func init() {
// TODO: short flags
createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
createCmd.Flags().StringVar(&createReq.DataSourceId, "data-source-id", createReq.DataSourceId, `The ID of the data source / SQL warehouse where this query will run.`)
createCmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `General description that can convey additional information about this query such as usage notes.`)
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `The name or title of this query to display in list views.`)
// TODO: any: options
createCmd.Flags().StringVar(&createReq.Parent, "parent", createReq.Parent, `The identifier of the workspace folder containing the query.`)
createCmd.Flags().StringVar(&createReq.Query, "query", createReq.Query, `The text of the query.`)
}
var createCmd = &cobra.Command{
@ -76,6 +69,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.Queries.Create(ctx, createReq)

View File

@ -120,7 +120,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
wait, err := w.ServingEndpoints.Create(ctx, createReq)
@ -470,7 +470,7 @@ var updateConfigCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
wait, err := w.ServingEndpoints.UpdateConfig(ctx, updateConfigReq)

View File

@ -72,7 +72,7 @@ var createCmd = &cobra.Command{
return err
}
} else {
return fmt.Errorf("provide command input in JSON format by specifying --json option")
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}
response, err := w.TableConstraints.Create(ctx, createReq)

View File

@ -287,7 +287,7 @@ var listSummariesCmd = &cobra.Command{
listSummariesReq.CatalogName = args[0]
}
response, err := w.Tables.ListSummaries(ctx, listSummariesReq)
response, err := w.Tables.ListSummariesAll(ctx, listSummariesReq)
if err != nil {
return err
}

go.mod (10 changed lines)
View File

@ -4,7 +4,7 @@ go 1.18
require (
github.com/briandowns/spinner v1.23.0 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.9.1-0.20230614092458-b5bbc1c8dabb // Apache 2.0
github.com/databricks/databricks-sdk-go v0.10.1 // Apache 2.0
github.com/fatih/color v1.15.0 // MIT
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
github.com/google/uuid v1.3.0 // BSD-3-Clause
@ -32,7 +32,7 @@ require (
)
require (
cloud.google.com/go/compute v1.19.3 // indirect
cloud.google.com/go/compute v1.20.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
@ -43,7 +43,7 @@ require (
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
@ -54,10 +54,10 @@ require (
golang.org/x/net v0.11.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/api v0.127.0 // indirect
google.golang.org/api v0.128.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/grpc v1.55.0 // indirect
google.golang.org/grpc v1.56.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum (22 changed lines)
View File

@ -1,7 +1,7 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds=
cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg=
cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@ -34,8 +34,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/databricks/databricks-sdk-go v0.9.1-0.20230614092458-b5bbc1c8dabb h1:M4TAWTG9Cg205GQn84azS+jZIkE6eNdcGr04ZeBqQLA=
github.com/databricks/databricks-sdk-go v0.9.1-0.20230614092458-b5bbc1c8dabb/go.mod h1:KGnka35+ywspOGF6/t3okMzPZXLgLJtEIzAHWXy/eSg=
github.com/databricks/databricks-sdk-go v0.10.1 h1:aaIGurgo7PXFyXCkDy/LButElTOdIBgmVAtdNQtFMsw=
github.com/databricks/databricks-sdk-go v0.10.1/go.mod h1:FHsME5YoKTTrti7UC2y/7ZjpsuHalLag0Etf93c/Bnk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -91,9 +91,9 @@ github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkj
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4=
github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.10.0 h1:ebSgKfMxynOdxw8QQuFOKMgomqeLGPqNLQox2bo42zg=
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
@ -243,8 +243,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.127.0 h1:v7rj0vA0imM3Ou81k1eyFxQNScLzn71EyGnJDr+V/XI=
google.golang.org/api v0.127.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750=
google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg=
google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
@ -263,8 +263,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/grpc v1.56.0 h1:+y7Bs8rtMd07LeXmL3NxcTLn7mUkbKZqEpPhMNkwJEE=
google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -10,5 +10,5 @@ func TestAccAlertsCreateErrWhenNoArguments(t *testing.T) {
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
_, _, err := RequireErrorRun(t, "alerts", "create")
assert.Equal(t, "provide command input in JSON format by specifying --json option", err.Error())
assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error())
}

View File

@ -16,8 +16,6 @@ import (
"time"
"github.com/databricks/cli/libs/auth/cache"
"github.com/databricks/cli/libs/databrickscfg"
"github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/retries"
"github.com/pkg/browser"
"golang.org/x/oauth2"
@ -97,7 +95,7 @@ func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) {
return refreshed, nil
}
func (a *PersistentAuth) profileName() string {
func (a *PersistentAuth) ProfileName() string {
// TODO: get profile name from interactive input
if a.AccountID != "" {
return fmt.Sprintf("ACCOUNT-%s", a.AccountID)
@ -132,12 +130,7 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error {
if err != nil {
return fmt.Errorf("store: %w", err)
}
return databrickscfg.SaveToProfile(ctx, &config.Config{
Host: a.Host,
AccountID: a.AccountID,
AuthType: "databricks-cli",
Profile: a.profileName(),
})
return nil
}
func (a *PersistentAuth) init(ctx context.Context) error {

View File

@ -53,7 +53,7 @@ func renderJson(w io.Writer, v any) error {
if err != nil {
return err
}
_, err = w.Write([]byte("\r\n"))
_, err = w.Write([]byte("\n"))
return err
}

View File

@ -76,6 +76,9 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error {
// Normalized version of the configured host.
host := normalizeHost(cfg.Host)
match, err := findMatchingProfile(configFile, func(s *ini.Section) bool {
if cfg.Profile != "" {
return cfg.Profile == s.Name()
}
key, err := s.GetKey("host")
if err != nil {
log.Tracef(ctx, "section %s: %s", s.Name(), err)

View File

@ -47,8 +47,8 @@ func loadOrCreateConfigFile(filename string) (*config.File, error) {
func matchOrCreateSection(ctx context.Context, configFile *config.File, cfg *config.Config) (*ini.Section, error) {
section, err := findMatchingProfile(configFile, func(s *ini.Section) bool {
if cfg.Profile == s.Name() {
return true
if cfg.Profile != "" {
return cfg.Profile == s.Name()
}
raw := s.KeysHash()
if cfg.AccountID != "" {
@ -89,8 +89,6 @@ func SaveToProfile(ctx context.Context, cfg *config.Config) error {
return err
}
// zeroval profile name before adding it to a section
cfg.Profile = ""
cfg.ConfigFile = ""
// clear old keys in case we're overriding the section
@ -99,7 +97,7 @@ func SaveToProfile(ctx context.Context, cfg *config.Config) error {
}
for _, attr := range config.ConfigAttributes {
if attr.IsZero(cfg) {
if attr.IsZero(cfg) || attr.Name == "profile" {
continue
}
key := section.Key(attr.Name)

View File

@ -38,7 +38,7 @@ func isSHA1(s string) bool {
}
func LoadReferenceFile(path string) (*Reference, error) {
// read referebce file content
// read reference file content
b, err := os.ReadFile(path)
if os.IsNotExist(err) {
return nil, nil