Mirror of https://github.com/databricks/cli.git

Commit 59081f7017: Merge remote-tracking branch 'origin' into import_dir
@@ -26,6 +26,7 @@ var Cmd = &cobra.Command{
{{- end}}
}

{{- $serviceName := .KebabName -}}
{{range .Methods}}

{{- $excludes := list "put-secret" -}}

@@ -62,11 +63,14 @@ func init() {
{{- end}}
{{end}}
}
{{- $excludeFromPrompts := list "workspace get-status" -}}
{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}

{{ $hasPosArgs := and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow")) -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
{{- $hasIdPrompt := and $hasSinglePosArg $serviceHasNamedIdMap -}}
{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
{{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
var {{.CamelName}}Cmd = &cobra.Command{

@@ -96,9 +100,12 @@ var {{.CamelName}}Cmd = &cobra.Command{
	} else {
		{{- if $hasIdPrompt}}
		if len(args) == 0 {
			promptSpinner := cmdio.Spinner(ctx)
			promptSpinner <- "No{{range .Request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
			names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}})
			close(promptSpinner)
			if err != nil {
				return err
				return fmt.Errorf("failed to load names for {{.Service.TitleName}} drop-down. Please manually specify required arguments. Original error: %w", err)
			}
			id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
			if err != nil {
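The new template variables above decide whether a generated command gets an interactive ID prompt: create commands and explicitly excluded commands never prompt ($noPrompt), and only commands with a single required primitive argument and a named-ID list on their service do ($hasIdPrompt). The following is a minimal, illustrative text/template sketch of that gating; the "list" and "in" helpers are stand-ins for the template's real function map, and the Service/Command/IsCrudCreate field names are assumptions chosen for the example, not the generator's actual data model.

package main

import (
	"os"
	"text/template"
)

func main() {
	// Stand-ins for the sprig-style helpers the codegen template relies on.
	funcs := template.FuncMap{
		"list": func(items ...string) []string { return items },
		"in": func(haystack []string, needle string) bool {
			for _, s := range haystack {
				if s == needle {
					return true
				}
			}
			return false
		},
	}

	// Simplified version of the prompt-gating logic added in this diff.
	const tmpl = `
{{- $excludeFromPrompts := list "workspace get-status" -}}
{{- $fullCommandName := (print .Service " " .Command) -}}
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) -}}
noPrompt for {{$fullCommandName}}: {{$noPrompt}}
`

	t := template.Must(template.New("gate").Funcs(funcs).Parse(tmpl))
	for _, m := range []struct {
		Service, Command string
		IsCrudCreate     bool
	}{
		{"workspace", "get-status", false}, // excluded explicitly, so no prompt
		{"clusters", "create", true},       // create commands never prompt
		{"clusters", "get", false},         // eligible for the ID prompt
	} {
		_ = t.Execute(os.Stdout, m)
	}
}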
@@ -94,9 +94,12 @@ var deleteCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down."
				names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Budgets drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Budget ID")
				if err != nil {

@@ -150,9 +153,12 @@ var getCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down."
				names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Budgets drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Budget ID")
				if err != nil {
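The hunks above (and the many like them that follow) all add the same prompt shape: show a spinner message while a name-to-ID map loads, then let the user pick, and wrap any load failure with a hint to pass the argument explicitly. A stdlib-only sketch of that shape follows; the real CLI uses cmdio.Spinner (a channel of status messages) and cmdio.Select, which this example only imitates, and the "Budgets" data is made up.

package main

import (
	"fmt"
	"os"
	"sort"
)

// loadWithSpinner mirrors the generated prompt code: announce what is loading,
// fetch a name-to-ID map, and wrap failures with a user-facing hint.
func loadWithSpinner(what string, load func() (map[string]string, error)) (map[string]string, error) {
	spinner := make(chan string, 1)
	done := make(chan struct{})
	go func() {
		for msg := range spinner {
			fmt.Fprintln(os.Stderr, msg)
		}
		close(done)
	}()

	spinner <- fmt.Sprintf("No argument specified. Loading names for %s drop-down.", what)
	names, err := load()
	close(spinner)
	<-done

	if err != nil {
		return nil, fmt.Errorf("failed to load names for %s drop-down. Please manually specify required arguments. Original error: %w", what, err)
	}
	return names, nil
}

func main() {
	names, err := loadWithSpinner("Budgets", func() (map[string]string, error) {
		return map[string]string{"monthly": "1234", "quarterly": "5678"}, nil
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	keys := make([]string, 0, len(names))
	for k := range names {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Printf("%s\t%s\n", k, names[k]) // a real command would cmdio.Select here and use the chosen ID
	}
}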
@@ -113,9 +113,12 @@ var deleteCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No CREDENTIALS_ID argument specified. Loading names for Credentials drop-down."
				names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID")
				if err != nil {

@@ -169,9 +172,12 @@ var getCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No CREDENTIALS_ID argument specified. Loading names for Credentials drop-down."
				names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID")
				if err != nil {
@@ -111,9 +111,12 @@ var deleteCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No ID argument specified. Loading names for Account Groups drop-down."
				names, err := a.Groups.GroupDisplayNameToIdMap(ctx, iam.ListAccountGroupsRequest{})
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Account Groups drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Unique ID for a group in the Databricks account")
				if err != nil {

@@ -166,9 +169,12 @@ var getCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No ID argument specified. Loading names for Account Groups drop-down."
				names, err := a.Groups.GroupDisplayNameToIdMap(ctx, iam.ListAccountGroupsRequest{})
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Account Groups drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Unique ID for a group in the Databricks account")
				if err != nil {

@@ -278,9 +284,12 @@ var patchCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No ID argument specified. Loading names for Account Groups drop-down."
				names, err := a.Groups.GroupDisplayNameToIdMap(ctx, iam.ListAccountGroupsRequest{})
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Account Groups drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Unique ID for a group in the Databricks account")
				if err != nil {

@@ -341,9 +350,12 @@ var updateCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No ID argument specified. Loading names for Account Groups drop-down."
				names, err := a.Groups.GroupDisplayNameToIdMap(ctx, iam.ListAccountGroupsRequest{})
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Account Groups drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Databricks group ID")
				if err != nil {
@@ -132,9 +132,12 @@ var deleteCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No IP_ACCESS_LIST_ID argument specified. Loading names for Account Ip Access Lists drop-down."
				names, err := a.IpAccessLists.IpAccessListInfoLabelToListIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Account Ip Access Lists drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list")
				if err != nil {

@@ -187,9 +190,12 @@ var getCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No IP_ACCESS_LIST_ID argument specified. Loading names for Account Ip Access Lists drop-down."
				names, err := a.IpAccessLists.IpAccessListInfoLabelToListIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Account Ip Access Lists drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list")
				if err != nil {
@@ -183,9 +183,12 @@ var getCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No LOG_DELIVERY_CONFIGURATION_ID argument specified. Loading names for Log Delivery drop-down."
				names, err := a.LogDelivery.LogDeliveryConfigurationConfigNameToConfigIdMap(ctx, billing.ListLogDeliveryRequest{})
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Log Delivery drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Databricks log delivery configuration ID")
				if err != nil {
@@ -47,7 +47,14 @@ var createCmd = &cobra.Command{
	pre-existing VPC and subnets.`,

	Annotations: map[string]string{},
	PreRunE: root.MustAccountClient,
	Args: func(cmd *cobra.Command, args []string) error {
		check := cobra.ExactArgs(1)
		if cmd.Flags().Changed("json") {
			check = cobra.ExactArgs(0)
		}
		return check(cmd, args)
	},
	PreRunE: root.MustAccountClient,
	RunE: func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		a := root.AccountClient(ctx)

@@ -57,20 +64,6 @@ var createCmd = &cobra.Command{
				return err
			}
		} else {
			if len(args) == 0 {
				names, err := a.Networks.NetworkNetworkNameToNetworkIdMap(ctx)
				if err != nil {
					return err
				}
				id, err := cmdio.Select(ctx, names, "The human-readable name of the network configuration")
				if err != nil {
					return err
				}
				args = append(args, id)
			}
			if len(args) != 1 {
				return fmt.Errorf("expected to have the human-readable name of the network configuration")
			}
			createReq.NetworkName = args[0]
		}

@@ -118,9 +111,12 @@ var deleteCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No NETWORK_ID argument specified. Loading names for Networks drop-down."
				names, err := a.Networks.NetworkNetworkNameToNetworkIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Networks drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Databricks Account API network configuration ID")
				if err != nil {

@@ -174,9 +170,12 @@ var getCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No NETWORK_ID argument specified. Loading names for Networks drop-down."
				names, err := a.Networks.NetworkNetworkNameToNetworkIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Networks drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "Databricks Account API network configuration ID")
				if err != nil {
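The create-command hunks above introduce an Args check that requires exactly one positional argument unless the request is supplied via --json, in which case no positional argument is allowed. A small self-contained cobra sketch of that check follows; the string --json flag here is only a stand-in for the generated JSON request flag, and the command name is invented for the example.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var jsonPayload string

	cmd := &cobra.Command{
		Use: "create NAME",
		// Same idea as the generated commands: exactly one positional
		// argument, unless the request body comes in through --json.
		Args: func(cmd *cobra.Command, args []string) error {
			check := cobra.ExactArgs(1)
			if cmd.Flags().Changed("json") {
				check = cobra.ExactArgs(0)
			}
			return check(cmd, args)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			if cmd.Flags().Changed("json") {
				fmt.Println("payload from --json:", jsonPayload)
				return nil
			}
			fmt.Println("name from positional arg:", args[0])
			return nil
		},
	}
	cmd.Flags().StringVar(&jsonPayload, "json", "", "request body as JSON")

	// With --json set, zero positional arguments pass validation.
	cmd.SetArgs([]string{"--json", `{"network_name":"example"}`})
	_ = cmd.Execute()
}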
@ -123,9 +123,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PRIVATE_ACCESS_SETTINGS_ID argument specified. Loading names for Private Access drop-down."
|
||||
names, err := a.PrivateAccess.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Private Access drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks Account API private access settings ID")
|
||||
if err != nil {
|
||||
|
@ -185,9 +188,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PRIVATE_ACCESS_SETTINGS_ID argument specified. Loading names for Private Access drop-down."
|
||||
names, err := a.PrivateAccess.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Private Access drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks Account API private access settings ID")
|
||||
if err != nil {
|
||||
|
|
|
@ -110,9 +110,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Account Service Principals drop-down."
|
||||
names, err := a.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListAccountServicePrincipalsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Account Service Principals drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a service principal in the Databricks account")
|
||||
if err != nil {
|
||||
|
@ -166,9 +169,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Account Service Principals drop-down."
|
||||
names, err := a.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListAccountServicePrincipalsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Account Service Principals drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a service principal in the Databricks account")
|
||||
if err != nil {
|
||||
|
@ -279,9 +285,12 @@ var patchCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Account Service Principals drop-down."
|
||||
names, err := a.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListAccountServicePrincipalsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Account Service Principals drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a service principal in the Databricks account")
|
||||
if err != nil {
|
||||
|
@ -345,9 +354,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Account Service Principals drop-down."
|
||||
names, err := a.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListAccountServicePrincipalsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Account Service Principals drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks service principal ID")
|
||||
if err != nil {
|
||||
|
|
|
@ -109,9 +109,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No STORAGE_CONFIGURATION_ID argument specified. Loading names for Storage drop-down."
|
||||
names, err := a.Storage.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Storage drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks Account API storage configuration ID")
|
||||
if err != nil {
|
||||
|
@ -164,9 +167,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No STORAGE_CONFIGURATION_ID argument specified. Loading names for Storage drop-down."
|
||||
names, err := a.Storage.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Storage drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks Account API storage configuration ID")
|
||||
if err != nil {
|
||||
|
|
|
@ -119,9 +119,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Account Users drop-down."
|
||||
names, err := a.Users.UserUserNameToIdMap(ctx, iam.ListAccountUsersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Account Users drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a user in the Databricks account")
|
||||
if err != nil {
|
||||
|
@ -174,9 +177,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Account Users drop-down."
|
||||
names, err := a.Users.UserUserNameToIdMap(ctx, iam.ListAccountUsersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Account Users drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a user in the Databricks account")
|
||||
if err != nil {
|
||||
|
@ -287,9 +293,12 @@ var patchCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Account Users drop-down."
|
||||
names, err := a.Users.UserUserNameToIdMap(ctx, iam.ListAccountUsersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Account Users drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a user in the Databricks account")
|
||||
if err != nil {
|
||||
|
@ -353,9 +362,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Account Users drop-down."
|
||||
names, err := a.Users.UserUserNameToIdMap(ctx, iam.ListAccountUsersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Account Users drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks user ID")
|
||||
if err != nil {
|
||||
|
|
|
@ -54,7 +54,14 @@ var createCmd = &cobra.Command{
|
|||
[endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html`,
|
||||
|
||||
Annotations: map[string]string{},
|
||||
PreRunE: root.MustAccountClient,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
if cmd.Flags().Changed("json") {
|
||||
check = cobra.ExactArgs(0)
|
||||
}
|
||||
return check(cmd, args)
|
||||
},
|
||||
PreRunE: root.MustAccountClient,
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
@ -64,20 +71,6 @@ var createCmd = &cobra.Command{
|
|||
return err
|
||||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
names, err := a.VpcEndpoints.VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The human-readable name of the storage configuration")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args, id)
|
||||
}
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have the human-readable name of the storage configuration")
|
||||
}
|
||||
createReq.VpcEndpointName = args[0]
|
||||
}
|
||||
|
||||
|
@ -128,9 +121,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No VPC_ENDPOINT_ID argument specified. Loading names for Vpc Endpoints drop-down."
|
||||
names, err := a.VpcEndpoints.VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Vpc Endpoints drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks VPC endpoint ID")
|
||||
if err != nil {
|
||||
|
@ -187,9 +183,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No VPC_ENDPOINT_ID argument specified. Loading names for Vpc Endpoints drop-down."
|
||||
names, err := a.VpcEndpoints.VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Vpc Endpoints drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks VPC endpoint ID")
|
||||
if err != nil {
|
||||
|
|
|
@ -73,7 +73,14 @@ var createCmd = &cobra.Command{
|
|||
workspace becomes available when the status changes to RUNNING.`,
|
||||
|
||||
Annotations: map[string]string{},
|
||||
PreRunE: root.MustAccountClient,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
if cmd.Flags().Changed("json") {
|
||||
check = cobra.ExactArgs(0)
|
||||
}
|
||||
return check(cmd, args)
|
||||
},
|
||||
PreRunE: root.MustAccountClient,
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
|
@ -83,20 +90,6 @@ var createCmd = &cobra.Command{
|
|||
return err
|
||||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
names, err := a.Workspaces.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The workspace's human-readable name")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args, id)
|
||||
}
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have the workspace's human-readable name")
|
||||
}
|
||||
createReq.WorkspaceName = args[0]
|
||||
}
|
||||
|
||||
|
@ -163,9 +156,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Workspaces drop-down."
|
||||
names, err := a.Workspaces.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Workspaces drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Workspace ID")
|
||||
if err != nil {
|
||||
|
@ -234,9 +230,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Workspaces drop-down."
|
||||
names, err := a.Workspaces.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Workspaces drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Workspace ID")
|
||||
if err != nil {
|
||||
|
@ -445,9 +444,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Workspaces drop-down."
|
||||
names, err := a.Workspaces.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Workspaces drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Workspace ID")
|
||||
if err != nil {
|
||||
|
|
|
@@ -99,8 +99,8 @@ var profilesCmd = &cobra.Command{
	Short: "Lists profiles from ~/.databrickscfg",
	Annotations: map[string]string{
		"template": cmdio.Heredoc(`
		{{white "Name"}} {{white "Host"}} {{white "Valid"}}
		{{range .Profiles}}{{.Name | green}} {{.Host|white}} {{bool .Valid}}
		{{header "Name"}} {{header "Host"}} {{header "Valid"}}
		{{range .Profiles}}{{.Name | green}} {{.Host|cyan}} {{bool .Valid}}
		{{end}}`),
	},
	RunE: func(cmd *cobra.Command, args []string) error {
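The profiles template above is wrapped in cmdio.Heredoc so it can sit indented inside the Go source. As a rough illustration of what such a helper does, here is a minimal dedent function; the real helper lives in libs/cmdio and may differ in details, so treat this only as a sketch of the idea.

package main

import (
	"fmt"
	"strings"
)

// heredoc strips the common leading whitespace from an indented raw string,
// so templates can be written inline without the indentation leaking into
// the rendered output.
func heredoc(s string) string {
	lines := strings.Split(strings.Trim(s, "\n"), "\n")
	margin := -1
	for _, l := range lines {
		if strings.TrimSpace(l) == "" {
			continue
		}
		indent := len(l) - len(strings.TrimLeft(l, " \t"))
		if margin < 0 || indent < margin {
			margin = indent
		}
	}
	for i, l := range lines {
		if len(l) >= margin {
			lines[i] = l[margin:]
		}
	}
	return strings.Join(lines, "\n") + "\n"
}

func main() {
	tmpl := heredoc(`
		{{header "Name"}} {{header "Host"}} {{header "Valid"}}
		{{range .Profiles}}{{.Name | green}} {{.Host|cyan}} {{bool .Valid}}
		{{end}}`)
	fmt.Print(tmpl) // prints the template text with the shared indent removed
}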
@@ -7,10 +7,9 @@ import (

// fsCmd represents the fs command
var fsCmd = &cobra.Command{
	Use: "fs",
	Short: "Filesystem related commands",
	Long: `Commands to do DBFS operations.`,
	Hidden: true,
	Use: "fs",
	Short: "Filesystem related commands",
	Long: `Commands to do DBFS operations.`,
}

func init() {
@@ -0,0 +1,14 @@
package fs

import (
	"fmt"
	"strings"
)

func resolveDbfsPath(path string) (string, error) {
	if !strings.HasPrefix(path, "dbfs:/") {
		return "", fmt.Errorf("expected dbfs path (with the dbfs:/ prefix): %s", path)
	}

	return strings.TrimPrefix(path, "dbfs:"), nil
}
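A quick, self-contained demonstration of the new helper's behavior (the function body is copied verbatim from the diff so the example runs on its own): valid dbfs:/ paths lose only the scheme, everything else is rejected.

package main

import (
	"fmt"
	"strings"
)

// Copy of the helper introduced above, so this example compiles standalone.
func resolveDbfsPath(path string) (string, error) {
	if !strings.HasPrefix(path, "dbfs:/") {
		return "", fmt.Errorf("expected dbfs path (with the dbfs:/ prefix): %s", path)
	}
	return strings.TrimPrefix(path, "dbfs:"), nil
}

func main() {
	for _, in := range []string{"dbfs:/", "dbfs:/a/b/c", "/a/b/c", "dbfs:a/b/c"} {
		out, err := resolveDbfsPath(in)
		fmt.Printf("%-14q -> %q, err=%v\n", in, out, err)
	}
}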
@@ -0,0 +1,38 @@
package fs

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestResolveDbfsPath(t *testing.T) {
	path, err := resolveDbfsPath("dbfs:/")
	assert.NoError(t, err)
	assert.Equal(t, "/", path)

	path, err = resolveDbfsPath("dbfs:/abc")
	assert.NoError(t, err)
	assert.Equal(t, "/abc", path)

	path, err = resolveDbfsPath("dbfs:/a/b/c")
	assert.NoError(t, err)
	assert.Equal(t, "/a/b/c", path)

	path, err = resolveDbfsPath("dbfs:/a/b/.")
	assert.NoError(t, err)
	assert.Equal(t, "/a/b/.", path)

	path, err = resolveDbfsPath("dbfs:/a/../c")
	assert.NoError(t, err)
	assert.Equal(t, "/a/../c", path)

	_, err = resolveDbfsPath("dbf:/a/b/c")
	assert.ErrorContains(t, err, "expected dbfs path (with the dbfs:/ prefix): dbf:/a/b/c")

	_, err = resolveDbfsPath("/a/b/c")
	assert.ErrorContains(t, err, "expected dbfs path (with the dbfs:/ prefix): /a/b/c")

	_, err = resolveDbfsPath("dbfs:a/b/c")
	assert.ErrorContains(t, err, "expected dbfs path (with the dbfs:/ prefix): /a/b/c")
}
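For comparison only, the same assertions can be written table-driven, which keeps future path cases in one place. This is an illustrative sketch, not part of the commit; it assumes it sits next to resolveDbfsPath in the same package.

package fs

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Equivalent table-driven form of the cases above.
func TestResolveDbfsPathTable(t *testing.T) {
	valid := map[string]string{
		"dbfs:/":       "/",
		"dbfs:/abc":    "/abc",
		"dbfs:/a/b/c":  "/a/b/c",
		"dbfs:/a/b/.":  "/a/b/.",
		"dbfs:/a/../c": "/a/../c",
	}
	for in, want := range valid {
		got, err := resolveDbfsPath(in)
		assert.NoError(t, err)
		assert.Equal(t, want, got)
	}

	for _, in := range []string{"dbf:/a/b/c", "/a/b/c", "dbfs:a/b/c"} {
		_, err := resolveDbfsPath(in)
		assert.ErrorContains(t, err, "expected dbfs path (with the dbfs:/ prefix): "+in)
	}
}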
cmd/fs/ls.go

@@ -1,23 +1,93 @@
package fs

import (
	"fmt"
	"io/fs"
	"sort"
	"time"

	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/filer"
	"github.com/spf13/cobra"
)

type jsonDirEntry struct {
	Name    string    `json:"name"`
	IsDir   bool      `json:"is_directory"`
	Size    int64     `json:"size"`
	ModTime time.Time `json:"last_modified"`
}

func toJsonDirEntry(f fs.DirEntry) (*jsonDirEntry, error) {
	info, err := f.Info()
	if err != nil {
		return nil, err
	}

	return &jsonDirEntry{
		Name:    f.Name(),
		IsDir:   f.IsDir(),
		Size:    info.Size(),
		ModTime: info.ModTime(),
	}, nil
}

// lsCmd represents the ls command
var lsCmd = &cobra.Command{
	Use: "ls <dir-name>",
	Short: "Lists files",
	Long: `Lists files`,
	Hidden: true,
	Use: "ls DIR_PATH",
	Short: "Lists files",
	Long: `Lists files`,
	Args: cobra.ExactArgs(1),
	PreRunE: root.MustWorkspaceClient,

	RunE: func(cmd *cobra.Command, args []string) error {
		return fmt.Errorf("TODO")
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		path, err := resolveDbfsPath(args[0])
		if err != nil {
			return err
		}

		f, err := filer.NewDbfsClient(w, "/")
		if err != nil {
			return err
		}

		entries, err := f.ReadDir(ctx, path)
		if err != nil {
			return err
		}

		jsonDirEntries := make([]jsonDirEntry, len(entries))
		for i, entry := range entries {
			jsonDirEntry, err := toJsonDirEntry(entry)
			if err != nil {
				return err
			}
			jsonDirEntries[i] = *jsonDirEntry
		}
		sort.Slice(jsonDirEntries, func(i, j int) bool {
			return jsonDirEntries[i].Name < jsonDirEntries[j].Name
		})

		// Use template for long mode if the flag is set
		if longMode {
			return cmdio.RenderWithTemplate(ctx, jsonDirEntries, cmdio.Heredoc(`
			{{range .}}{{if .IsDir}}DIRECTORY {{else}}FILE {{end}}{{.Size}} {{.ModTime|pretty_date}} {{.Name}}
			{{end}}
			`))
		}
		return cmdio.RenderWithTemplate(ctx, jsonDirEntries, cmdio.Heredoc(`
		{{range .}}{{.Name}}
		{{end}}
		`))
	},
}

var longMode bool

func init() {
	lsCmd.Flags().BoolVarP(&longMode, "long", "l", false, "Displays full information including size, file type and modification time since Epoch in milliseconds.")
	fsCmd.AddCommand(lsCmd)
}
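The ls command above builds a slice of jsonDirEntry values, sorts them by name, and renders either a long or a short template. A self-contained sketch of that flow against a local directory follows; os.ReadDir stands in for the DBFS filer, and a plain time format stands in for the pretty_date helper that cmdio registers for its templates.

package main

import (
	"fmt"
	"os"
	"sort"
	"text/template"
	"time"
)

// Local mirror of the jsonDirEntry shape used by `databricks fs ls`.
type jsonDirEntry struct {
	Name    string    `json:"name"`
	IsDir   bool      `json:"is_directory"`
	Size    int64     `json:"size"`
	ModTime time.Time `json:"last_modified"`
}

func main() {
	entries, err := os.ReadDir(".")
	if err != nil {
		panic(err)
	}

	out := make([]jsonDirEntry, 0, len(entries))
	for _, e := range entries {
		info, err := e.Info()
		if err != nil {
			continue // skip entries that disappear between ReadDir and Info
		}
		out = append(out, jsonDirEntry{e.Name(), e.IsDir(), info.Size(), info.ModTime()})
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Name < out[j].Name })

	// Long mode roughly mirrors the template in ls.go.
	long := template.Must(template.New("long").Parse(
		"{{range .}}{{if .IsDir}}DIRECTORY {{else}}FILE      {{end}}{{.Size}}\t{{.ModTime.Format \"2006-01-02 15:04:05\"}}\t{{.Name}}\n{{end}}"))
	if err := long.Execute(os.Stdout, out); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}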
@@ -106,9 +106,12 @@ var deleteCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No ALERT_ID argument specified. Loading names for Alerts drop-down."
				names, err := w.Alerts.AlertNameToIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Alerts drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "")
				if err != nil {

@@ -161,9 +164,12 @@ var getCmd = &cobra.Command{
			}
		} else {
			if len(args) == 0 {
				promptSpinner := cmdio.Spinner(ctx)
				promptSpinner <- "No ALERT_ID argument specified. Loading names for Alerts drop-down."
				names, err := w.Alerts.AlertNameToIdMap(ctx)
				close(promptSpinner)
				if err != nil {
					return err
					return fmt.Errorf("failed to load names for Alerts drop-down. Please manually specify required arguments. Original error: %w", err)
				}
				id, err := cmdio.Select(ctx, names, "")
				if err != nil {
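The recurring change across these hunks replaces a bare `return err` with an error wrapped via %w, so the user sees an actionable hint while the original error stays reachable for callers. A minimal demonstration, with an invented sentinel error standing in for whatever the SDK returns:

package main

import (
	"errors"
	"fmt"
)

var errPermissionDenied = errors.New("permission denied")

func loadNames() (map[string]string, error) {
	return nil, errPermissionDenied
}

func main() {
	_, err := loadNames()
	// Same wrapping style the generated commands switched to: user-facing hint
	// first, original error preserved through %w.
	wrapped := fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. Original error: %w", err)

	fmt.Println(wrapped)
	fmt.Println("still matches original:", errors.Is(wrapped, errPermissionDenied))
}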
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
	listCmd.Annotations["template"] = cmdio.Heredoc(`
	{{white "Name"}} {{white "Type"}} {{white "Comment"}}
	{{header "Name"}} {{header "Type"}} {{header "Comment"}}
	{{range .}}{{.Name|green}} {{blue "%s" .CatalogType}} {{.Comment}}
	{{end}}`)
}
@ -67,7 +67,14 @@ var createCmd = &cobra.Command{
|
|||
Creates a new policy with prescribed settings.`,
|
||||
|
||||
Annotations: map[string]string{},
|
||||
PreRunE: root.MustWorkspaceClient,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
if cmd.Flags().Changed("json") {
|
||||
check = cobra.ExactArgs(0)
|
||||
}
|
||||
return check(cmd, args)
|
||||
},
|
||||
PreRunE: root.MustWorkspaceClient,
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
@ -77,20 +84,6 @@ var createCmd = &cobra.Command{
|
|||
return err
|
||||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
names, err := w.ClusterPolicies.PolicyNameToPolicyIdMap(ctx, compute.ListClusterPoliciesRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Cluster Policy name requested by the user")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args, id)
|
||||
}
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have cluster policy name requested by the user")
|
||||
}
|
||||
createReq.Name = args[0]
|
||||
}
|
||||
|
||||
|
@ -134,9 +127,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No POLICY_ID argument specified. Loading names for Cluster Policies drop-down."
|
||||
names, err := w.ClusterPolicies.PolicyNameToPolicyIdMap(ctx, compute.ListClusterPoliciesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID of the policy to delete")
|
||||
if err != nil {
|
||||
|
@ -245,9 +241,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No POLICY_ID argument specified. Loading names for Cluster Policies drop-down."
|
||||
names, err := w.ClusterPolicies.PolicyNameToPolicyIdMap(ctx, compute.ListClusterPoliciesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Canonical unique identifier for the cluster policy")
|
||||
if err != nil {
|
||||
|
|
|
@ -158,7 +158,14 @@ var createCmd = &cobra.Command{
|
|||
informative error message.`,
|
||||
|
||||
Annotations: map[string]string{},
|
||||
PreRunE: root.MustWorkspaceClient,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
if cmd.Flags().Changed("json") {
|
||||
check = cobra.ExactArgs(0)
|
||||
}
|
||||
return check(cmd, args)
|
||||
},
|
||||
PreRunE: root.MustWorkspaceClient,
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
@ -168,20 +175,6 @@ var createCmd = &cobra.Command{
|
|||
return err
|
||||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The Spark version of the cluster, e.g")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args, id)
|
||||
}
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have the spark version of the cluster, e.g")
|
||||
}
|
||||
createReq.SparkVersion = args[0]
|
||||
}
|
||||
|
||||
|
@ -249,9 +242,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The cluster to be terminated")
|
||||
if err != nil {
|
||||
|
@ -437,9 +433,12 @@ var eventsCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID of the cluster to retrieve events about")
|
||||
if err != nil {
|
||||
|
@ -498,9 +497,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The cluster about which to retrieve information")
|
||||
if err != nil {
|
||||
|
@ -671,9 +673,12 @@ var permanentDeleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The cluster to be deleted")
|
||||
if err != nil {
|
||||
|
@ -728,9 +733,12 @@ var pinCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "<needs content added>")
|
||||
if err != nil {
|
||||
|
@ -792,9 +800,12 @@ var resizeCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The cluster to be resized")
|
||||
if err != nil {
|
||||
|
@ -872,9 +883,12 @@ var restartCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The cluster to be started")
|
||||
if err != nil {
|
||||
|
@ -984,9 +998,12 @@ var startCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The cluster to be started")
|
||||
if err != nil {
|
||||
|
@ -1058,9 +1075,12 @@ var unpinCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down."
|
||||
names, err := w.Clusters.ClusterInfoClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "<needs content added>")
|
||||
if err != nil {
|
||||
|
|
|
@@ -4,8 +4,8 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
	listCmd.Annotations["template"] = cmdio.Heredoc(`
	{{white "ID"}} {{white "Name"}} {{white "State"}}
	{{range .}}{{.ClusterId | green}} {{.ClusterName|white}} {{if eq .State "RUNNING"}}{{green "%s" .State}}{{else if eq .State "TERMINATED"}}{{red "%s" .State}}{{else}}{{blue "%s" .State}}{{end}}
	{{header "ID"}} {{header "Name"}} {{header "State"}}
	{{range .}}{{.ClusterId | green}} {{.ClusterName | cyan}} {{if eq .State "RUNNING"}}{{green "%s" .State}}{{else if eq .State "TERMINATED"}}{{red "%s" .State}}{{else}}{{blue "%s" .State}}{{end}}
	{{end}}`)

	listNodeTypesCmd.Annotations["template"] = cmdio.Heredoc(`
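The cluster list template above colors the State column conditionally (green for RUNNING, red for TERMINATED, blue otherwise) and switches the headers from white to a dedicated header style. The following self-contained sketch reproduces that rendering with stand-in ANSI color functions; the real header/green/red/blue/cyan helpers come from cmdio's template function map, and the cluster data here is invented.

package main

import (
	"fmt"
	"os"
	"text/template"
)

// colorf builds a format-style color helper like {{green "%s" .State}}.
func colorf(code string) func(string, ...interface{}) string {
	return func(format string, args ...interface{}) string {
		return "\x1b[" + code + "m" + fmt.Sprintf(format, args...) + "\x1b[0m"
	}
}

type cluster struct {
	ClusterId, ClusterName, State string
}

func main() {
	funcs := template.FuncMap{
		"header": func(s string) string { return "\x1b[1m" + s + "\x1b[0m" },
		"green":  colorf("32"),
		"red":    colorf("31"),
		"blue":   colorf("34"),
		"cyan":   func(s string) string { return "\x1b[36m" + s + "\x1b[0m" },
	}
	const tmpl = `{{header "ID"}}  {{header "Name"}}  {{header "State"}}
{{range .}}{{.ClusterId | green}}  {{.ClusterName | cyan}}  {{if eq .State "RUNNING"}}{{green "%s" .State}}{{else if eq .State "TERMINATED"}}{{red "%s" .State}}{{else}}{{blue "%s" .State}}{{end}}
{{end}}`
	t := template.Must(template.New("clusters").Funcs(funcs).Parse(tmpl))
	_ = t.Execute(os.Stdout, []cluster{
		{"0101-abc", "etl", "RUNNING"},
		{"0202-def", "adhoc", "TERMINATED"},
		{"0303-ghi", "ml", "PENDING"},
	})
}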
@ -105,9 +105,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No DASHBOARD_ID argument specified. Loading names for Dashboards drop-down."
|
||||
names, err := w.Dashboards.DashboardNameToIdMap(ctx, sql.ListDashboardsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Dashboards drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -161,9 +164,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No DASHBOARD_ID argument specified. Loading names for Dashboards drop-down."
|
||||
names, err := w.Dashboards.DashboardNameToIdMap(ctx, sql.ListDashboardsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Dashboards drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -268,9 +274,12 @@ var restoreCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No DASHBOARD_ID argument specified. Loading names for Dashboards drop-down."
|
||||
names, err := w.Dashboards.DashboardNameToIdMap(ctx, sql.ListDashboardsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Dashboards drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
|
|
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
	listCmd.Annotations["template"] = cmdio.Heredoc(`
	{{white "ID"}} {{white "Name"}}
	{{header "ID"}} {{header "Name"}}
	{{range .}}{{.Id|green}} {{.Name}}
	{{end}}`)
}
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
	listCmd.Annotations["template"] = cmdio.Heredoc(`
	{{white "Name"}} {{white "Credential"}} {{white "URL"}}
	{{range .}}{{.Name|green}} {{.CredentialName|blue}} {{.Url}}
	{{header "Name"}} {{header "Credential"}} {{header "URL"}}
	{{range .}}{{.Name|green}} {{.CredentialName|cyan}} {{.Url}}
	{{end}}`)
}
@ -49,7 +49,14 @@ var createCmd = &cobra.Command{
|
|||
DELETE endpoint to delete existing credentials.`,
|
||||
|
||||
Annotations: map[string]string{},
|
||||
PreRunE: root.MustWorkspaceClient,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(1)
|
||||
if cmd.Flags().Changed("json") {
|
||||
check = cobra.ExactArgs(0)
|
||||
}
|
||||
return check(cmd, args)
|
||||
},
|
||||
PreRunE: root.MustWorkspaceClient,
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
w := root.WorkspaceClient(ctx)
|
||||
|
@ -59,20 +66,6 @@ var createCmd = &cobra.Command{
|
|||
return err
|
||||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
names, err := w.GitCredentials.CredentialInfoGitProviderToCredentialIdMap(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Git provider")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args, id)
|
||||
}
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("expected to have git provider")
|
||||
}
|
||||
createReq.GitProvider = args[0]
|
||||
}
|
||||
|
||||
|
@ -115,9 +108,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CREDENTIAL_ID argument specified. Loading names for Git Credentials drop-down."
|
||||
names, err := w.GitCredentials.CredentialInfoGitProviderToCredentialIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Git Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID for the corresponding credential to access")
|
||||
if err != nil {
|
||||
|
@ -173,9 +169,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CREDENTIAL_ID argument specified. Loading names for Git Credentials drop-down."
|
||||
names, err := w.GitCredentials.CredentialInfoGitProviderToCredentialIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Git Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID for the corresponding credential to access")
|
||||
if err != nil {
|
||||
|
@ -263,9 +262,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CREDENTIAL_ID argument specified. Loading names for Git Credentials drop-down."
|
||||
names, err := w.GitCredentials.CredentialInfoGitProviderToCredentialIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Git Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID for the corresponding credential to access")
|
||||
if err != nil {
|
||||
|
|
|
@ -109,9 +109,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No SCRIPT_ID argument specified. Loading names for Global Init Scripts drop-down."
|
||||
names, err := w.GlobalInitScripts.GlobalInitScriptDetailsNameToScriptIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Global Init Scripts drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID of the global init script")
|
||||
if err != nil {
|
||||
|
@ -164,9 +167,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No SCRIPT_ID argument specified. Loading names for Global Init Scripts drop-down."
|
||||
names, err := w.GlobalInitScripts.GlobalInitScriptDetailsNameToScriptIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Global Init Scripts drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID of the global init script")
|
||||
if err != nil {
|
||||
|
|
|
@ -111,9 +111,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Groups drop-down."
|
||||
names, err := w.Groups.GroupDisplayNameToIdMap(ctx, iam.ListGroupsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Groups drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a group in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -166,9 +169,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Groups drop-down."
|
||||
names, err := w.Groups.GroupDisplayNameToIdMap(ctx, iam.ListGroupsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Groups drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a group in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -278,9 +284,12 @@ var patchCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Groups drop-down."
|
||||
names, err := w.Groups.GroupDisplayNameToIdMap(ctx, iam.ListGroupsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Groups drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a group in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -341,9 +350,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Groups drop-down."
|
||||
names, err := w.Groups.GroupDisplayNameToIdMap(ctx, iam.ListGroupsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Groups drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks group ID")
|
||||
if err != nil {
|
||||
|
|
|
@ -128,9 +128,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No INSTANCE_POOL_ID argument specified. Loading names for Instance Pools drop-down."
|
||||
names, err := w.InstancePools.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Instance Pools drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The instance pool to be terminated")
|
||||
if err != nil {
|
||||
|
@ -245,9 +248,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No INSTANCE_POOL_ID argument specified. Loading names for Instance Pools drop-down."
|
||||
names, err := w.InstancePools.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Instance Pools drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The canonical unique identifier for the instance pool")
|
||||
if err != nil {
|
||||
|
|
|
@@ -133,9 +133,12 @@ var deleteCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No IP_ACCESS_LIST_ID argument specified. Loading names for Ip Access Lists drop-down."
names, err := w.IpAccessLists.IpAccessListInfoLabelToListIdMap(ctx)
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Ip Access Lists drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list to modify")
if err != nil {
@@ -188,9 +191,12 @@ var getCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No IP_ACCESS_LIST_ID argument specified. Loading names for Ip Access Lists drop-down."
names, err := w.IpAccessLists.IpAccessListInfoLabelToListIdMap(ctx)
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Ip Access Lists drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The ID for the corresponding IP access list to modify")
if err != nil {
@@ -69,9 +69,12 @@ var cancelAllRunsCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The canonical identifier of the job to cancel all runs of")
if err != nil {
@@ -133,9 +136,12 @@ var cancelRunCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No RUN_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "This field is required")
if err != nil {
@@ -203,6 +209,7 @@ func init() {
createCmd.Flags().IntVar(&createReq.MaxConcurrentRuns, "max-concurrent-runs", createReq.MaxConcurrentRuns, `An optional maximum allowed number of concurrent runs of the job.`)
createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `An optional name for the job.`)
// TODO: complex arg: notification_settings
// TODO: complex arg: run_as
// TODO: complex arg: schedule
// TODO: map via StringToStringVar: tags
// TODO: array: tasks
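The init hunk above binds each request field to a flag by handing cobra a pointer into the generated request struct, so the flag default shown to the user is the field's current value. A small sketch of that wiring, with a stand-in request struct (the real one lives in the jobs service package):

package example

import "github.com/spf13/cobra"

// createReq stands in for the generated jobs create request struct.
var createReq struct {
	MaxConcurrentRuns int
	Name              string
}

var createCmd = &cobra.Command{Use: "create"}

func init() {
	// Each flag writes straight into the request struct field it points at.
	createCmd.Flags().IntVar(&createReq.MaxConcurrentRuns, "max-concurrent-runs", createReq.MaxConcurrentRuns, "An optional maximum allowed number of concurrent runs of the job.")
	createCmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, "An optional name for the job.")
}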
@@ -278,9 +285,12 @@ var deleteCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The canonical identifier of the job to delete")
if err != nil {
@@ -336,9 +346,12 @@ var deleteRunCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No RUN_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The canonical identifier of the run for which to retrieve the metadata")
if err != nil {
@@ -396,9 +409,12 @@ var exportRunCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No RUN_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The canonical identifier for the run")
if err != nil {
@@ -454,9 +470,12 @@ var getCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The canonical identifier of the job to retrieve information about")
if err != nil {
@@ -519,9 +538,12 @@ var getRunCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No RUN_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The canonical identifier of the run for which to retrieve the metadata")
if err != nil {
@@ -586,9 +608,12 @@ var getRunOutputCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No RUN_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The canonical identifier for the run")
if err != nil {
@@ -772,9 +797,12 @@ var repairRunCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No RUN_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The job run ID of the run to repair")
if err != nil {
@@ -918,9 +946,12 @@ var runNowCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The ID of the job to be executed")
if err != nil {
@@ -1092,9 +1123,12 @@ var updateCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down."
names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The canonical identifier of the job to update")
if err != nil {
@@ -8,7 +8,7 @@ func init() {
{{end}}`)

listRunsCmd.Annotations["template"] = cmdio.Heredoc(`
{{"Job ID"|white}} {{"Run ID"|white}} {{"Result State"|white}} URL
{{range .}}{{green "%d" .JobId}} {{blue "%d" .RunId}} {{if eq .State.ResultState "SUCCESS"}}{{"SUCCESS"|green}}{{else}}{{red "%s" .State.ResultState}}{{end}} {{.RunPageUrl}}
{{header "Job ID"}} {{header "Run ID"}} {{header "Result State"}} URL
{{range .}}{{green "%d" .JobId}} {{cyan "%d" .RunId}} {{if eq .State.ResultState "SUCCESS"}}{{"SUCCESS"|green}}{{else}}{{red "%s" .State.ResultState}}{{end}} {{.RunPageUrl}}
{{end}}`)
}
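These override files publish an output template through the command's Annotations map, which the cmdio output layer renders for list results. A minimal sketch under that assumption; listCmd here is a stand-in for a generated list command, and header, green, and cyan are the same template helpers used in the hunks above.

package example

import (
	"github.com/databricks/cli/libs/cmdio"
	"github.com/spf13/cobra"
)

// listCmd stands in for a generated list command; the generated code already
// initializes Annotations, which is the map the override file writes into.
var listCmd = &cobra.Command{
	Use:         "list",
	Annotations: map[string]string{},
}

func init() {
	// The output layer looks up the "template" annotation and executes it
	// against the listed items.
	listCmd.Annotations["template"] = cmdio.Heredoc(`
	{{header "Job ID"}} {{header "Run ID"}} {{header "Result State"}} URL
	{{range .}}{{green "%d" .JobId}} {{cyan "%d" .RunId}} {{.RunPageUrl}}
	{{end}}`)
}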
@@ -196,9 +196,12 @@ var deleteCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down."
names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx)
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Unique ID of the metastore")
if err != nil {
@@ -252,9 +255,12 @@ var getCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down."
names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx)
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Unique ID of the metastore")
if err != nil {
@@ -477,9 +483,12 @@ var updateCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down."
names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx)
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "Unique ID of the metastore")
if err != nil {
@@ -538,9 +547,12 @@ var updateAssignmentCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Metastores drop-down."
names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx)
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "A workspace ID")
if err != nil {
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "ID"}} {{white "Name"}} {{white "Region"}}
{{range .}}{{.MetastoreId|green}} {{.Name|white}} {{.Region}}
{{header "ID"}} {{header "Name"}} {{"Region"}}
{{range .}}{{.MetastoreId|green}} {{.Name|cyan}} {{.Region}}
{{end}}`)
}
@@ -605,8 +605,8 @@ func init() {

var deleteTransitionRequestCmd = &cobra.Command{
Use: "delete-transition-request NAME VERSION STAGE CREATOR",
Short: `Delete a ransition request.`,
Long: `Delete a ransition request.
Short: `Delete a transition request.`,
Long: `Delete a transition request.

Cancels a model version stage transition request.`,
@ -131,9 +131,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -189,9 +192,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -298,9 +304,12 @@ var listPipelineEventsCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -409,9 +418,12 @@ var listUpdatesCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The pipeline to return updates for")
|
||||
if err != nil {
|
||||
|
@ -469,9 +481,12 @@ var resetCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -546,9 +561,12 @@ var startUpdateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -606,9 +624,12 @@ var stopCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -698,9 +719,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down."
|
||||
names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique identifier for this pipeline")
|
||||
if err != nil {
|
||||
|
|
|
@ -106,9 +106,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Providers drop-down."
|
||||
names, err := w.Providers.ProviderInfoNameToMetastoreIdMap(ctx, sharing.ListProvidersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Providers drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Name of the provider")
|
||||
if err != nil {
|
||||
|
@ -163,9 +166,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Providers drop-down."
|
||||
names, err := w.Providers.ProviderInfoNameToMetastoreIdMap(ctx, sharing.ListProvidersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Providers drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Name of the provider")
|
||||
if err != nil {
|
||||
|
@ -272,9 +278,12 @@ var listSharesCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Providers drop-down."
|
||||
names, err := w.Providers.ProviderInfoNameToMetastoreIdMap(ctx, sharing.ListProvidersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Providers drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Name of the provider in which to list shares")
|
||||
if err != nil {
|
||||
|
@ -335,9 +344,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Providers drop-down."
|
||||
names, err := w.Providers.ProviderInfoNameToMetastoreIdMap(ctx, sharing.ListProvidersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Providers drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The name of the Provider")
|
||||
if err != nil {
|
||||
|
|
|
@@ -5,7 +5,7 @@ import "github.com/databricks/cli/libs/cmdio"
func init() {
// TODO: figure out colored/non-colored headers and colspan shifts
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "ID"}} {{white "Name"}} {{white "Author"}}
{{range .}}{{.Id|green}} {{.Name|white}} {{.User.Email|white}}
{{header "ID"}} {{header "Name"}} {{header "Author"}}
{{range .}}{{.Id|green}} {{.Name|cyan}} {{.User.Email|cyan}}
{{end}}`)
}
@ -116,9 +116,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries drop-down."
|
||||
names, err := w.Queries.QueryNameToIdMap(ctx, sql.ListQueriesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Queries drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -172,9 +175,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries drop-down."
|
||||
names, err := w.Queries.QueryNameToIdMap(ctx, sql.ListQueriesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Queries drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -281,9 +287,12 @@ var restoreCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries drop-down."
|
||||
names, err := w.Queries.QueryNameToIdMap(ctx, sql.ListQueriesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Queries drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
@ -344,9 +353,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries drop-down."
|
||||
names, err := w.Queries.QueryNameToIdMap(ctx, sql.ListQueriesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Queries drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "")
|
||||
if err != nil {
|
||||
|
|
|
@@ -5,6 +5,6 @@ import "github.com/databricks/cli/libs/cmdio"
func init() {
// TODO: figure out the right format
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{range .}}{{.UserName}} {{white "%s" .Status}} {{.QueryText}}
{{range .}}{{.UserName}} {{cyan "%s" .Status}} {{.QueryText}}
{{end}}`)
}
@ -111,9 +111,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down."
|
||||
names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Name of the recipient")
|
||||
if err != nil {
|
||||
|
@ -168,9 +171,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down."
|
||||
names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Name of the recipient")
|
||||
if err != nil {
|
||||
|
@ -329,9 +335,12 @@ var sharePermissionsCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down."
|
||||
names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The name of the Recipient")
|
||||
if err != nil {
|
||||
|
@ -392,9 +401,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down."
|
||||
names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Name of Recipient")
|
||||
if err != nil {
|
||||
|
|
|
@ -112,9 +112,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No REPO_ID argument specified. Loading names for Repos drop-down."
|
||||
names, err := w.Repos.RepoInfoPathToIdMap(ctx, workspace.ListReposRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID for the corresponding repo to access")
|
||||
if err != nil {
|
||||
|
@ -170,9 +173,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No REPO_ID argument specified. Loading names for Repos drop-down."
|
||||
names, err := w.Repos.RepoInfoPathToIdMap(ctx, workspace.ListReposRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID for the corresponding repo to access")
|
||||
if err != nil {
|
||||
|
@ -284,9 +290,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No REPO_ID argument specified. Loading names for Repos drop-down."
|
||||
names, err := w.Repos.RepoInfoPathToIdMap(ctx, workspace.ListReposRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID for the corresponding repo to access")
|
||||
if err != nil {
|
||||
|
|
|
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "Full Name"}} {{white "Owner"}} {{white "Comment"}}
{{range .}}{{.FullName|green}} {{.Owner|white}} {{.Comment}}
{{header "Full Name"}} {{header "Owner"}} {{header "Comment"}}
{{range .}}{{.FullName|green}} {{.Owner|cyan}} {{.Comment}}
{{end}}`)
}
@ -109,9 +109,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME argument specified. Loading names for Schemas drop-down."
|
||||
names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Full name of the schema")
|
||||
if err != nil {
|
||||
|
@ -166,9 +169,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME argument specified. Loading names for Schemas drop-down."
|
||||
names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Full name of the schema")
|
||||
if err != nil {
|
||||
|
@ -282,9 +288,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME argument specified. Loading names for Schemas drop-down."
|
||||
names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Full name of the schema")
|
||||
if err != nil {
|
||||
|
|
|
@@ -10,7 +10,7 @@ import (

func init() {
listScopesCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "Scope"}} {{white "Backend Type"}}
{{header "Scope"}} {{header "Backend Type"}}
{{range .}}{{.Name|green}} {{.BackendType}}
{{end}}`)
@ -110,9 +110,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Service Principals drop-down."
|
||||
names, err := w.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListServicePrincipalsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Service Principals drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a service principal in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -166,9 +169,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Service Principals drop-down."
|
||||
names, err := w.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListServicePrincipalsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Service Principals drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a service principal in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -279,9 +285,12 @@ var patchCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Service Principals drop-down."
|
||||
names, err := w.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListServicePrincipalsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Service Principals drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a service principal in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -345,9 +354,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Service Principals drop-down."
|
||||
names, err := w.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListServicePrincipalsRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Service Principals drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks service principal ID")
|
||||
if err != nil {
|
||||
|
|
|
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "ID"}} {{white "Name"}} {{white "Credentials"}}
{{range .}}{{.Id|green}} {{.Name|white}} {{if .AwsIamRole}}{{.AwsIamRole.RoleArn}}{{end}}{{if .AzureServicePrincipal}}{{.AzureServicePrincipal.ApplicationId}}{{end}}{{if .GcpServiceAccountKey}}{{.Email}}{{end}}
{{header "ID"}} {{header "Name"}} {{header "Credentials"}}
{{range .}}{{.Id|green}} {{.Name|cyan}} {{if .AwsIamRole}}{{.AwsIamRole.RoleArn}}{{end}}{{if .AzureServicePrincipal}}{{.AzureServicePrincipal.ApplicationId}}{{end}}{{if .GcpServiceAccountKey}}{{.Email}}{{end}}
{{end}}`)
}
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "Full Name"}} {{white "Table Type"}}
{{header "Full Name"}} {{header "Table Type"}}
{{range .}}{{.FullName|green}} {{blue "%s" .TableType}}
{{end}}`)
}
@ -61,9 +61,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME argument specified. Loading names for Tables drop-down."
|
||||
names, err := w.Tables.TableInfoNameToTableIdMap(ctx, catalog.ListTablesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Tables drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Full name of the table")
|
||||
if err != nil {
|
||||
|
@ -122,9 +125,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME argument specified. Loading names for Tables drop-down."
|
||||
names, err := w.Tables.TableInfoNameToTableIdMap(ctx, catalog.ListTablesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Tables drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Full name of the table")
|
||||
if err != nil {
|
||||
|
@ -250,9 +256,12 @@ var listSummariesCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No CATALOG_NAME argument specified. Loading names for Tables drop-down."
|
||||
names, err := w.Tables.TableInfoNameToTableIdMap(ctx, catalog.ListTablesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Tables drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Name of parent catalog for tables of interest")
|
||||
if err != nil {
|
||||
|
|
|
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "ID"}} {{white "Created By"}} {{white "Comment"}}
{{range .}}{{.TokenId|green}} {{.CreatedByUsername|white}} {{.Comment|white}}
{{header "ID"}} {{header "Created By"}} {{header "Comment"}}
{{range .}}{{.TokenId|green}} {{.CreatedByUsername|cyan}} {{.Comment|cyan}}
{{end}}`)
}
@ -105,9 +105,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No TOKEN_ID argument specified. Loading names for Token Management drop-down."
|
||||
names, err := w.TokenManagement.TokenInfoCommentToTokenIdMap(ctx, settings.ListTokenManagementRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Token Management drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID of the token to get")
|
||||
if err != nil {
|
||||
|
@ -160,9 +163,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No TOKEN_ID argument specified. Loading names for Token Management drop-down."
|
||||
names, err := w.TokenManagement.TokenInfoCommentToTokenIdMap(ctx, settings.ListTokenManagementRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Token Management drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID of the token to get")
|
||||
if err != nil {
|
||||
|
|
|
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "ID"}} {{white "Expiry time"}} {{white "Comment"}}
{{range .}}{{.TokenId|green}} {{white "%d" .ExpiryTime}} {{.Comment|white}}
{{header "ID"}} {{header "Expiry time"}} {{header "Comment"}}
{{range .}}{{.TokenId|green}} {{cyan "%d" .ExpiryTime}} {{.Comment|cyan}}
{{end}}`)
}
@ -106,9 +106,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No TOKEN_ID argument specified. Loading names for Tokens drop-down."
|
||||
names, err := w.Tokens.TokenInfoCommentToTokenIdMap(ctx)
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Tokens drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The ID of the token to be revoked")
|
||||
if err != nil {
|
||||
|
|
|
@ -119,9 +119,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Users drop-down."
|
||||
names, err := w.Users.UserUserNameToIdMap(ctx, iam.ListUsersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Users drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a user in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -174,9 +177,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Users drop-down."
|
||||
names, err := w.Users.UserUserNameToIdMap(ctx, iam.ListUsersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Users drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a user in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -287,9 +293,12 @@ var patchCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Users drop-down."
|
||||
names, err := w.Users.UserUserNameToIdMap(ctx, iam.ListUsersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Users drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Unique ID for a user in the Databricks workspace")
|
||||
if err != nil {
|
||||
|
@ -353,9 +362,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Users drop-down."
|
||||
names, err := w.Users.UserUserNameToIdMap(ctx, iam.ListUsersRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Users drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Databricks user ID")
|
||||
if err != nil {
|
||||
|
|
|
@ -134,9 +134,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down."
|
||||
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Volumes drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The three-level (fully qualified) name of the volume")
|
||||
if err != nil {
|
||||
|
@ -253,9 +256,12 @@ var readCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down."
|
||||
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Volumes drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The three-level (fully qualified) name of the volume")
|
||||
if err != nil {
|
||||
|
@ -319,9 +325,12 @@ var updateCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down."
|
||||
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Volumes drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "The three-level (fully qualified) name of the volume")
|
||||
if err != nil {
|
||||
|
|
|
@@ -4,7 +4,7 @@ import "github.com/databricks/cli/libs/cmdio"

func init() {
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "ID"}} {{white "Name"}} {{white "Size"}} {{white "State"}}
{{range .}}{{.Id|green}} {{.Name|white}} {{.ClusterSize|white}} {{if eq .State "RUNNING"}}{{"RUNNING"|green}}{{else if eq .State "STOPPED"}}{{"STOPPED"|red}}{{else}}{{blue "%s" .State}}{{end}}
{{header "ID"}} {{header "Name"}} {{header "Size"}} {{header "State"}}
{{range .}}{{.Id|green}} {{.Name|cyan}} {{.ClusterSize|cyan}} {{if eq .State "RUNNING"}}{{"RUNNING"|green}}{{else if eq .State "STOPPED"}}{{"STOPPED"|red}}{{else}}{{blue "%s" .State}}{{end}}
{{end}}`)
}
@ -148,9 +148,12 @@ var deleteCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Warehouses drop-down."
|
||||
names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Required")
|
||||
if err != nil {
|
||||
|
@ -246,9 +249,12 @@ var editCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Warehouses drop-down."
|
||||
names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Required")
|
||||
if err != nil {
|
||||
|
@ -330,9 +336,12 @@ var getCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Warehouses drop-down."
|
||||
names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Required")
|
||||
if err != nil {
|
||||
|
@ -525,9 +534,12 @@ var startCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Warehouses drop-down."
|
||||
names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Required")
|
||||
if err != nil {
|
||||
|
@ -609,9 +621,12 @@ var stopCmd = &cobra.Command{
|
|||
}
|
||||
} else {
|
||||
if len(args) == 0 {
|
||||
promptSpinner := cmdio.Spinner(ctx)
|
||||
promptSpinner <- "No ID argument specified. Loading names for Warehouses drop-down."
|
||||
names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{})
|
||||
close(promptSpinner)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err)
|
||||
}
|
||||
id, err := cmdio.Select(ctx, names, "Required")
|
||||
if err != nil {
|
||||
|
|
|
@@ -5,7 +5,7 @@ import "github.com/databricks/cli/libs/cmdio"
func init() {
listReq.Path = "/"
listCmd.Annotations["template"] = cmdio.Heredoc(`
{{white "ID"}} {{white "Type"}} {{white "Language"}} {{white "Path"}}
{{range .}}{{green "%d" .ObjectId}} {{blue "%s" .ObjectType}} {{cyan "%s" .Language}} {{.Path|white}}
{{header "ID"}} {{header "Type"}} {{header "Language"}} {{header "Path"}}
{{range .}}{{green "%d" .ObjectId}} {{blue "%s" .ObjectType}} {{cyan "%s" .Language}} {{.Path|cyan}}
{{end}}`)
}
@@ -62,9 +62,12 @@ var deleteCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No PATH argument specified. Loading names for Workspace drop-down."
names, err := w.Workspace.ObjectInfoPathToObjectIdMap(ctx, workspace.ListWorkspaceRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Workspace drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The absolute path of the notebook or directory")
if err != nil {
@@ -127,9 +130,12 @@ var exportCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No PATH argument specified. Loading names for Workspace drop-down."
names, err := w.Workspace.ObjectInfoPathToObjectIdMap(ctx, workspace.ListWorkspaceRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Workspace drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The absolute path of the object or directory")
if err != nil {
@@ -172,7 +178,14 @@ var getStatusCmd = &cobra.Command{
call returns an error RESOURCE_DOES_NOT_EXIST.`,

Annotations: map[string]string{},
PreRunE: root.MustWorkspaceClient,
Args: func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
if cmd.Flags().Changed("json") {
check = cobra.ExactArgs(0)
}
return check(cmd, args)
},
PreRunE: root.MustWorkspaceClient,
RunE: func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
@@ -182,20 +195,6 @@ var getStatusCmd = &cobra.Command{
return err
}
} else {
if len(args) == 0 {
names, err := w.Workspace.ObjectInfoPathToObjectIdMap(ctx, workspace.ListWorkspaceRequest{})
if err != nil {
return err
}
id, err := cmdio.Select(ctx, names, "The absolute path of the notebook or directory")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have the absolute path of the notebook or directory")
}
getStatusReq.Path = args[0]
}
@@ -235,7 +234,14 @@ var importCmd = &cobra.Command{
use DBC format to import a directory.`,

Annotations: map[string]string{},
PreRunE: root.MustWorkspaceClient,
Args: func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs(1)
if cmd.Flags().Changed("json") {
check = cobra.ExactArgs(0)
}
return check(cmd, args)
},
PreRunE: root.MustWorkspaceClient,
RunE: func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
@@ -245,20 +251,6 @@ var importCmd = &cobra.Command{
return err
}
} else {
if len(args) == 0 {
names, err := w.Workspace.ObjectInfoPathToObjectIdMap(ctx, workspace.ListWorkspaceRequest{})
if err != nil {
return err
}
id, err := cmdio.Select(ctx, names, "The absolute path of the object or directory")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have the absolute path of the object or directory")
}
importReq.Path = args[0]
}
@@ -358,9 +350,12 @@ var mkdirsCmd = &cobra.Command{
}
} else {
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No PATH argument specified. Loading names for Workspace drop-down."
names, err := w.Workspace.ObjectInfoPathToObjectIdMap(ctx, workspace.ListWorkspaceRequest{})
close(promptSpinner)
if err != nil {
return err
return fmt.Errorf("failed to load names for Workspace drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "The absolute path of the directory")
if err != nil {
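The get-status and import hunks above swap a fixed argument count for a validator that accepts exactly one positional PATH, or none when the request is supplied via --json. A standalone sketch of that validator; the command and flag wiring here are illustrative, while cobra.ExactArgs and Flags().Changed are used exactly as in the generated code.

package example

import "github.com/spf13/cobra"

// exampleCmd stands in for a generated command such as get-status or import;
// the generated code registers a --json flag elsewhere.
var exampleCmd = &cobra.Command{
	Use: "example PATH",
	// Require one positional argument unless --json was provided, in which
	// case no positional arguments are expected.
	Args: func(cmd *cobra.Command, args []string) error {
		check := cobra.ExactArgs(1)
		if cmd.Flags().Changed("json") {
			check = cobra.ExactArgs(0)
		}
		return check(cmd, args)
	},
}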
@@ -33,3 +33,8 @@ func TestAccClustersGet(t *testing.T) {
assert.Contains(t, outStr, fmt.Sprintf(`"cluster_id":"%s"`, clusterId))
assert.Equal(t, "", stderr.String())
}

func TestClusterCreateErrorWhenNoArguments(t *testing.T) {
_, _, err := RequireErrorRun(t, "clusters", "create")
assert.Equal(t, "accepts 1 arg(s), received 0", err.Error())
}
|
@ -0,0 +1,104 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io/fs"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
_ "github.com/databricks/cli/cmd/fs"
|
||||
"github.com/databricks/cli/libs/filer"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFsLsForDbfs(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
ctx := context.Background()
|
||||
w, err := databricks.NewWorkspaceClient()
|
||||
require.NoError(t, err)
|
||||
|
||||
tmpDir := temporaryDbfsDir(t, w)
|
||||
|
||||
f, err := filer.NewDbfsClient(w, tmpDir)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = f.Mkdir(ctx, "a")
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories)
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "bye.txt", strings.NewReader("def"))
|
||||
require.NoError(t, err)
|
||||
|
||||
stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json")
|
||||
assert.Equal(t, "", stderr.String())
|
||||
var parsedStdout []map[string]any
|
||||
err = json.Unmarshal(stdout.Bytes(), &parsedStdout)
|
||||
require.NoError(t, err)
|
||||
|
||||
// assert on ls output
|
||||
assert.Equal(t, "a", parsedStdout[0]["name"])
|
||||
assert.Equal(t, true, parsedStdout[0]["is_directory"])
|
||||
assert.Equal(t, float64(0), parsedStdout[0]["size"])
|
||||
assert.Equal(t, "bye.txt", parsedStdout[1]["name"])
|
||||
assert.Equal(t, false, parsedStdout[1]["is_directory"])
|
||||
assert.Equal(t, float64(3), parsedStdout[1]["size"])
|
||||
}
|
||||
|
||||
func TestFsLsForDbfsOnFile(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
ctx := context.Background()
|
||||
w, err := databricks.NewWorkspaceClient()
|
||||
require.NoError(t, err)
|
||||
|
||||
tmpDir := temporaryDbfsDir(t, w)
|
||||
|
||||
f, err := filer.NewDbfsClient(w, tmpDir)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = f.Mkdir(ctx, "a")
|
||||
require.NoError(t, err)
|
||||
err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = RequireErrorRun(t, "fs", "ls", "dbfs:"+path.Join(tmpDir, "a", "hello.txt"), "--output=json")
|
||||
assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error())
|
||||
}
|
||||
|
||||
func TestFsLsForDbfsOnEmptyDir(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
w, err := databricks.NewWorkspaceClient()
|
||||
require.NoError(t, err)
|
||||
|
||||
tmpDir := temporaryDbfsDir(t, w)
|
||||
|
||||
stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json")
|
||||
assert.Equal(t, "", stderr.String())
|
||||
var parsedStdout []map[string]any
|
||||
err = json.Unmarshal(stdout.Bytes(), &parsedStdout)
|
||||
require.NoError(t, err)
|
||||
|
||||
// assert on ls output
|
||||
assert.Equal(t, 0, len(parsedStdout))
|
||||
}
|
||||
|
||||
func TestFsLsForDbfsForNonexistingDir(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
_, _, err := RequireErrorRun(t, "fs", "ls", "dbfs:/john-cena", "--output=json")
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestFsLsWithoutScheme(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
_, _, err := RequireErrorRun(t, "fs", "ls", "/ray-mysterio", "--output=json")
|
||||
assert.ErrorContains(t, err, "expected dbfs path (with the dbfs:/ prefix): /ray-mysterio")
|
||||
}
|
|
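The assertions above pin down the shape of the JSON emitted by the fs ls command. For reference, a minimal sketch of a struct those entries could be decoded into; the field names come from the keys the tests check (name, is_directory, size) and are assumptions, not an official output schema.

package example

import "encoding/json"

// lsEntry mirrors the keys asserted in the tests above. Illustrative sketch
// only; not a schema published by the CLI.
type lsEntry struct {
	Name        string `json:"name"`
	IsDirectory bool   `json:"is_directory"`
	Size        int64  `json:"size"`
}

// parseLsOutput decodes the JSON array printed by `fs ls ... --output=json`.
func parseLsOutput(b []byte) ([]lsEntry, error) {
	var entries []lsEntry
	err := json.Unmarshal(b, &entries)
	return entries, err
}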
@ -26,7 +26,7 @@ import (
|
|||
|
||||
var (
|
||||
repoUrl = "https://github.com/databricks/databricks-empty-ide-project.git"
|
||||
repoFiles = []string{"README-IDE.md"}
|
||||
repoFiles = []string{}
|
||||
)
|
||||
|
||||
// This test needs auth env vars to run.
|
||||
|
@ -59,7 +59,7 @@ func setupRepo(t *testing.T, wsc *databricks.WorkspaceClient, ctx context.Contex
|
|||
return localRoot, remoteRoot
|
||||
}
|
||||
|
||||
type assertSync struct {
|
||||
type syncTest struct {
|
||||
t *testing.T
|
||||
c *cobraTestRunner
|
||||
w *databricks.WorkspaceClient
|
||||
|
@ -67,7 +67,54 @@ type assertSync struct {
|
|||
remoteRoot string
|
||||
}
|
||||
|
||||
func (a *assertSync) remoteDirContent(ctx context.Context, relativeDir string, expectedFiles []string) {
|
||||
func setupSyncTest(t *testing.T, args ...string) *syncTest {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
w := databricks.Must(databricks.NewWorkspaceClient())
|
||||
localRoot := t.TempDir()
|
||||
remoteRoot := temporaryWorkspaceDir(t, w)
|
||||
|
||||
// Prepend common arguments.
|
||||
args = append([]string{
|
||||
"sync",
|
||||
localRoot,
|
||||
remoteRoot,
|
||||
"--output",
|
||||
"json",
|
||||
}, args...)
|
||||
|
||||
c := NewCobraTestRunner(t, args...)
|
||||
c.RunBackground()
|
||||
|
||||
return &syncTest{
|
||||
t: t,
|
||||
c: c,
|
||||
w: w,
|
||||
localRoot: localRoot,
|
||||
remoteRoot: remoteRoot,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *syncTest) waitForCompletionMarker() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
s.t.Fatal("timed out waiting for sync to complete")
|
||||
case line := <-s.c.stdoutLines:
|
||||
var event sync.EventBase
|
||||
err := json.Unmarshal([]byte(line), &event)
|
||||
require.NoError(s.t, err)
|
||||
if event.Type == sync.EventTypeComplete {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *syncTest) remoteDirContent(ctx context.Context, relativeDir string, expectedFiles []string) {
|
||||
remoteDir := path.Join(a.remoteRoot, relativeDir)
|
||||
a.c.Eventually(func() bool {
|
||||
objects, err := a.w.Workspace.ListAll(ctx, workspace.ListWorkspaceRequest{
|
||||
|
@ -92,7 +139,7 @@ func (a *assertSync) remoteDirContent(ctx context.Context, relativeDir string, e
|
|||
}
|
||||
}
|
||||
|
||||
func (a *assertSync) remoteFileContent(ctx context.Context, relativePath string, expectedContent string) {
|
||||
func (a *syncTest) remoteFileContent(ctx context.Context, relativePath string, expectedContent string) {
|
||||
filePath := path.Join(a.remoteRoot, relativePath)
|
||||
|
||||
// Remove leading "/" so we can use it in the URL.
|
||||
|
@ -113,7 +160,7 @@ func (a *assertSync) remoteFileContent(ctx context.Context, relativePath string,
|
|||
}, 30*time.Second, 5*time.Second)
|
||||
}
|
||||
|
||||
func (a *assertSync) objectType(ctx context.Context, relativePath string, expected string) {
|
||||
func (a *syncTest) objectType(ctx context.Context, relativePath string, expected string) {
|
||||
path := path.Join(a.remoteRoot, relativePath)
|
||||
|
||||
a.c.Eventually(func() bool {
|
||||
|
@ -125,7 +172,7 @@ func (a *assertSync) objectType(ctx context.Context, relativePath string, expect
|
|||
}, 30*time.Second, 5*time.Second)
|
||||
}
|
||||
|
||||
func (a *assertSync) language(ctx context.Context, relativePath string, expected string) {
|
||||
func (a *syncTest) language(ctx context.Context, relativePath string, expected string) {
|
||||
path := path.Join(a.remoteRoot, relativePath)
|
||||
|
||||
a.c.Eventually(func() bool {
|
||||
|
@ -137,7 +184,7 @@ func (a *assertSync) language(ctx context.Context, relativePath string, expected
|
|||
}, 30*time.Second, 5*time.Second)
|
||||
}
|
||||
|
||||
func (a *assertSync) snapshotContains(files []string) {
|
||||
func (a *syncTest) snapshotContains(files []string) {
|
||||
snapshotPath := filepath.Join(a.localRoot, ".databricks/sync-snapshots", sync.GetFileName(a.w.Config.Host, a.remoteRoot))
|
||||
assert.FileExists(a.t, snapshotPath)
|
||||
|
||||
|
@ -160,123 +207,87 @@ func (a *assertSync) snapshotContains(files []string) {
|
|||
assert.Equal(a.t, len(files), len(s.LastUpdatedTimes))
|
||||
}
|
||||
|
||||
func TestAccFullFileSync(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
wsc := databricks.Must(databricks.NewWorkspaceClient())
|
||||
func TestAccSyncFullFileSync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
localRepoPath, remoteRepoPath := setupRepo(t, wsc, ctx)
|
||||
|
||||
// Run `databricks sync` in the background.
|
||||
c := NewCobraTestRunner(t, "sync", localRepoPath, remoteRepoPath, "--full", "--watch")
|
||||
c.RunBackground()
|
||||
|
||||
assertSync := assertSync{
|
||||
t: t,
|
||||
c: c,
|
||||
w: wsc,
|
||||
localRoot: localRepoPath,
|
||||
remoteRoot: remoteRepoPath,
|
||||
}
|
||||
assertSync := setupSyncTest(t, "--full", "--watch")
|
||||
|
||||
// .gitignore is created by the sync process to ensure that .databricks is not synced
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore"))
|
||||
|
||||
// New file
|
||||
localFilePath := filepath.Join(localRepoPath, "foo.txt")
|
||||
localFilePath := filepath.Join(assertSync.localRoot, "foo.txt")
|
||||
f := testfile.CreateFile(t, localFilePath)
|
||||
defer f.Close(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, "foo.txt", ".gitignore"))
|
||||
assertSync.remoteFileContent(ctx, "foo.txt", "")
|
||||
|
||||
// Write to file
|
||||
f.Overwrite(t, `{"statement": "Mi Gente"}`)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteFileContent(ctx, "foo.txt", `{"statement": "Mi Gente"}`)
|
||||
|
||||
// Write again
|
||||
f.Overwrite(t, `{"statement": "Young Dumb & Broke"}`)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteFileContent(ctx, "foo.txt", `{"statement": "Young Dumb & Broke"}`)
|
||||
|
||||
// delete
|
||||
f.Remove(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore"))
|
||||
}
|
||||
|
||||
func TestAccIncrementalFileSync(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
wsc := databricks.Must(databricks.NewWorkspaceClient())
|
||||
func TestAccSyncIncrementalFileSync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
localRepoPath, remoteRepoPath := setupRepo(t, wsc, ctx)
|
||||
|
||||
// Run `databricks sync` in the background.
|
||||
c := NewCobraTestRunner(t, "sync", localRepoPath, remoteRepoPath, "--watch")
|
||||
c.RunBackground()
|
||||
|
||||
assertSync := assertSync{
|
||||
t: t,
|
||||
c: c,
|
||||
w: wsc,
|
||||
localRoot: localRepoPath,
|
||||
remoteRoot: remoteRepoPath,
|
||||
}
|
||||
assertSync := setupSyncTest(t, "--watch")
|
||||
|
||||
// .gitignore is created by the sync process to ensure that .databricks is not synced
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore"))
|
||||
|
||||
// New file
|
||||
localFilePath := filepath.Join(localRepoPath, "foo.txt")
|
||||
localFilePath := filepath.Join(assertSync.localRoot, "foo.txt")
|
||||
f := testfile.CreateFile(t, localFilePath)
|
||||
defer f.Close(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, "foo.txt", ".gitignore"))
|
||||
assertSync.remoteFileContent(ctx, "foo.txt", "")
|
||||
assertSync.snapshotContains(append(repoFiles, "foo.txt", ".gitignore"))
|
||||
|
||||
// Write to file
|
||||
f.Overwrite(t, `{"statement": "Mi Gente"}`)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteFileContent(ctx, "foo.txt", `{"statement": "Mi Gente"}`)
|
||||
|
||||
// Write again
|
||||
f.Overwrite(t, `{"statement": "Young Dumb & Broke"}`)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteFileContent(ctx, "foo.txt", `{"statement": "Young Dumb & Broke"}`)
|
||||
|
||||
// delete
|
||||
f.Remove(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore"))
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore"))
|
||||
}
|
||||
|
||||
func TestAccNestedFolderSync(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
wsc := databricks.Must(databricks.NewWorkspaceClient())
|
||||
func TestAccSyncNestedFolderSync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
localRepoPath, remoteRepoPath := setupRepo(t, wsc, ctx)
|
||||
|
||||
// Run `databricks sync` in the background.
|
||||
c := NewCobraTestRunner(t, "sync", localRepoPath, remoteRepoPath, "--watch")
|
||||
c.RunBackground()
|
||||
|
||||
assertSync := assertSync{
|
||||
t: t,
|
||||
c: c,
|
||||
w: wsc,
|
||||
localRoot: localRepoPath,
|
||||
remoteRoot: remoteRepoPath,
|
||||
}
|
||||
assertSync := setupSyncTest(t, "--watch")
|
||||
|
||||
// .gitignore is created by the sync process to ensure that .databricks is not synced
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore"))
|
||||
|
||||
// New file
|
||||
localFilePath := filepath.Join(localRepoPath, "dir1/dir2/dir3/foo.txt")
|
||||
localFilePath := filepath.Join(assertSync.localRoot, "dir1/dir2/dir3/foo.txt")
|
||||
err := os.MkdirAll(filepath.Dir(localFilePath), 0o755)
|
||||
assert.NoError(t, err)
|
||||
f := testfile.CreateFile(t, localFilePath)
|
||||
defer f.Close(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "dir1"))
|
||||
assertSync.remoteDirContent(ctx, "dir1", []string{"dir2"})
|
||||
assertSync.remoteDirContent(ctx, "dir1/dir2", []string{"dir3"})
|
||||
|
@ -285,40 +296,27 @@ func TestAccNestedFolderSync(t *testing.T) {
|
|||
|
||||
// delete
|
||||
f.Remove(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
// directories are not cleaned up right now. This is not ideal
|
||||
assertSync.remoteDirContent(ctx, "dir1/dir2/dir3", []string{})
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore"))
|
||||
}
|
||||
|
||||
func TestAccNestedSpacePlusAndHashAreEscapedSync(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
wsc := databricks.Must(databricks.NewWorkspaceClient())
|
||||
func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
localRepoPath, remoteRepoPath := setupRepo(t, wsc, ctx)
|
||||
|
||||
// Run `databricks sync` in the background.
|
||||
c := NewCobraTestRunner(t, "sync", localRepoPath, remoteRepoPath, "--watch")
|
||||
c.RunBackground()
|
||||
|
||||
assertSync := assertSync{
|
||||
t: t,
|
||||
c: c,
|
||||
w: wsc,
|
||||
localRoot: localRepoPath,
|
||||
remoteRoot: remoteRepoPath,
|
||||
}
|
||||
assertSync := setupSyncTest(t, "--watch")
|
||||
|
||||
// .gitignore is created by the sync process to ensure that .databricks is not synced
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore"))
|
||||
|
||||
// New file
|
||||
localFilePath := filepath.Join(localRepoPath, "dir1/a b+c/c+d e/e+f g#i.txt")
|
||||
localFilePath := filepath.Join(assertSync.localRoot, "dir1/a b+c/c+d e/e+f g#i.txt")
|
||||
err := os.MkdirAll(filepath.Dir(localFilePath), 0o755)
|
||||
assert.NoError(t, err)
|
||||
f := testfile.CreateFile(t, localFilePath)
|
||||
defer f.Close(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "dir1"))
|
||||
assertSync.remoteDirContent(ctx, "dir1", []string{"a b+c"})
|
||||
assertSync.remoteDirContent(ctx, "dir1/a b+c", []string{"c+d e"})
|
||||
|
@ -327,6 +325,7 @@ func TestAccNestedSpacePlusAndHashAreEscapedSync(t *testing.T) {
|
|||
|
||||
// delete
|
||||
f.Remove(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
// directories are not cleaned up right now. This is not ideal
|
||||
assertSync.remoteDirContent(ctx, "dir1/a b+c/c+d e", []string{})
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore"))
|
||||
|
@ -341,77 +340,49 @@ func TestAccNestedSpacePlusAndHashAreEscapedSync(t *testing.T) {
|
|||
//
|
||||
// In the above scenario sync should delete the empty folder and add foo to the remote
|
||||
// file system
|
||||
func TestAccIncrementalFileOverwritesFolder(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
wsc := databricks.Must(databricks.NewWorkspaceClient())
|
||||
func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
localRepoPath, remoteRepoPath := setupRepo(t, wsc, ctx)
|
||||
|
||||
// Run `databricks sync` in the background.
|
||||
c := NewCobraTestRunner(t, "sync", localRepoPath, remoteRepoPath, "--watch")
|
||||
c.RunBackground()
|
||||
|
||||
assertSync := assertSync{
|
||||
t: t,
|
||||
c: c,
|
||||
w: wsc,
|
||||
localRoot: localRepoPath,
|
||||
remoteRoot: remoteRepoPath,
|
||||
}
|
||||
assertSync := setupSyncTest(t, "--watch")
|
||||
|
||||
// create foo/bar.txt
|
||||
localFilePath := filepath.Join(localRepoPath, "foo/bar.txt")
|
||||
localFilePath := filepath.Join(assertSync.localRoot, "foo/bar.txt")
|
||||
err := os.MkdirAll(filepath.Dir(localFilePath), 0o755)
|
||||
assert.NoError(t, err)
|
||||
f := testfile.CreateFile(t, localFilePath)
|
||||
defer f.Close(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo"))
|
||||
assertSync.remoteDirContent(ctx, "foo", []string{"bar.txt"})
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore", filepath.FromSlash("foo/bar.txt")))
|
||||
|
||||
// delete foo/bar.txt
|
||||
f.Remove(t)
|
||||
os.Remove(filepath.Join(localRepoPath, "foo"))
|
||||
os.Remove(filepath.Join(assertSync.localRoot, "foo"))
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "foo", []string{})
|
||||
assertSync.objectType(ctx, "foo", "DIRECTORY")
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore"))
|
||||
|
||||
f2 := testfile.CreateFile(t, filepath.Join(localRepoPath, "foo"))
|
||||
f2 := testfile.CreateFile(t, filepath.Join(assertSync.localRoot, "foo"))
|
||||
defer f2.Close(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo"))
|
||||
assertSync.objectType(ctx, "foo", "FILE")
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo"))
|
||||
}
|
||||
|
||||
func TestAccIncrementalSyncPythonNotebookToFile(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
wsc := databricks.Must(databricks.NewWorkspaceClient())
|
||||
func TestAccSyncIncrementalSyncPythonNotebookToFile(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
localRepoPath, remoteRepoPath := setupRepo(t, wsc, ctx)
|
||||
assertSync := setupSyncTest(t, "--watch")
|
||||
|
||||
// create python notebook
|
||||
localFilePath := filepath.Join(localRepoPath, "foo.py")
|
||||
localFilePath := filepath.Join(assertSync.localRoot, "foo.py")
|
||||
f := testfile.CreateFile(t, localFilePath)
|
||||
defer f.Close(t)
|
||||
f.Overwrite(t, "# Databricks notebook source")
|
||||
|
||||
// Run `databricks sync` in the background.
|
||||
c := NewCobraTestRunner(t, "sync", localRepoPath, remoteRepoPath, "--watch")
|
||||
c.RunBackground()
|
||||
|
||||
assertSync := assertSync{
|
||||
t: t,
|
||||
c: c,
|
||||
w: wsc,
|
||||
localRoot: localRepoPath,
|
||||
remoteRoot: remoteRepoPath,
|
||||
}
|
||||
|
||||
// notebook was uploaded properly
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo"))
|
||||
assertSync.objectType(ctx, "foo", "NOTEBOOK")
|
||||
assertSync.language(ctx, "foo", "PYTHON")
|
||||
|
@ -419,40 +390,27 @@ func TestAccIncrementalSyncPythonNotebookToFile(t *testing.T) {
|
|||
|
||||
// convert to vanilla python file
|
||||
f.Overwrite(t, "# No longer a python notebook")
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.objectType(ctx, "foo.py", "FILE")
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo.py"))
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo.py"))
|
||||
|
||||
// delete the vanilla python file
|
||||
f.Remove(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore"))
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore"))
|
||||
}
|
||||
|
||||
func TestAccIncrementalSyncFileToPythonNotebook(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
wsc := databricks.Must(databricks.NewWorkspaceClient())
|
||||
func TestAccSyncIncrementalSyncFileToPythonNotebook(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
localRepoPath, remoteRepoPath := setupRepo(t, wsc, ctx)
|
||||
|
||||
// Run `databricks sync` in the background.
|
||||
c := NewCobraTestRunner(t, "sync", localRepoPath, remoteRepoPath, "--watch")
|
||||
c.RunBackground()
|
||||
|
||||
assertSync := assertSync{
|
||||
t: t,
|
||||
c: c,
|
||||
w: wsc,
|
||||
localRoot: localRepoPath,
|
||||
remoteRoot: remoteRepoPath,
|
||||
}
|
||||
assertSync := setupSyncTest(t, "--watch")
|
||||
|
||||
// create vanilla python file
|
||||
localFilePath := filepath.Join(localRepoPath, "foo.py")
|
||||
localFilePath := filepath.Join(assertSync.localRoot, "foo.py")
|
||||
f := testfile.CreateFile(t, localFilePath)
|
||||
defer f.Close(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
|
||||
// assert file upload
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo.py"))
|
||||
|
@ -461,37 +419,23 @@ func TestAccIncrementalSyncFileToPythonNotebook(t *testing.T) {
|
|||
|
||||
// convert to notebook
|
||||
f.Overwrite(t, "# Databricks notebook source")
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.objectType(ctx, "foo", "NOTEBOOK")
|
||||
assertSync.language(ctx, "foo", "PYTHON")
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo"))
|
||||
assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo.py"))
|
||||
}
|
||||
|
||||
func TestAccIncrementalSyncPythonNotebookDelete(t *testing.T) {
|
||||
t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
|
||||
|
||||
wsc := databricks.Must(databricks.NewWorkspaceClient())
|
||||
func TestAccSyncIncrementalSyncPythonNotebookDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
localRepoPath, remoteRepoPath := setupRepo(t, wsc, ctx)
|
||||
assertSync := setupSyncTest(t, "--watch")
|
||||
|
||||
// create python notebook
|
||||
localFilePath := filepath.Join(localRepoPath, "foo.py")
|
||||
localFilePath := filepath.Join(assertSync.localRoot, "foo.py")
|
||||
f := testfile.CreateFile(t, localFilePath)
|
||||
defer f.Close(t)
|
||||
f.Overwrite(t, "# Databricks notebook source")
|
||||
|
||||
// Run `databricks sync` in the background.
|
||||
c := NewCobraTestRunner(t, "sync", localRepoPath, remoteRepoPath, "--watch")
|
||||
c.RunBackground()
|
||||
|
||||
assertSync := assertSync{
|
||||
t: t,
|
||||
c: c,
|
||||
w: wsc,
|
||||
localRoot: localRepoPath,
|
||||
remoteRoot: remoteRepoPath,
|
||||
}
|
||||
assertSync.waitForCompletionMarker()
|
||||
|
||||
// notebook was uploaded properly
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo"))
|
||||
|
@ -500,6 +444,7 @@ func TestAccIncrementalSyncPythonNotebookDelete(t *testing.T) {
|
|||
|
||||
// Delete notebook
|
||||
f.Remove(t)
|
||||
assertSync.waitForCompletionMarker()
|
||||
assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore"))
|
||||
}
|
||||
|
||||
|
|
|
@ -22,3 +22,8 @@ func TestWorkpaceListErrorWhenNoArguments(t *testing.T) {
|
|||
_, _, err := RequireErrorRun(t, "workspace", "list")
|
||||
assert.Equal(t, "accepts 1 arg(s), received 0", err.Error())
|
||||
}
|
||||
|
||||
func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) {
|
||||
_, _, err := RequireErrorRun(t, "workspace", "get-status")
|
||||
assert.Equal(t, "accepts 1 arg(s), received 0", err.Error())
|
||||
}
|
||||
|
|
|
@ -16,6 +16,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/databricks/cli/libs/auth/cache"
|
||||
"github.com/databricks/cli/libs/databrickscfg"
|
||||
"github.com/databricks/databricks-sdk-go/config"
|
||||
"github.com/databricks/databricks-sdk-go/retries"
|
||||
"github.com/pkg/browser"
|
||||
"golang.org/x/oauth2"
|
||||
|
@ -95,6 +97,16 @@ func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) {
|
|||
return refreshed, nil
|
||||
}
|
||||
|
||||
func (a *PersistentAuth) profileName() string {
|
||||
// TODO: get profile name from interactive input
|
||||
if a.AccountID != "" {
|
||||
return fmt.Sprintf("ACCOUNT-%s", a.AccountID)
|
||||
}
|
||||
host := strings.TrimPrefix(a.Host, "https://")
|
||||
split := strings.Split(host, ".")
|
||||
return split[0]
|
||||
}
|
||||
|
||||
func (a *PersistentAuth) Challenge(ctx context.Context) error {
|
||||
err := a.init(ctx)
|
||||
if err != nil {
|
||||
|
@ -120,7 +132,12 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("store: %w", err)
|
||||
}
|
||||
return nil
|
||||
return databrickscfg.SaveToProfile(ctx, &config.Config{
|
||||
Host: a.Host,
|
||||
AccountID: a.AccountID,
|
||||
AuthType: "databricks-cli",
|
||||
Profile: a.profileName(),
|
||||
})
|
||||
}
|
||||
|
||||
func (a *PersistentAuth) init(ctx context.Context) error {
|
||||
|
|
|
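For illustration, here is a standalone sketch of the profile-name heuristic introduced above: account logins are named ACCOUNT-<id>, workspace logins take the first label of the host. This is a hypothetical copy with made-up inputs, not the method on PersistentAuth itself.

package main

import (
	"fmt"
	"strings"
)

// profileName mirrors the heuristic above (standalone copy for illustration).
func profileName(host, accountID string) string {
	if accountID != "" {
		return fmt.Sprintf("ACCOUNT-%s", accountID)
	}
	host = strings.TrimPrefix(host, "https://")
	return strings.Split(host, ".")[0]
}

func main() {
	// Example inputs below are made up.
	fmt.Println(profileName("https://dbc-example.cloud.databricks.com", "")) // dbc-example
	fmt.Println(profileName("", "12345"))                                    // ACCOUNT-12345
}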
@ -66,14 +66,20 @@ func (c *cmdIO) IsTTY() bool {
|
|||
return isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd)
|
||||
}
|
||||
|
||||
func (c *cmdIO) Render(v any) error {
|
||||
func Render(ctx context.Context, v any) error {
|
||||
c := fromContext(ctx)
|
||||
return RenderWithTemplate(ctx, v, c.template)
|
||||
}
|
||||
|
||||
func RenderWithTemplate(ctx context.Context, v any, template string) error {
|
||||
// TODO: add terminal width & white/dark theme detection
|
||||
c := fromContext(ctx)
|
||||
switch c.outputFormat {
|
||||
case flags.OutputJSON:
|
||||
return renderJson(c.out, v)
|
||||
case flags.OutputText:
|
||||
if c.template != "" {
|
||||
return renderTemplate(c.out, c.template, v)
|
||||
if template != "" {
|
||||
return renderTemplate(c.out, template, v)
|
||||
}
|
||||
return renderJson(c.out, v)
|
||||
default:
|
||||
|
@ -81,11 +87,6 @@ func (c *cmdIO) Render(v any) error {
|
|||
}
|
||||
}
|
||||
|
||||
func Render(ctx context.Context, v any) error {
|
||||
c := fromContext(ctx)
|
||||
return c.Render(v)
|
||||
}
|
||||
|
||||
type tuple struct{ Name, Id string }
|
||||
|
||||
func (c *cmdIO) Select(names map[string]string, label string) (id string, err error) {
|
||||
|
|
|
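A hedged usage sketch of the two render entry points after this refactor, assuming they live in the cmdio package of this repository; the helper name and template string are illustrative, not taken from this commit.

package example

import (
	"context"

	"github.com/databricks/cli/libs/cmdio"
)

// renderNames shows the intended call sites: Render uses the template already
// configured on the cmdIO in ctx, while RenderWithTemplate overrides it for a
// single call.
func renderNames(ctx context.Context, v any) error {
	if err := cmdio.Render(ctx, v); err != nil {
		return err
	}
	return cmdio.RenderWithTemplate(ctx, v, "{{range .}}{{.Name}}\n{{end}}")
}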
@ -6,6 +6,7 @@ import (
|
|||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/nwidger/jsoncolor"
|
||||
|
@ -58,8 +59,7 @@ func renderTemplate(w io.Writer, tmpl string, v any) error {
|
|||
// we render colored output if stdout is TTY, otherwise we render text.
|
||||
// in the future we'll check if we can explicitly check for stderr being
|
||||
// a TTY
|
||||
"black": color.BlackString,
|
||||
"white": color.WhiteString,
|
||||
"header": color.BlueString,
|
||||
"red": color.RedString,
|
||||
"green": color.GreenString,
|
||||
"blue": color.BlueString,
|
||||
|
@ -86,6 +86,9 @@ func renderTemplate(w io.Writer, tmpl string, v any) error {
|
|||
}
|
||||
return string(b), nil
|
||||
},
|
||||
"pretty_date": func(t time.Time) string {
|
||||
return t.Format("2006-01-02T15:04:05Z")
|
||||
},
|
||||
}).Parse(tmpl)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -107,8 +110,9 @@ func fancyJSON(v any) ([]byte, error) {
|
|||
f.FalseColor = color.New(color.FgRed)
|
||||
f.NumberColor = color.New(color.FgCyan)
|
||||
f.NullColor = color.New(color.FgMagenta)
|
||||
f.FieldColor = color.New(color.FgWhite, color.Bold)
|
||||
f.FieldQuoteColor = color.New(color.FgWhite)
|
||||
f.ObjectColor = color.New(color.Reset)
|
||||
f.CommaColor = color.New(color.Reset)
|
||||
f.ColonColor = color.New(color.Reset)
|
||||
|
||||
return jsoncolor.MarshalIndentWithFormatter(v, "", " ", f)
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package databrickscfg
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
@ -13,6 +14,43 @@ import (
|
|||
|
||||
var ResolveProfileFromHost = profileFromHostLoader{}
|
||||
|
||||
var errNoMatchingProfiles = errors.New("no matching config profiles found")
|
||||
|
||||
type errMultipleProfiles []string
|
||||
|
||||
func (e errMultipleProfiles) Error() string {
|
||||
return fmt.Sprintf("multiple profiles matched: %s", strings.Join(e, ", "))
|
||||
}
|
||||
|
||||
func findMatchingProfile(configFile *config.File, matcher func(*ini.Section) bool) (*ini.Section, error) {
|
||||
// Look for sections in the configuration file that match the configured host.
|
||||
var matching []*ini.Section
|
||||
for _, section := range configFile.Sections() {
|
||||
if !matcher(section) {
|
||||
continue
|
||||
}
|
||||
matching = append(matching, section)
|
||||
}
|
||||
|
||||
// If there are no matching sections, we don't do anything.
|
||||
if len(matching) == 0 {
|
||||
return nil, errNoMatchingProfiles
|
||||
}
|
||||
|
||||
// If there are multiple matching sections, let the user know it is impossible
|
||||
// to unambiguously select a profile to use.
|
||||
if len(matching) > 1 {
|
||||
var names errMultipleProfiles
|
||||
for _, section := range matching {
|
||||
names = append(names, section.Name())
|
||||
}
|
||||
|
||||
return nil, names
|
||||
}
|
||||
|
||||
return matching[0], nil
|
||||
}
|
||||
|
||||
type profileFromHostLoader struct{}
|
||||
|
||||
func (l profileFromHostLoader) Name() string {
|
||||
|
@ -27,6 +65,7 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
configFile, err := config.LoadFile(cfg.ConfigFile)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
|
@ -34,56 +73,37 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error {
|
|||
}
|
||||
return fmt.Errorf("cannot parse config file: %w", err)
|
||||
}
|
||||
|
||||
// Normalized version of the configured host.
|
||||
host := normalizeHost(cfg.Host)
|
||||
|
||||
// Look for sections in the configuration file that match the configured host.
|
||||
var matching []*ini.Section
|
||||
for _, section := range configFile.Sections() {
|
||||
key, err := section.GetKey("host")
|
||||
match, err := findMatchingProfile(configFile, func(s *ini.Section) bool {
|
||||
key, err := s.GetKey("host")
|
||||
if err != nil {
|
||||
log.Tracef(context.Background(), "section %s: %s", section.Name(), err)
|
||||
continue
|
||||
log.Tracef(ctx, "section %s: %s", s.Name(), err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Ignore this section if the normalized host doesn't match.
|
||||
if normalizeHost(key.Value()) != host {
|
||||
continue
|
||||
}
|
||||
|
||||
matching = append(matching, section)
|
||||
}
|
||||
|
||||
// If there are no matching sections, we don't do anything.
|
||||
if len(matching) == 0 {
|
||||
// Check if this section matches the normalized host
|
||||
return normalizeHost(key.Value()) == host
|
||||
})
|
||||
if err == errNoMatchingProfiles {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If there are multiple matching sections, let the user know it is impossible
|
||||
// to unambiguously select a profile to use.
|
||||
if len(matching) > 1 {
|
||||
var names []string
|
||||
for _, section := range matching {
|
||||
names = append(names, section.Name())
|
||||
}
|
||||
|
||||
if err, ok := err.(errMultipleProfiles); ok {
|
||||
return fmt.Errorf(
|
||||
"multiple profiles for host %s (%s): please set DATABRICKS_CONFIG_PROFILE to specify one",
|
||||
host,
|
||||
strings.Join(names, ", "),
|
||||
)
|
||||
"%s: %w: please set DATABRICKS_CONFIG_PROFILE to specify one",
|
||||
host, err)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
match := matching[0]
|
||||
log.Debugf(context.Background(), "Loading profile %s because of host match", match.Name())
|
||||
log.Debugf(ctx, "Loading profile %s because of host match", match.Name())
|
||||
err = config.ConfigAttributes.ResolveFromStringMap(cfg, match.KeysHash())
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s %s profile: %w", configFile.Path(), match.Name(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (l profileFromHostLoader) isAnyAuthConfigured(cfg *config.Config) bool {
|
||||
|
|
|
@ -126,5 +126,5 @@ func TestLoaderErrorsOnMultipleMatches(t *testing.T) {
|
|||
|
||||
err := cfg.EnsureResolved()
|
||||
assert.Error(t, err)
|
||||
assert.ErrorContains(t, err, "multiple profiles for host https://foo (foo1, foo2): ")
|
||||
assert.ErrorContains(t, err, "https://foo: multiple profiles matched: foo1, foo2")
|
||||
}
|
||||
|
|
|
@ -0,0 +1,122 @@
|
|||
package databrickscfg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go/config"
|
||||
"gopkg.in/ini.v1"
|
||||
)
|
||||
|
||||
const fileMode = 0o600
|
||||
|
||||
func loadOrCreateConfigFile(filename string) (*config.File, error) {
|
||||
if filename == "" {
|
||||
filename = "~/.databrickscfg"
|
||||
}
|
||||
// Expand ~ to home directory, as we need a deterministic name for os.OpenFile
|
||||
// to work in the case where ~/.databrickscfg does not exist yet
|
||||
if strings.HasPrefix(filename, "~") {
|
||||
homedir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot find homedir: %w", err)
|
||||
}
|
||||
filename = fmt.Sprintf("%s%s", homedir, filename[1:])
|
||||
}
|
||||
configFile, err := config.LoadFile(filename)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileMode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create %s: %w", filename, err)
|
||||
}
|
||||
defer file.Close()
|
||||
configFile, err = config.LoadFile(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("load created %s: %w", filename, err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("parse %s: %w", filename, err)
|
||||
}
|
||||
return configFile, nil
|
||||
}
|
||||
|
||||
func matchOrCreateSection(ctx context.Context, configFile *config.File, cfg *config.Config) (*ini.Section, error) {
|
||||
section, err := findMatchingProfile(configFile, func(s *ini.Section) bool {
|
||||
if cfg.Profile == s.Name() {
|
||||
return true
|
||||
}
|
||||
raw := s.KeysHash()
|
||||
if cfg.AccountID != "" {
|
||||
// here we rely on map zero values for matching with accounts:
|
||||
// if profile has no account id, the raw["account_id"] will be empty
|
||||
return cfg.AccountID == raw["account_id"]
|
||||
}
|
||||
if cfg.Host == "" {
|
||||
return false
|
||||
}
|
||||
host, ok := raw["host"]
|
||||
if !ok {
|
||||
log.Tracef(ctx, "section %s: no host", s.Name())
|
||||
return false
|
||||
}
|
||||
// Check if this section matches the normalized host
|
||||
return normalizeHost(host) == normalizeHost(cfg.Host)
|
||||
})
|
||||
if err == errNoMatchingProfiles {
|
||||
section, err = configFile.NewSection(cfg.Profile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot create new profile: %w", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return section, nil
|
||||
}
|
||||
|
||||
func SaveToProfile(ctx context.Context, cfg *config.Config) error {
|
||||
configFile, err := loadOrCreateConfigFile(cfg.ConfigFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
section, err := matchOrCreateSection(ctx, configFile, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// clear the profile name and config file path before writing the remaining attributes to the section
|
||||
cfg.Profile = ""
|
||||
cfg.ConfigFile = ""
|
||||
|
||||
// clear old keys in case we're overriding the section
|
||||
for _, oldKey := range section.KeyStrings() {
|
||||
section.DeleteKey(oldKey)
|
||||
}
|
||||
|
||||
for _, attr := range config.ConfigAttributes {
|
||||
if attr.IsZero(cfg) {
|
||||
continue
|
||||
}
|
||||
key := section.Key(attr.Name)
|
||||
key.SetValue(attr.GetString(cfg))
|
||||
}
|
||||
|
||||
orig, backupErr := os.ReadFile(configFile.Path())
|
||||
if len(orig) > 0 && backupErr == nil {
|
||||
log.Infof(ctx, "Backing up in %s.bak", configFile.Path())
|
||||
err = os.WriteFile(configFile.Path()+".bak", orig, fileMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("backup: %w", err)
|
||||
}
|
||||
log.Infof(ctx, "Overwriting %s", configFile.Path())
|
||||
} else if backupErr != nil {
|
||||
log.Warnf(ctx, "Failed to backup %s: %v. Proceeding to save",
|
||||
configFile.Path(), backupErr)
|
||||
} else {
|
||||
log.Infof(ctx, "Saving %s", configFile.Path())
|
||||
}
|
||||
return configFile.SaveTo(configFile.Path())
|
||||
}
|
|
@ -0,0 +1,192 @@
|
|||
package databrickscfg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/databricks-sdk-go/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLoadOrCreate(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
path := filepath.Join(dir, "databrickscfg")
|
||||
file, err := loadOrCreateConfigFile(path)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, file)
|
||||
assert.FileExists(t, path)
|
||||
}
|
||||
|
||||
func TestLoadOrCreate_NotAllowed(t *testing.T) {
|
||||
path := "/dev/databrickscfg"
|
||||
file, err := loadOrCreateConfigFile(path)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, file)
|
||||
assert.NoFileExists(t, path)
|
||||
}
|
||||
|
||||
func TestLoadOrCreate_Bad(t *testing.T) {
|
||||
path := "testdata/badcfg"
|
||||
file, err := loadOrCreateConfigFile(path)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, file)
|
||||
}
|
||||
|
||||
func TestMatchOrCreateSection_Direct(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Profile: "query",
|
||||
}
|
||||
file, err := loadOrCreateConfigFile("testdata/databrickscfg")
|
||||
assert.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
section, err := matchOrCreateSection(ctx, file, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, section)
|
||||
assert.Equal(t, "query", section.Name())
|
||||
}
|
||||
|
||||
func TestMatchOrCreateSection_AccountID(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
AccountID: "abc",
|
||||
}
|
||||
file, err := loadOrCreateConfigFile("testdata/databrickscfg")
|
||||
assert.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
section, err := matchOrCreateSection(ctx, file, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, section)
|
||||
assert.Equal(t, "acc", section.Name())
|
||||
}
|
||||
|
||||
func TestMatchOrCreateSection_NormalizeHost(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Host: "https://query/?o=abracadabra",
|
||||
}
|
||||
file, err := loadOrCreateConfigFile("testdata/databrickscfg")
|
||||
assert.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
section, err := matchOrCreateSection(ctx, file, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, section)
|
||||
assert.Equal(t, "query", section.Name())
|
||||
}
|
||||
|
||||
func TestMatchOrCreateSection_NoProfileOrHost(t *testing.T) {
|
||||
cfg := &config.Config{}
|
||||
file, err := loadOrCreateConfigFile("testdata/databrickscfg")
|
||||
assert.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
_, err = matchOrCreateSection(ctx, file, cfg)
|
||||
assert.EqualError(t, err, "cannot create new profile: empty section name")
|
||||
}
|
||||
|
||||
func TestMatchOrCreateSection_MultipleProfiles(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Host: "https://foo",
|
||||
}
|
||||
file, err := loadOrCreateConfigFile("testdata/databrickscfg")
|
||||
assert.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
_, err = matchOrCreateSection(ctx, file, cfg)
|
||||
assert.EqualError(t, err, "multiple profiles matched: foo1, foo2")
|
||||
}
|
||||
|
||||
func TestMatchOrCreateSection_NewProfile(t *testing.T) {
|
||||
cfg := &config.Config{
|
||||
Host: "https://bar",
|
||||
Profile: "delirium",
|
||||
}
|
||||
file, err := loadOrCreateConfigFile("testdata/databrickscfg")
|
||||
assert.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
section, err := matchOrCreateSection(ctx, file, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, section)
|
||||
assert.Equal(t, "delirium", section.Name())
|
||||
}
|
||||
|
||||
func TestSaveToProfile_ErrorOnLoad(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
err := SaveToProfile(ctx, &config.Config{
|
||||
ConfigFile: "testdata/badcfg",
|
||||
})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestSaveToProfile_ErrorOnMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
err := SaveToProfile(ctx, &config.Config{
|
||||
Host: "https://foo",
|
||||
})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestSaveToProfile_NewFile(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
path := filepath.Join(t.TempDir(), "databrickscfg")
|
||||
|
||||
err := SaveToProfile(ctx, &config.Config{
|
||||
ConfigFile: path,
|
||||
Profile: "abc",
|
||||
Host: "https://foo",
|
||||
Token: "xyz",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.NoFileExists(t, path+".bak")
|
||||
}
|
||||
|
||||
func TestSaveToProfile_ClearingPreviousProfile(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
path := filepath.Join(t.TempDir(), "databrickscfg")
|
||||
|
||||
err := SaveToProfile(ctx, &config.Config{
|
||||
ConfigFile: path,
|
||||
Profile: "abc",
|
||||
Host: "https://foo",
|
||||
Token: "xyz",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = SaveToProfile(ctx, &config.Config{
|
||||
ConfigFile: path,
|
||||
Profile: "bcd",
|
||||
Host: "https://bar",
|
||||
Token: "zyx",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.FileExists(t, path+".bak")
|
||||
|
||||
err = SaveToProfile(ctx, &config.Config{
|
||||
ConfigFile: path,
|
||||
Host: "https://foo",
|
||||
AuthType: "databricks-cli",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
file, err := loadOrCreateConfigFile(path)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, file.Sections(), 3)
|
||||
assert.True(t, file.HasSection("DEFAULT"))
|
||||
assert.True(t, file.HasSection("bcd"))
|
||||
assert.True(t, file.HasSection("abc"))
|
||||
|
||||
dlft, err := file.GetSection("DEFAULT")
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dlft.KeysHash(), 0)
|
||||
|
||||
abc, err := file.GetSection("abc")
|
||||
assert.NoError(t, err)
|
||||
raw := abc.KeysHash()
|
||||
assert.Len(t, raw, 2)
|
||||
assert.Equal(t, "https://foo", raw["host"])
|
||||
assert.Equal(t, "databricks-cli", raw["auth_type"])
|
||||
}
|
|
@ -14,6 +14,10 @@ token = query
|
|||
host = https://foo
|
||||
token = foo1
|
||||
|
||||
[acc]
|
||||
host = https://accounts.cloud.databricks.com
|
||||
account_id = abc
|
||||
|
||||
# Duplicate entry for https://foo
|
||||
[foo2]
|
||||
host = https://foo
|
||||
|
|
|
@ -222,6 +222,10 @@ func (w *DbfsClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, e
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if len(res.Files) == 1 && res.Files[0].Path == absPath {
|
||||
return nil, NotADirectory{absPath}
|
||||
}
|
||||
|
||||
info := make([]fs.DirEntry, len(res.Files))
|
||||
for i, v := range res.Files {
|
||||
info[i] = dbfsDirEntry{dbfsFileInfo: dbfsFileInfo{fi: v}}
|
||||
|
|
|
@ -51,6 +51,18 @@ func (err NoSuchDirectoryError) Is(other error) bool {
|
|||
return other == fs.ErrNotExist
|
||||
}
|
||||
|
||||
type NotADirectory struct {
|
||||
path string
|
||||
}
|
||||
|
||||
func (err NotADirectory) Error() string {
|
||||
return fmt.Sprintf("not a directory: %s", err.path)
|
||||
}
|
||||
|
||||
func (err NotADirectory) Is(other error) bool {
|
||||
return other == fs.ErrInvalid
|
||||
}
|
||||
|
||||
// Filer is used to access files in a workspace.
|
||||
// It has implementations for accessing files in WSFS and in DBFS.
|
||||
type Filer interface {
|
||||
|
|
|
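Because NotADirectory implements Is, callers can branch on the sentinel errors from io/fs with errors.Is, similar to how the fs ls tests above check fs.ErrNotExist. A minimal, self-contained sketch using a hypothetical stand-in type:

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

// notADirectory is a hypothetical stand-in mirroring the error type above.
type notADirectory struct{ path string }

func (e notADirectory) Error() string       { return fmt.Sprintf("not a directory: %s", e.path) }
func (e notADirectory) Is(other error) bool { return other == fs.ErrInvalid }

func main() {
	var err error = notADirectory{path: "/a/hello.txt"}
	fmt.Println(errors.Is(err, fs.ErrInvalid))  // true
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // false
}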
@ -0,0 +1,151 @@
|
|||
package filer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
// filerFS implements the fs.FS interface for a filer.
|
||||
type filerFS struct {
|
||||
ctx context.Context
|
||||
filer Filer
|
||||
}
|
||||
|
||||
// NewFS returns an fs.FS backed by a filer.
|
||||
func NewFS(ctx context.Context, filer Filer) fs.FS {
|
||||
return &filerFS{ctx: ctx, filer: filer}
|
||||
}
|
||||
|
||||
func (fs *filerFS) Open(name string) (fs.File, error) {
|
||||
stat, err := fs.filer.Stat(fs.ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if stat.IsDir() {
|
||||
return &fsDir{fs: fs, name: name, stat: stat}, nil
|
||||
}
|
||||
|
||||
return &fsFile{fs: fs, name: name, stat: stat}, nil
|
||||
}
|
||||
|
||||
func (fs *filerFS) ReadDir(name string) ([]fs.DirEntry, error) {
|
||||
return fs.filer.ReadDir(fs.ctx, name)
|
||||
}
|
||||
|
||||
func (fs *filerFS) ReadFile(name string) ([]byte, error) {
|
||||
reader, err := fs.filer.Read(fs.ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (fs *filerFS) Stat(name string) (fs.FileInfo, error) {
|
||||
return fs.filer.Stat(fs.ctx, name)
|
||||
}
|
||||
|
||||
// Type that implements fs.File for a filer-backed fs.FS.
|
||||
type fsFile struct {
|
||||
fs *filerFS
|
||||
name string
|
||||
stat fs.FileInfo
|
||||
|
||||
reader io.Reader
|
||||
}
|
||||
|
||||
func (f *fsFile) Stat() (fs.FileInfo, error) {
|
||||
return f.stat, nil
|
||||
}
|
||||
|
||||
func (f *fsFile) Read(buf []byte) (int, error) {
|
||||
if f.reader == nil {
|
||||
reader, err := f.fs.filer.Read(f.fs.ctx, f.name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
f.reader = reader
|
||||
}
|
||||
|
||||
return f.reader.Read(buf)
|
||||
}
|
||||
|
||||
func (f *fsFile) Close() error {
|
||||
if f.reader == nil {
|
||||
return fs.ErrClosed
|
||||
}
|
||||
f.reader = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type that implements fs.ReadDirFile for a filer-backed fs.FS.
|
||||
type fsDir struct {
|
||||
fs *filerFS
|
||||
name string
|
||||
stat fs.FileInfo
|
||||
|
||||
open bool
|
||||
entries []fs.DirEntry
|
||||
}
|
||||
|
||||
func (f *fsDir) Stat() (fs.FileInfo, error) {
|
||||
return f.stat, nil
|
||||
}
|
||||
|
||||
func (f *fsDir) Read(buf []byte) (int, error) {
|
||||
return 0, fs.ErrInvalid
|
||||
}
|
||||
|
||||
func (f *fsDir) ReadDir(n int) ([]fs.DirEntry, error) {
|
||||
// Load all directory entries if not already loaded.
|
||||
if !f.open {
|
||||
entries, err := f.fs.ReadDir(f.name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.open = true
|
||||
f.entries = entries
|
||||
}
|
||||
|
||||
// Return all entries if n <= 0.
|
||||
if n <= 0 {
|
||||
entries := f.entries
|
||||
f.entries = nil
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// If there are no more entries, return io.EOF.
|
||||
if len(f.entries) == 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
// If there are less than n entries, return all entries.
|
||||
if len(f.entries) < n {
|
||||
entries := f.entries
|
||||
f.entries = nil
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Return n entries.
|
||||
entries := f.entries[:n]
|
||||
f.entries = f.entries[n:]
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func (f *fsDir) Close() error {
|
||||
if !f.open {
|
||||
return fs.ErrClosed
|
||||
}
|
||||
f.open = false
|
||||
f.entries = nil
|
||||
return nil
|
||||
}
|
|
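A hedged usage sketch of the adapter added above: since NewFS returns an fs.FS that also implements ReadDirFS and StatFS, standard-library traversal such as fs.WalkDir works against any Filer. The helper name below is hypothetical.

package example

import (
	"context"
	"fmt"
	"io/fs"

	"github.com/databricks/cli/libs/filer"
)

// walkFiler lists every entry reachable from the filer's root via fs.WalkDir.
func walkFiler(ctx context.Context, f filer.Filer) error {
	fsys := filer.NewFS(ctx, f)
	return fs.WalkDir(fsys, ".", func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(p, d.IsDir())
		return nil
	})
}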
@ -0,0 +1,288 @@
|
|||
package filer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type fakeDirEntry struct {
|
||||
fakeFileInfo
|
||||
}
|
||||
|
||||
func (entry fakeDirEntry) Type() fs.FileMode {
|
||||
typ := fs.ModePerm
|
||||
if entry.dir {
|
||||
typ |= fs.ModeDir
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
func (entry fakeDirEntry) Info() (fs.FileInfo, error) {
|
||||
return entry.fakeFileInfo, nil
|
||||
}
|
||||
|
||||
type fakeFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
dir bool
|
||||
mode fs.FileMode
|
||||
}
|
||||
|
||||
func (info fakeFileInfo) Name() string {
|
||||
return info.name
|
||||
}
|
||||
|
||||
func (info fakeFileInfo) Size() int64 {
|
||||
return info.size
|
||||
}
|
||||
|
||||
func (info fakeFileInfo) Mode() fs.FileMode {
|
||||
return info.mode
|
||||
}
|
||||
|
||||
func (info fakeFileInfo) ModTime() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
func (info fakeFileInfo) IsDir() bool {
|
||||
return info.dir
|
||||
}
|
||||
|
||||
func (info fakeFileInfo) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeFiler struct {
|
||||
entries map[string]fakeFileInfo
|
||||
}
|
||||
|
||||
func (f *fakeFiler) Write(ctx context.Context, p string, reader io.Reader, mode ...WriteMode) error {
|
||||
return fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (f *fakeFiler) Read(ctx context.Context, p string) (io.Reader, error) {
|
||||
_, ok := f.entries[p]
|
||||
if !ok {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
|
||||
return strings.NewReader("foo"), nil
|
||||
}
|
||||
|
||||
func (f *fakeFiler) Delete(ctx context.Context, p string) error {
|
||||
return fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (f *fakeFiler) ReadDir(ctx context.Context, p string) ([]fs.DirEntry, error) {
|
||||
entry, ok := f.entries[p]
|
||||
if !ok {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
|
||||
if !entry.dir {
|
||||
return nil, fs.ErrInvalid
|
||||
}
|
||||
|
||||
// Find all entries contained in the specified directory `p`.
|
||||
var out []fs.DirEntry
|
||||
for k, v := range f.entries {
|
||||
if k == p || path.Dir(k) != p {
|
||||
continue
|
||||
}
|
||||
|
||||
out = append(out, fakeDirEntry{v})
|
||||
}
|
||||
|
||||
sort.Slice(out, func(i, j int) bool { return out[i].Name() < out[j].Name() })
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (f *fakeFiler) Mkdir(ctx context.Context, path string) error {
|
||||
return fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (f *fakeFiler) Stat(ctx context.Context, path string) (fs.FileInfo, error) {
|
||||
entry, ok := f.entries[path]
|
||||
if !ok {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
func TestFsImplementsFS(t *testing.T) {
|
||||
var _ fs.FS = &filerFS{}
|
||||
}
|
||||
|
||||
func TestFsImplementsReadDirFS(t *testing.T) {
|
||||
var _ fs.ReadDirFS = &filerFS{}
|
||||
}
|
||||
|
||||
func TestFsImplementsReadFileFS(t *testing.T) {
|
||||
var _ fs.ReadFileFS = &filerFS{}
|
||||
}
|
||||
|
||||
func TestFsImplementsStatFS(t *testing.T) {
|
||||
var _ fs.StatFS = &filerFS{}
|
||||
}
|
||||
|
||||
func TestFsFileImplementsFsFile(t *testing.T) {
|
||||
var _ fs.File = &fsFile{}
|
||||
}
|
||||
|
||||
func TestFsDirImplementsFsReadDirFile(t *testing.T) {
|
||||
var _ fs.ReadDirFile = &fsDir{}
|
||||
}
|
||||
|
||||
func fakeFS() fs.FS {
|
||||
fakeFiler := &fakeFiler{
|
||||
entries: map[string]fakeFileInfo{
|
||||
".": {name: "root", dir: true},
|
||||
"dirA": {dir: true},
|
||||
"dirB": {dir: true},
|
||||
"fileA": {size: 3},
|
||||
},
|
||||
}
|
||||
|
||||
for k, v := range fakeFiler.entries {
|
||||
if v.name != "" {
|
||||
continue
|
||||
}
|
||||
v.name = path.Base(k)
|
||||
fakeFiler.entries[k] = v
|
||||
}
|
||||
|
||||
return NewFS(context.Background(), fakeFiler)
|
||||
}
|
||||
|
||||
func TestFsGlob(t *testing.T) {
|
||||
fakeFS := fakeFS()
|
||||
matches, err := fs.Glob(fakeFS, "*")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []string{"dirA", "dirB", "fileA"}, matches)
|
||||
}
|
||||
|
||||
func TestFsOpenFile(t *testing.T) {
|
||||
fakeFS := fakeFS()
|
||||
fakeFile, err := fakeFS.Open("fileA")
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := fakeFile.Stat()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "fileA", info.Name())
|
||||
assert.Equal(t, int64(3), info.Size())
|
||||
assert.Equal(t, fs.FileMode(0), info.Mode())
|
||||
assert.Equal(t, false, info.IsDir())
|
||||
|
||||
// Read until closed.
|
||||
b := make([]byte, 3)
|
||||
n, err := fakeFile.Read(b)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, n)
|
||||
assert.Equal(t, []byte{'f', 'o', 'o'}, b)
|
||||
_, err = fakeFile.Read(b)
|
||||
assert.ErrorIs(t, err, io.EOF)
|
||||
|
||||
// Close.
|
||||
err = fakeFile.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Close again.
|
||||
err = fakeFile.Close()
|
||||
assert.ErrorIs(t, err, fs.ErrClosed)
|
||||
}
|
||||
|
||||
func TestFsOpenDir(t *testing.T) {
|
||||
fakeFS := fakeFS()
|
||||
fakeFile, err := fakeFS.Open(".")
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := fakeFile.Stat()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "root", info.Name())
|
||||
assert.Equal(t, true, info.IsDir())
|
||||
|
||||
de, ok := fakeFile.(fs.ReadDirFile)
|
||||
require.True(t, ok)
|
||||
|
||||
// Read all entries in one shot.
|
||||
reference, err := de.ReadDir(-1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read entries one at a time.
|
||||
{
|
||||
var tmp, entries []fs.DirEntry
|
||||
var err error
|
||||
|
||||
de.Close()
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
tmp, err = de.ReadDir(1)
|
||||
require.NoError(t, err)
|
||||
entries = append(entries, tmp...)
|
||||
}
|
||||
|
||||
_, err = de.ReadDir(1)
|
||||
require.ErrorIs(t, err, io.EOF, err)
|
||||
|
||||
// Compare to reference.
|
||||
assert.Equal(t, reference, entries)
|
||||
}
|
||||
|
||||
// Read entries and overshoot at the end.
|
||||
{
|
||||
var tmp, entries []fs.DirEntry
|
||||
var err error
|
||||
|
||||
de.Close()
|
||||
|
||||
tmp, err = de.ReadDir(1)
|
||||
require.NoError(t, err)
|
||||
entries = append(entries, tmp...)
|
||||
|
||||
tmp, err = de.ReadDir(20)
|
||||
require.NoError(t, err)
|
||||
entries = append(entries, tmp...)
|
||||
|
||||
_, err = de.ReadDir(1)
|
||||
require.ErrorIs(t, err, io.EOF, err)
|
||||
|
||||
// Compare to reference.
|
||||
assert.Equal(t, reference, entries)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFsReadDir(t *testing.T) {
|
||||
fakeFS := fakeFS().(fs.ReadDirFS)
|
||||
entries, err := fakeFS.ReadDir(".")
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, entries, 3)
|
||||
assert.Equal(t, "dirA", entries[0].Name())
|
||||
assert.Equal(t, "dirB", entries[1].Name())
|
||||
assert.Equal(t, "fileA", entries[2].Name())
|
||||
}
|
||||
|
||||
func TestFsReadFile(t *testing.T) {
|
||||
fakeFS := fakeFS().(fs.ReadFileFS)
|
||||
buf, err := fakeFS.ReadFile("fileA")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("foo"), buf)
|
||||
}
|
||||
|
||||
func TestFsStat(t *testing.T) {
|
||||
fakeFS := fakeFS().(fs.StatFS)
|
||||
info, err := fakeFS.Stat("fileA")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "fileA", info.Name())
|
||||
assert.Equal(t, int64(3), info.Size())
|
||||
}
|
|
@ -222,6 +222,11 @@ func (w *WorkspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.D
|
|||
objects, err := w.workspaceClient.Workspace.ListAll(ctx, workspace.ListWorkspaceRequest{
|
||||
Path: absPath,
|
||||
})
|
||||
|
||||
if len(objects) == 1 && objects[0].Path == absPath {
|
||||
return nil, NotADirectory{absPath}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// If we got an API error we deal with it below.
|
||||
var aerr *apierr.APIError
|
||||
|
|