From 3270afaff43e4ad7e7998a67060d9ce8539e7e98 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 10 Oct 2024 15:02:25 +0200 Subject: [PATCH 01/25] Move utility functions dealing with IAM to libs/iamutil (#1820) ## Changes The two functions `GetShortUserName` and `IsServicePrincipal` are unrelated to auth or the purpose of the auth package. This change moves them into their own package and updates `IsServicePrincipal` to take an `*iam.User` argument instead of a string username. ## Tests Tests pass. --- bundle/config/mutator/populate_current_user.go | 4 ++-- bundle/config/mutator/process_target_mode.go | 4 ++-- bundle/permissions/permission_report.go | 13 +++++++------ internal/init_test.go | 6 +++--- libs/{auth => iamutil}/service_principal.go | 9 +++++---- libs/{auth => iamutil}/service_principal_test.go | 15 ++++++++++----- libs/{auth => iamutil}/user.go | 4 ++-- libs/{auth => iamutil}/user_test.go | 2 +- libs/template/helpers.go | 6 +++--- 9 files changed, 35 insertions(+), 28 deletions(-) rename libs/{auth => iamutil}/service_principal.go (61%) rename libs/{auth => iamutil}/service_principal_test.go (57%) rename libs/{auth => iamutil}/user.go (87%) rename libs/{auth => iamutil}/user_test.go (99%) diff --git a/bundle/config/mutator/populate_current_user.go b/bundle/config/mutator/populate_current_user.go index 1e99b327c..cab5db1ba 100644 --- a/bundle/config/mutator/populate_current_user.go +++ b/bundle/config/mutator/populate_current_user.go @@ -5,8 +5,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/iamutil" "github.com/databricks/cli/libs/tags" ) @@ -33,7 +33,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag. 
} b.Config.Workspace.CurrentUser = &config.User{ - ShortName: auth.GetShortUserName(me), + ShortName: iamutil.GetShortUserName(me), User: me, } diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 9944f6ffd..44b53681d 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -6,9 +6,9 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/iamutil" "github.com/databricks/cli/libs/log" ) @@ -174,7 +174,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Di transformDevelopmentMode(ctx, b) return diags case config.Production: - isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName) + isPrincipal := iamutil.IsServicePrincipal(b.Config.Workspace.CurrentUser.User) return validateProductionMode(ctx, b, isPrincipal) case "": // No action diff --git a/bundle/permissions/permission_report.go b/bundle/permissions/permission_report.go index a9468edb6..36526eeeb 100644 --- a/bundle/permissions/permission_report.go +++ b/bundle/permissions/permission_report.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/iamutil" "github.com/databricks/cli/libs/log" ) @@ -17,9 +17,10 @@ import ( func ReportPossiblePermissionDenied(ctx context.Context, b *bundle.Bundle, path string) diag.Diagnostics { log.Errorf(ctx, "Failed to update, encountered possible permission error: %v", path) - user := b.Config.Workspace.CurrentUser.UserName - if auth.IsServicePrincipal(user) { - user = b.Config.Workspace.CurrentUser.DisplayName + me := b.Config.Workspace.CurrentUser.User + userName := me.UserName + if iamutil.IsServicePrincipal(me) { + userName = me.DisplayName } canManageBundle, assistance := analyzeBundlePermissions(b) @@ -30,7 +31,7 @@ func ReportPossiblePermissionDenied(ctx context.Context, b *bundle.Bundle, path "%s\n"+ "They may need to redeploy the bundle to apply the new permissions.\n"+ "Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions.", - path, user, assistance), + path, userName, assistance), Severity: diag.Error, ID: diag.PathPermissionDenied, }} @@ -44,7 +45,7 @@ func ReportPossiblePermissionDenied(ctx context.Context, b *bundle.Bundle, path "%s\n"+ "They can redeploy the project to apply the latest set of permissions.\n"+ "Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions.", - path, user, assistance), + path, userName, assistance), Severity: diag.Error, ID: diag.CannotChangePathPermissions, }} diff --git a/internal/init_test.go b/internal/init_test.go index d1a89f7b7..a6241d629 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -12,7 +12,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/internal/testutil" - "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/iamutil" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -126,7 +126,7 @@ func TestAccBundleInitHelpers(t *testing.T) { }{ { funcName: "{{short_name}}", - expected: auth.GetShortUserName(me), + expected: 
iamutil.GetShortUserName(me), }, { funcName: "{{user_name}}", @@ -138,7 +138,7 @@ func TestAccBundleInitHelpers(t *testing.T) { }, { funcName: "{{is_service_principal}}", - expected: strconv.FormatBool(auth.IsServicePrincipal(me.UserName)), + expected: strconv.FormatBool(iamutil.IsServicePrincipal(me)), }, { funcName: "{{smallest_node_type}}", diff --git a/libs/auth/service_principal.go b/libs/iamutil/service_principal.go similarity index 61% rename from libs/auth/service_principal.go rename to libs/iamutil/service_principal.go index 5f1854e3a..7b65f1f55 100644 --- a/libs/auth/service_principal.go +++ b/libs/iamutil/service_principal.go @@ -1,15 +1,16 @@ -package auth +package iamutil import ( + "github.com/databricks/databricks-sdk-go/service/iam" "github.com/google/uuid" ) -// Determines whether a given user name is a service principal. +// Determines whether a given user is a service principal. // This function uses a heuristic: if the user name is a UUID, then we assume // it's a service principal. Unfortunately, the service principal listing API is too // slow for our purposes. And the "users" and "service principals get" APIs // only allow access by workspace admins. -func IsServicePrincipal(userName string) bool { - _, err := uuid.Parse(userName) +func IsServicePrincipal(user *iam.User) bool { + _, err := uuid.Parse(user.UserName) return err == nil } diff --git a/libs/auth/service_principal_test.go b/libs/iamutil/service_principal_test.go similarity index 57% rename from libs/auth/service_principal_test.go rename to libs/iamutil/service_principal_test.go index 95e8ab5cb..07e076692 100644 --- a/libs/auth/service_principal_test.go +++ b/libs/iamutil/service_principal_test.go @@ -1,19 +1,24 @@ -package auth +package iamutil import ( "testing" + "github.com/databricks/databricks-sdk-go/service/iam" "github.com/stretchr/testify/assert" ) func TestIsServicePrincipal_ValidUUID(t *testing.T) { - userId := "8b948b2e-d2b5-4b9e-8274-11b596f3b652" - isSP := IsServicePrincipal(userId) + user := &iam.User{ + UserName: "8b948b2e-d2b5-4b9e-8274-11b596f3b652", + } + isSP := IsServicePrincipal(user) assert.True(t, isSP, "Expected user ID to be recognized as a service principal") } func TestIsServicePrincipal_InvalidUUID(t *testing.T) { - userId := "invalid" - isSP := IsServicePrincipal(userId) + user := &iam.User{ + UserName: "invalid", + } + isSP := IsServicePrincipal(user) assert.False(t, isSP, "Expected user ID to not be recognized as a service principal") } diff --git a/libs/auth/user.go b/libs/iamutil/user.go similarity index 87% rename from libs/auth/user.go rename to libs/iamutil/user.go index c6aa974f3..53704dab6 100644 --- a/libs/auth/user.go +++ b/libs/iamutil/user.go @@ -1,4 +1,4 @@ -package auth +package iamutil import ( "strings" @@ -12,7 +12,7 @@ import ( // including dots, which are not supported in e.g. experiment names. 
func GetShortUserName(user *iam.User) string { name := user.UserName - if IsServicePrincipal(user.UserName) && user.DisplayName != "" { + if IsServicePrincipal(user) && user.DisplayName != "" { name = user.DisplayName } local, _, _ := strings.Cut(name, "@") diff --git a/libs/auth/user_test.go b/libs/iamutil/user_test.go similarity index 99% rename from libs/auth/user_test.go rename to libs/iamutil/user_test.go index 24b61464b..8aa863e6e 100644 --- a/libs/auth/user_test.go +++ b/libs/iamutil/user_test.go @@ -1,4 +1,4 @@ -package auth +package iamutil import ( "testing" diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 88c73cc47..d00d75ed0 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -11,7 +11,7 @@ import ( "text/template" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/iamutil" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/iam" @@ -119,7 +119,7 @@ func loadHelpers(ctx context.Context) template.FuncMap { return "", err } } - return auth.GetShortUserName(cachedUser), nil + return iamutil.GetShortUserName(cachedUser), nil }, // Get the default workspace catalog. If there is no default, or if // Unity Catalog is not enabled, return an empty string. @@ -151,7 +151,7 @@ func loadHelpers(ctx context.Context) template.FuncMap { return false, err } } - result := auth.IsServicePrincipal(cachedUser.UserName) + result := iamutil.IsServicePrincipal(cachedUser) cachedIsServicePrincipal = &result return result, nil }, From b9085de53388cffc12c325653f05eeb16c7dc526 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 10 Oct 2024 15:43:21 +0200 Subject: [PATCH 02/25] Remove unused `IS_OWNER` constant (#1823) ## Changes Leftover from #1386. ## Tests All tests pass (indicating it really wasn't used). --- bundle/permissions/mutator.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index 247ffcdac..7787bc048 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -13,7 +13,6 @@ import ( const CAN_MANAGE = "CAN_MANAGE" const CAN_VIEW = "CAN_VIEW" const CAN_RUN = "CAN_RUN" -const IS_OWNER = "IS_OWNER" var allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} var levelsMap = map[string](map[string]string){ From 845d23ac21b7a945714150ef68d48e6629a8bc6f Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 10 Oct 2024 16:10:16 +0200 Subject: [PATCH 03/25] Fixed typo in converting cluster permissions (#1826) ## Changes Fixed typo in converting cluster permissions --- bundle/deploy/terraform/tfdyn/convert_cluster.go | 2 +- bundle/deploy/terraform/tfdyn/convert_cluster_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster.go b/bundle/deploy/terraform/tfdyn/convert_cluster.go index f25f09ea8..18819c00f 100644 --- a/bundle/deploy/terraform/tfdyn/convert_cluster.go +++ b/bundle/deploy/terraform/tfdyn/convert_cluster.go @@ -40,7 +40,7 @@ func (clusterConverter) Convert(ctx context.Context, key string, vin dyn.Value, // Configure permissions for this resource. 
if permissions := convertPermissionsResource(ctx, vin); permissions != nil { - permissions.JobId = fmt.Sprintf("${databricks_cluster.%s.id}", key) + permissions.ClusterId = fmt.Sprintf("${databricks_cluster.%s.id}", key) out.Permissions["cluster_"+key] = permissions } diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go index e7d2542fd..e6d2620c6 100644 --- a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go @@ -81,7 +81,7 @@ func TestConvertCluster(t *testing.T) { // Assert equality on the permissions assert.Equal(t, &schema.ResourcePermissions{ - JobId: "${databricks_cluster.my_cluster.id}", + ClusterId: "${databricks_cluster.my_cluster.id}", AccessControl: []schema.ResourcePermissionsAccessControl{ { PermissionLevel: "CAN_RUN", From 08a0d083c3bafb6cd5f87f4d39f0863417c71e2b Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Fri, 11 Oct 2024 14:28:56 +0200 Subject: [PATCH 04/25] Ignore metastore permission error during template generation (#1819) ## Changes This extends the `{{default_catalog}}` helper in templates to ignore any `PERMISSION_DENIED` error. We're still reviewing when exactly this error occurs, but if it does, it should not break templates. We should fall back to assuming there's no default catalog (and no UC) instead. ## Testing I have not been able to reproduce this issue, but there is a customer report about "access denied to clusters that don't have unity catalog enabled" being returned on a non-UC workspace. The error code in this PR corresponds to that message. ## Next steps We'll work together with the UC team to review if this error even makes sense for this API. If that discussion leads to a behavior change in the API we can update the CLI code again. --- libs/template/helpers.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/template/helpers.go b/libs/template/helpers.go index d00d75ed0..f25cbee4f 100755 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -128,8 +128,8 @@ func loadHelpers(ctx context.Context) template.FuncMap { metastore, err := w.Metastores.Current(ctx) if err != nil { var aerr *apierr.APIError - if errors.As(err, &aerr) && aerr.ErrorCode == "METASTORE_DOES_NOT_EXIST" { - // Workspace doesn't have a metastore assigned, ignore error + if errors.As(err, &aerr) && (aerr.ErrorCode == "PERMISSION_DENIED" || aerr.ErrorCode == "METASTORE_DOES_NOT_EXIST") { + // Ignore: access denied or workspace doesn't have a metastore assigned empty_default := "" cachedCatalog = &empty_default return "", nil From f0e2981596488af1af3fbbdbd9572a41bfaae4e8 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 11 Oct 2024 16:39:53 +0200 Subject: [PATCH 05/25] Added JSON input validation for CLI commands (#1771) ## Changes Added JSON input validation for CLI commands. Now, when invalid JSON is passed as a payload to a CLI command, the CLI performs input normalisation and detects mismatches such as incorrect types and unknown fields. This diagnostic information is printed to standard error output and does not block command execution, so the change is backward compatible.
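For illustration, a minimal sketch of the pattern emitted by the updated `.codegen/service.go.tmpl` for every generated command (here `createJson` and `createReq` stand in for the per-command flag and request struct): `Unmarshal` now returns `diag.Diagnostics`, hard JSON errors abort the command, and any remaining diagnostics are rendered to standard error via `cmdio.RenderDiagnosticsToErrorOut` without failing the command.

```go
if cmd.Flags().Changed("json") {
	// Unmarshal now returns structured diagnostics instead of a plain error.
	diags := createJson.Unmarshal(&createReq)
	if diags.HasError() {
		// Malformed JSON (e.g. unexpected EOF) still fails the command.
		return diags.Error()
	}
	if len(diags) > 0 {
		// Non-fatal findings (unknown fields, type mismatches) are printed
		// to standard error and execution continues.
		err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
		if err != nil {
			return err
		}
	}
}
```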
Fixes #1769 #1764 #1625 #1560 ## Tests Added unit tests ``` andrew.nester@HFW9Y94129 ~ % databricks jobs create --json '{"seeti}' Error: error decoding JSON at (inline):1:2: unexpected EOF andrew.nester@HFW9Y94129 ~ % databricks jobs create --json '{"seeti": true}' Warning: unknown field: seeti in (inline):1:9 Error: Job settings must be specified. ``` --------- Co-authored-by: Pieter Noordhuis --- .codegen/service.go.tmpl | 13 +- bundle/render/render_text_output.go | 66 +---- cmd/account/access-control/access-control.go | 12 +- cmd/account/budgets/budgets.go | 24 +- cmd/account/credentials/credentials.go | 12 +- .../csp-enablement-account.go | 12 +- .../custom-app-integration.go | 24 +- .../disable-legacy-features.go | 12 +- .../encryption-keys/encryption-keys.go | 12 +- .../esm-enablement-account.go | 12 +- cmd/account/groups/groups.go | 36 ++- .../ip-access-lists/ip-access-lists.go | 36 ++- cmd/account/log-delivery/log-delivery.go | 24 +- .../metastore-assignments.go | 24 +- cmd/account/metastores/metastores.go | 24 +- .../network-connectivity.go | 24 +- cmd/account/networks/networks.go | 12 +- .../personal-compute/personal-compute.go | 12 +- cmd/account/private-access/private-access.go | 24 +- .../published-app-integration.go | 24 +- .../service-principals/service-principals.go | 36 ++- .../storage-credentials.go | 24 +- cmd/account/storage/storage.go | 12 +- .../usage-dashboards/usage-dashboards.go | 12 +- cmd/account/users/users.go | 36 ++- cmd/account/vpc-endpoints/vpc-endpoints.go | 12 +- .../workspace-assignment.go | 12 +- cmd/account/workspaces/workspaces.go | 24 +- cmd/api/api.go | 6 +- cmd/workspace/alerts-legacy/alerts-legacy.go | 24 +- cmd/workspace/alerts/alerts.go | 24 +- cmd/workspace/apps/apps.go | 60 +++- .../artifact-allowlists.go | 12 +- .../automatic-cluster-update.go | 12 +- cmd/workspace/catalogs/catalogs.go | 24 +- cmd/workspace/clean-rooms/clean-rooms.go | 24 +- .../cluster-policies/cluster-policies.go | 60 +++- cmd/workspace/clusters/clusters.go | 168 ++++++++--- .../compliance-security-profile.go | 12 +- cmd/workspace/connections/connections.go | 24 +- .../consumer-installations.go | 24 +- .../consumer-personalization-requests.go | 12 +- .../credentials-manager.go | 12 +- .../dashboard-widgets/dashboard-widgets.go | 24 +- cmd/workspace/dashboards/dashboards.go | 24 +- .../default-namespace/default-namespace.go | 12 +- .../disable-legacy-access.go | 12 +- .../enhanced-security-monitoring.go | 12 +- cmd/workspace/experiments/experiments.go | 264 +++++++++++++----- .../external-locations/external-locations.go | 24 +- cmd/workspace/functions/functions.go | 24 +- cmd/workspace/genie/genie.go | 24 +- .../git-credentials/git-credentials.go | 24 +- .../global-init-scripts.go | 24 +- cmd/workspace/grants/grants.go | 12 +- cmd/workspace/groups/groups.go | 36 ++- .../instance-pools/instance-pools.go | 60 +++- .../instance-profiles/instance-profiles.go | 36 ++- .../ip-access-lists/ip-access-lists.go | 36 ++- cmd/workspace/jobs/jobs.go | 144 +++++++--- cmd/workspace/lakeview/lakeview.go | 84 ++++-- cmd/workspace/libraries/libraries.go | 24 +- cmd/workspace/metastores/metastores.go | 48 +++- .../model-registry/model-registry.go | 228 +++++++++++---- .../model-versions/model-versions.go | 12 +- .../notification-destinations.go | 24 +- cmd/workspace/online-tables/online-tables.go | 12 +- .../permission-migration.go | 12 +- cmd/workspace/permissions/permissions.go | 24 +- cmd/workspace/pipelines/pipelines.go | 60 +++- .../policy-compliance-for-clusters.go | 12 +- 
.../policy-compliance-for-jobs.go | 12 +- .../provider-exchange-filters.go | 24 +- .../provider-exchanges/provider-exchanges.go | 36 ++- .../provider-files/provider-files.go | 24 +- .../provider-listings/provider-listings.go | 24 +- .../provider-personalization-requests.go | 12 +- .../provider-provider-analytics-dashboards.go | 12 +- .../provider-providers/provider-providers.go | 24 +- cmd/workspace/providers/providers.go | 24 +- .../quality-monitors/quality-monitors.go | 36 ++- .../queries-legacy/queries-legacy.go | 24 +- cmd/workspace/queries/queries.go | 24 +- .../query-visualizations-legacy.go | 24 +- .../query-visualizations.go | 24 +- cmd/workspace/recipients/recipients.go | 36 ++- .../registered-models/registered-models.go | 36 ++- cmd/workspace/repos/overrides.go | 26 +- cmd/workspace/repos/repos.go | 48 +++- .../restrict-workspace-admins.go | 12 +- cmd/workspace/schemas/schemas.go | 24 +- cmd/workspace/secrets/put_secret.go | 14 +- cmd/workspace/secrets/secrets.go | 60 +++- .../service-principals/service-principals.go | 36 ++- .../serving-endpoints/serving-endpoints.go | 96 +++++-- cmd/workspace/shares/shares.go | 36 ++- .../storage-credentials.go | 36 ++- .../table-constraints/table-constraints.go | 12 +- cmd/workspace/tables/tables.go | 12 +- .../temporary-table-credentials.go | 12 +- .../token-management/token-management.go | 36 ++- cmd/workspace/tokens/tokens.go | 24 +- cmd/workspace/users/users.go | 60 +++- .../vector-search-endpoints.go | 12 +- .../vector-search-indexes.go | 72 +++-- cmd/workspace/volumes/volumes.go | 24 +- cmd/workspace/warehouses/warehouses.go | 60 +++- .../workspace-bindings/workspace-bindings.go | 24 +- .../workspace-conf/workspace-conf.go | 12 +- cmd/workspace/workspace/workspace.go | 60 +++- libs/cmdio/render.go | 201 +++++++++---- libs/dyn/convert/to_typed.go | 17 +- libs/dyn/convert/to_typed_test.go | 17 +- libs/dyn/jsonloader/json.go | 107 +++++++ libs/dyn/jsonloader/json_test.go | 93 ++++++ libs/dyn/jsonloader/locations.go | 53 ++++ libs/flags/json_flag.go | 47 +++- libs/flags/json_flag_test.go | 244 +++++++++++++++- 118 files changed, 3378 insertions(+), 1042 deletions(-) create mode 100644 libs/dyn/jsonloader/json.go create mode 100644 libs/dyn/jsonloader/json_test.go create mode 100644 libs/dyn/jsonloader/locations.go diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 281dfd6eb..b489a0b0a 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -5,6 +5,7 @@ package {{(.TrimPrefix "account").SnakeName}} import ( "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/cmd/root" "github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}" "github.com/spf13/cobra" @@ -231,9 +232,15 @@ func new{{.PascalName}}() *cobra.Command { {{- if .Request }} {{ if .CanUseJson }} if cmd.Flags().Changed("json") { - err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req) - if err != nil { - return err + diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } }{{end}}{{ if .MustUseJson }}else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go index 56387c386..3e52d5f16 100644 --- a/bundle/render/render_text_output.go +++ 
b/bundle/render/render_text_output.go @@ -8,6 +8,7 @@ import ( "text/template" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/fatih/color" @@ -28,48 +29,6 @@ var renderFuncMap = template.FuncMap{ }, } -const errorTemplate = `{{ "Error" | red }}: {{ .Summary }} -{{- range $index, $element := .Paths }} - {{ if eq $index 0 }}at {{else}} {{ end}}{{ $element.String | green }} -{{- end }} -{{- range $index, $element := .Locations }} - {{ if eq $index 0 }}in {{else}} {{ end}}{{ $element.String | cyan }} -{{- end }} -{{- if .Detail }} - -{{ .Detail }} -{{- end }} - -` - -const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} -{{- range $index, $element := .Paths }} - {{ if eq $index 0 }}at {{else}} {{ end}}{{ $element.String | green }} -{{- end }} -{{- range $index, $element := .Locations }} - {{ if eq $index 0 }}in {{else}} {{ end}}{{ $element.String | cyan }} -{{- end }} -{{- if .Detail }} - -{{ .Detail }} -{{- end }} - -` - -const recommendationTemplate = `{{ "Recommendation" | blue }}: {{ .Summary }} -{{- range $index, $element := .Paths }} - {{ if eq $index 0 }}at {{else}} {{ end}}{{ $element.String | green }} -{{- end }} -{{- range $index, $element := .Locations }} - {{ if eq $index 0 }}in {{else}} {{ end}}{{ $element.String | cyan }} -{{- end }} -{{- if .Detail }} - -{{ .Detail }} -{{- end }} - -` - const summaryTemplate = `{{- if .Name -}} Name: {{ .Name | bold }} {{- if .Target }} @@ -153,22 +112,7 @@ func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnosti } func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { - errorT := template.Must(template.New("error").Funcs(renderFuncMap).Parse(errorTemplate)) - warningT := template.Must(template.New("warning").Funcs(renderFuncMap).Parse(warningTemplate)) - recommendationT := template.Must(template.New("recommendation").Funcs(renderFuncMap).Parse(recommendationTemplate)) - - // Print errors and warnings. for _, d := range diags { - var t *template.Template - switch d.Severity { - case diag.Error: - t = errorT - case diag.Warning: - t = warningT - case diag.Recommendation: - t = recommendationT - } - for i := range d.Locations { if b == nil { break @@ -183,15 +127,9 @@ func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) } } } - - // Render the diagnostic with the appropriate template. - err := t.Execute(out, d) - if err != nil { - return fmt.Errorf("failed to render template: %w", err) - } } - return nil + return cmdio.RenderDiagnostics(out, diags) } // RenderOptions contains options for rendering diagnostics. 
diff --git a/cmd/account/access-control/access-control.go b/cmd/account/access-control/access-control.go index f6761a1b4..07a7fce9b 100755 --- a/cmd/account/access-control/access-control.go +++ b/cmd/account/access-control/access-control.go @@ -205,9 +205,15 @@ func newUpdateRuleSet() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateRuleSetJson.Unmarshal(&updateRuleSetReq) - if err != nil { - return err + diags := updateRuleSetJson.Unmarshal(&updateRuleSetReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index 6b47bb32c..87ed41d57 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -78,9 +78,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -316,9 +322,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/account/credentials/credentials.go b/cmd/account/credentials/credentials.go index ed071cda3..4ab70ef00 100755 --- a/cmd/account/credentials/credentials.go +++ b/cmd/account/credentials/credentials.go @@ -90,9 +90,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/account/csp-enablement-account/csp-enablement-account.go b/cmd/account/csp-enablement-account/csp-enablement-account.go index d6fce9537..9fb94a616 100755 --- a/cmd/account/csp-enablement-account/csp-enablement-account.go +++ b/cmd/account/csp-enablement-account/csp-enablement-account.go @@ -129,9 +129,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git 
a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index 5cdf422d7..9d16a44d4 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -88,9 +88,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -320,9 +326,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.IntegrationId = args[0] diff --git a/cmd/account/disable-legacy-features/disable-legacy-features.go b/cmd/account/disable-legacy-features/disable-legacy-features.go index 6d25b943d..5e732b87b 100755 --- a/cmd/account/disable-legacy-features/disable-legacy-features.go +++ b/cmd/account/disable-legacy-features/disable-legacy-features.go @@ -185,9 +185,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/account/encryption-keys/encryption-keys.go b/cmd/account/encryption-keys/encryption-keys.go index 44545ccfa..3f7082cd7 100755 --- a/cmd/account/encryption-keys/encryption-keys.go +++ b/cmd/account/encryption-keys/encryption-keys.go @@ -107,9 +107,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go index 71149e5ad..1f820e3a9 100755 --- a/cmd/account/esm-enablement-account/esm-enablement-account.go +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -127,9 +127,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/account/groups/groups.go 
b/cmd/account/groups/groups.go index a7e1ac430..7c2e83319 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -97,9 +97,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -358,9 +364,15 @@ func newPatch() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = patchJson.Unmarshal(&patchReq) - if err != nil { - return err + diags := patchJson.Unmarshal(&patchReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -446,9 +458,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/account/ip-access-lists/ip-access-lists.go b/cmd/account/ip-access-lists/ip-access-lists.go index 5c6d27dd7..61fa944ab 100755 --- a/cmd/account/ip-access-lists/ip-access-lists.go +++ b/cmd/account/ip-access-lists/ip-access-lists.go @@ -132,9 +132,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -411,9 +417,15 @@ func newReplace() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = replaceJson.Unmarshal(&replaceReq) - if err != nil { - return err + diags := replaceJson.Unmarshal(&replaceReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } replaceReq.IpAccessListId = args[0] @@ -505,9 +517,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index 4584f4d2b..cb083f3ba 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -162,9 +162,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -369,9 +375,15 
@@ func newPatchStatus() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = patchStatusJson.Unmarshal(&patchStatusReq) - if err != nil { - return err + diags := patchStatusJson.Unmarshal(&patchStatusReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } patchStatusReq.LogDeliveryConfigurationId = args[0] diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index d7f32ccb9..9d2490754 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -85,9 +85,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } _, err = fmt.Sscan(args[0], &createReq.WorkspaceId) @@ -343,9 +349,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index 7c8e3f2c1..eb5755782 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -80,9 +80,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -304,9 +310,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.MetastoreId = args[0] diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index cd8da2905..168de9c1d 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -96,9 +96,15 @@ func newCreateNetworkConnectivityConfiguration() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createNetworkConnectivityConfigurationJson.Unmarshal(&createNetworkConnectivityConfigurationReq) - if err != nil { - return err + diags := createNetworkConnectivityConfigurationJson.Unmarshal(&createNetworkConnectivityConfigurationReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if 
!cmd.Flags().Changed("json") { @@ -187,9 +193,15 @@ func newCreatePrivateEndpointRule() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createPrivateEndpointRuleJson.Unmarshal(&createPrivateEndpointRuleReq) - if err != nil { - return err + diags := createPrivateEndpointRuleJson.Unmarshal(&createPrivateEndpointRuleReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } createPrivateEndpointRuleReq.NetworkConnectivityConfigId = args[0] diff --git a/cmd/account/networks/networks.go b/cmd/account/networks/networks.go index 05ef0c815..086d24be8 100755 --- a/cmd/account/networks/networks.go +++ b/cmd/account/networks/networks.go @@ -97,9 +97,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/account/personal-compute/personal-compute.go b/cmd/account/personal-compute/personal-compute.go index 2a14b0b33..dac7e2e30 100755 --- a/cmd/account/personal-compute/personal-compute.go +++ b/cmd/account/personal-compute/personal-compute.go @@ -189,9 +189,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index d527fa64e..312f6d023 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -109,9 +109,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -411,9 +417,15 @@ func newReplace() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = replaceJson.Unmarshal(&replaceReq) - if err != nil { - return err + diags := replaceJson.Unmarshal(&replaceReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } replaceReq.PrivateAccessSettingsId = args[0] diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index 5143d53cc..c97bcfc61 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -85,9 +85,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = 
createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -315,9 +321,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.IntegrationId = args[0] diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index c86810f1d..f6ec1fa54 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -95,9 +95,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -358,9 +364,15 @@ func newPatch() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = patchJson.Unmarshal(&patchReq) - if err != nil { - return err + diags := patchJson.Unmarshal(&patchReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -448,9 +460,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index 4280ae8c3..b3b8ad367 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -88,9 +88,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } createReq.MetastoreId = args[0] @@ -340,9 +346,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.MetastoreId = args[0] diff --git a/cmd/account/storage/storage.go b/cmd/account/storage/storage.go index 50460ed0a..24bb6d80b 100755 --- a/cmd/account/storage/storage.go +++ b/cmd/account/storage/storage.go @@ -87,9 
+87,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/account/usage-dashboards/usage-dashboards.go b/cmd/account/usage-dashboards/usage-dashboards.go index 8a1c32476..a482b466d 100755 --- a/cmd/account/usage-dashboards/usage-dashboards.go +++ b/cmd/account/usage-dashboards/usage-dashboards.go @@ -80,9 +80,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 289d2972f..10990084f 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -103,9 +103,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -374,9 +380,15 @@ func newPatch() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = patchJson.Unmarshal(&patchReq) - if err != nil { - return err + diags := patchJson.Unmarshal(&patchReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -465,9 +477,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/account/vpc-endpoints/vpc-endpoints.go b/cmd/account/vpc-endpoints/vpc-endpoints.go index e6c6c126a..c1aab0d24 100755 --- a/cmd/account/vpc-endpoints/vpc-endpoints.go +++ b/cmd/account/vpc-endpoints/vpc-endpoints.go @@ -104,9 +104,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index 58468d09f..e09095d37 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -273,9 +273,15 @@ func newUpdate() 
*cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 1ec6230b2..82d3d7db9 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -133,9 +133,15 @@ func newCreate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -551,9 +557,15 @@ func newUpdate() *cobra.Command { a := root.AccountClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/api/api.go b/cmd/api/api.go index 03460f717..d33939a52 100644 --- a/cmd/api/api.go +++ b/cmd/api/api.go @@ -42,9 +42,9 @@ func makeCommand(method string) *cobra.Command { var path = args[0] var request any - err := payload.Unmarshal(&request) - if err != nil { - return err + diags := payload.Unmarshal(&request) + if diags.HasError() { + return diags.Error() } cfg := &config.Config{} diff --git a/cmd/workspace/alerts-legacy/alerts-legacy.go b/cmd/workspace/alerts-legacy/alerts-legacy.go index 1046b1124..4f7d5f966 100755 --- a/cmd/workspace/alerts-legacy/alerts-legacy.go +++ b/cmd/workspace/alerts-legacy/alerts-legacy.go @@ -93,9 +93,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -357,9 +363,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/alerts/alerts.go b/cmd/workspace/alerts/alerts.go index cfaa3f55f..fcf18652b 100755 --- a/cmd/workspace/alerts/alerts.go +++ b/cmd/workspace/alerts/alerts.go @@ -85,9 +85,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if 
diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -354,9 +360,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Id = args[0] diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 780f55945..4cee2f82a 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -113,9 +113,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -267,9 +273,15 @@ func newDeploy() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deployJson.Unmarshal(&deployReq) - if err != nil { - return err + diags := deployJson.Unmarshal(&deployReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } deployReq.AppName = args[0] @@ -702,9 +714,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } setPermissionsReq.AppName = args[0] @@ -936,9 +954,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Name = args[0] @@ -1005,9 +1029,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updatePermissionsReq.AppName = args[0] diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go index fc25e3cb8..e66833697 100755 --- a/cmd/workspace/artifact-allowlists/artifact-allowlists.go +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -145,9 +145,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if 
diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go index 2385195bb..dca88f3d1 100755 --- a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go +++ b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go @@ -127,9 +127,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index a17bb0072..9294c192b 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -105,9 +105,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -363,9 +369,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Name = args[0] diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 9466c4b94..72560b846 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -85,9 +85,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -344,9 +350,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Name = args[0] diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 830d44ca3..b34dd53db 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ 
b/cmd/workspace/cluster-policies/cluster-policies.go @@ -113,9 +113,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -185,9 +191,15 @@ func newDelete() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteJson.Unmarshal(&deleteReq) - if err != nil { - return err + diags := deleteJson.Unmarshal(&deleteReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -284,9 +296,15 @@ func newEdit() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = editJson.Unmarshal(&editReq) - if err != nil { - return err + diags := editJson.Unmarshal(&editReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -630,9 +648,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -711,9 +735,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index b36102d98..0ed454de2 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -134,9 +134,15 @@ func newChangeOwner() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = changeOwnerJson.Unmarshal(&changeOwnerReq) - if err != nil { - return err + diags := changeOwnerJson.Unmarshal(&changeOwnerReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -268,9 +274,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -362,9 +374,15 @@ func newDelete() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteJson.Unmarshal(&deleteReq) - if err != nil { - return err + diags := 
deleteJson.Unmarshal(&deleteReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -519,9 +537,15 @@ func newEdit() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = editJson.Unmarshal(&editReq) - if err != nil { - return err + diags := editJson.Unmarshal(&editReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -617,9 +641,15 @@ func newEvents() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = eventsJson.Unmarshal(&eventsReq) - if err != nil { - return err + diags := eventsJson.Unmarshal(&eventsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1069,9 +1099,15 @@ func newPermanentDelete() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = permanentDeleteJson.Unmarshal(&permanentDeleteReq) - if err != nil { - return err + diags := permanentDeleteJson.Unmarshal(&permanentDeleteReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1161,9 +1197,15 @@ func newPin() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = pinJson.Unmarshal(&pinReq) - if err != nil { - return err + diags := pinJson.Unmarshal(&pinReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1260,9 +1302,15 @@ func newResize() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = resizeJson.Unmarshal(&resizeReq) - if err != nil { - return err + diags := resizeJson.Unmarshal(&resizeReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1370,9 +1418,15 @@ func newRestart() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = restartJson.Unmarshal(&restartReq) - if err != nil { - return err + diags := restartJson.Unmarshal(&restartReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1464,9 +1518,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -1608,9 +1668,15 @@ func newStart() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = startJson.Unmarshal(&startReq) - if err != nil { - return err + diags := startJson.Unmarshal(&startReq) + if 
diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1712,9 +1778,15 @@ func newUnpin() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = unpinJson.Unmarshal(&unpinReq) - if err != nil { - return err + diags := unpinJson.Unmarshal(&unpinReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1824,9 +1896,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1905,9 +1983,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/compliance-security-profile/compliance-security-profile.go b/cmd/workspace/compliance-security-profile/compliance-security-profile.go index a7b45901f..58f3eddad 100755 --- a/cmd/workspace/compliance-security-profile/compliance-security-profile.go +++ b/cmd/workspace/compliance-security-profile/compliance-security-profile.go @@ -130,9 +130,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index f76420fbe..161f5ab41 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -92,9 +92,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -355,9 +361,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please 
provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/consumer-installations/consumer-installations.go b/cmd/workspace/consumer-installations/consumer-installations.go index 92f61789f..1848cb8fc 100755 --- a/cmd/workspace/consumer-installations/consumer-installations.go +++ b/cmd/workspace/consumer-installations/consumer-installations.go @@ -86,9 +86,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } createReq.ListingId = args[0] @@ -319,9 +325,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go index 8b0af3cc6..6d751c631 100755 --- a/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go +++ b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go @@ -85,9 +85,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/credentials-manager/credentials-manager.go b/cmd/workspace/credentials-manager/credentials-manager.go index 5a40232b3..e29bc0bd4 100755 --- a/cmd/workspace/credentials-manager/credentials-manager.go +++ b/cmd/workspace/credentials-manager/credentials-manager.go @@ -75,9 +75,15 @@ func newExchangeToken() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = exchangeTokenJson.Unmarshal(&exchangeTokenReq) - if err != nil { - return err + diags := exchangeTokenJson.Unmarshal(&exchangeTokenReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/dashboard-widgets/dashboard-widgets.go b/cmd/workspace/dashboard-widgets/dashboard-widgets.go index 02b13739a..e42818266 100755 --- a/cmd/workspace/dashboard-widgets/dashboard-widgets.go +++ b/cmd/workspace/dashboard-widgets/dashboard-widgets.go @@ -75,9 +75,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + 
if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -196,9 +202,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index fcab0aa2a..5bdd7b137 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -78,9 +78,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -405,9 +411,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/default-namespace/default-namespace.go b/cmd/workspace/default-namespace/default-namespace.go index b15907bec..e5039cbaf 100755 --- a/cmd/workspace/default-namespace/default-namespace.go +++ b/cmd/workspace/default-namespace/default-namespace.go @@ -199,9 +199,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/disable-legacy-access/disable-legacy-access.go b/cmd/workspace/disable-legacy-access/disable-legacy-access.go index fea2b3c40..c50de446b 100755 --- a/cmd/workspace/disable-legacy-access/disable-legacy-access.go +++ b/cmd/workspace/disable-legacy-access/disable-legacy-access.go @@ -187,9 +187,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git 
a/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go b/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go index a8acc5cd1..3d99ecef8 100755 --- a/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go +++ b/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go @@ -132,9 +132,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index b1af2f868..4c6b57d18 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -130,9 +130,15 @@ func newCreateExperiment() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createExperimentJson.Unmarshal(&createExperimentReq) - if err != nil { - return err + diags := createExperimentJson.Unmarshal(&createExperimentReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -203,9 +209,15 @@ func newCreateRun() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createRunJson.Unmarshal(&createRunReq) - if err != nil { - return err + diags := createRunJson.Unmarshal(&createRunReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -277,9 +289,15 @@ func newDeleteExperiment() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteExperimentJson.Unmarshal(&deleteExperimentReq) - if err != nil { - return err + diags := deleteExperimentJson.Unmarshal(&deleteExperimentReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -352,9 +370,15 @@ func newDeleteRun() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteRunJson.Unmarshal(&deleteRunReq) - if err != nil { - return err + diags := deleteRunJson.Unmarshal(&deleteRunReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -435,9 +459,15 @@ func newDeleteRuns() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteRunsJson.Unmarshal(&deleteRunsReq) - if err != nil { - return err + diags := deleteRunsJson.Unmarshal(&deleteRunsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -518,9 +548,15 @@ func newDeleteTag() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteTagJson.Unmarshal(&deleteTagReq) - if err 
!= nil { - return err + diags := deleteTagJson.Unmarshal(&deleteTagReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1108,9 +1144,15 @@ func newLogBatch() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = logBatchJson.Unmarshal(&logBatchReq) - if err != nil { - return err + diags := logBatchJson.Unmarshal(&logBatchReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -1174,9 +1216,15 @@ func newLogInputs() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = logInputsJson.Unmarshal(&logInputsReq) - if err != nil { - return err + diags := logInputsJson.Unmarshal(&logInputsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -1254,9 +1302,15 @@ func newLogMetric() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = logMetricJson.Unmarshal(&logMetricReq) - if err != nil { - return err + diags := logMetricJson.Unmarshal(&logMetricReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1335,9 +1389,15 @@ func newLogModel() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = logModelJson.Unmarshal(&logModelReq) - if err != nil { - return err + diags := logModelJson.Unmarshal(&logModelReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -1414,9 +1474,15 @@ func newLogParam() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = logParamJson.Unmarshal(&logParamReq) - if err != nil { - return err + diags := logParamJson.Unmarshal(&logParamReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1497,9 +1563,15 @@ func newRestoreExperiment() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = restoreExperimentJson.Unmarshal(&restoreExperimentReq) - if err != nil { - return err + diags := restoreExperimentJson.Unmarshal(&restoreExperimentReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1572,9 +1644,15 @@ func newRestoreRun() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = restoreRunJson.Unmarshal(&restoreRunReq) - if err != nil { - return err + diags := restoreRunJson.Unmarshal(&restoreRunReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1655,9 +1733,15 @@ func newRestoreRuns() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = restoreRunsJson.Unmarshal(&restoreRunsReq) - if 
err != nil { - return err + diags := restoreRunsJson.Unmarshal(&restoreRunsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1732,9 +1816,15 @@ func newSearchExperiments() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = searchExperimentsJson.Unmarshal(&searchExperimentsReq) - if err != nil { - return err + diags := searchExperimentsJson.Unmarshal(&searchExperimentsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -1800,9 +1890,15 @@ func newSearchRuns() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = searchRunsJson.Unmarshal(&searchRunsReq) - if err != nil { - return err + diags := searchRunsJson.Unmarshal(&searchRunsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -1874,9 +1970,15 @@ func newSetExperimentTag() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setExperimentTagJson.Unmarshal(&setExperimentTagReq) - if err != nil { - return err + diags := setExperimentTagJson.Unmarshal(&setExperimentTagReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1951,9 +2053,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } setPermissionsReq.ExperimentId = args[0] @@ -2032,9 +2140,15 @@ func newSetTag() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setTagJson.Unmarshal(&setTagReq) - if err != nil { - return err + diags := setTagJson.Unmarshal(&setTagReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -2112,9 +2226,15 @@ func newUpdateExperiment() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateExperimentJson.Unmarshal(&updateExperimentReq) - if err != nil { - return err + diags := updateExperimentJson.Unmarshal(&updateExperimentReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -2183,9 +2303,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } 
updatePermissionsReq.ExperimentId = args[0] @@ -2251,9 +2377,15 @@ func newUpdateRun() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateRunJson.Unmarshal(&updateRunReq) - if err != nil { - return err + diags := updateRunJson.Unmarshal(&updateRunReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 42493fc46..97d34df09 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -112,9 +112,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -381,9 +387,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Name = args[0] diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index c8de48797..86b292674 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -85,9 +85,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -381,9 +387,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/genie/genie.go b/cmd/workspace/genie/genie.go index e4a059091..287bcde63 100755 --- a/cmd/workspace/genie/genie.go +++ b/cmd/workspace/genie/genie.go @@ -105,9 +105,15 @@ func newCreateMessage() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createMessageJson.Unmarshal(&createMessageReq) - if err != nil { - return err + diags := createMessageJson.Unmarshal(&createMessageReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } createMessageReq.SpaceId = args[0] @@ -392,9 +398,15 @@ func newStartConversation() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = 
startConversationJson.Unmarshal(&startConversationReq) - if err != nil { - return err + diags := startConversationJson.Unmarshal(&startConversationReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } startConversationReq.SpaceId = args[0] diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index b5082d311..978ca6bac 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -103,9 +103,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -371,9 +377,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } _, err = fmt.Sscan(args[0], &updateReq.CredentialId) diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index 92dcb2592..52adde3fb 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -101,9 +101,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -367,9 +373,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.ScriptId = args[0] diff --git a/cmd/workspace/grants/grants.go b/cmd/workspace/grants/grants.go index 876f0343e..b1dd4e8cb 100755 --- a/cmd/workspace/grants/grants.go +++ b/cmd/workspace/grants/grants.go @@ -223,9 +223,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } _, err = fmt.Sscan(args[0], &updateReq.SecurableType) diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index 14650d984..a2d323106 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -97,9 +97,15 @@ func newCreate() *cobra.Command { w := 
root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -358,9 +364,15 @@ func newPatch() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = patchJson.Unmarshal(&patchReq) - if err != nil { - return err + diags := patchJson.Unmarshal(&patchReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -446,9 +458,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index db96f1466..8a84df946 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -128,9 +128,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -206,9 +212,15 @@ func newDelete() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteJson.Unmarshal(&deleteReq) - if err != nil { - return err + diags := deleteJson.Unmarshal(&deleteReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -309,9 +321,15 @@ func newEdit() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = editJson.Unmarshal(&editReq) - if err != nil { - return err + diags := editJson.Unmarshal(&editReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -631,9 +649,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -712,9 +736,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := 
cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/instance-profiles/instance-profiles.go b/cmd/workspace/instance-profiles/instance-profiles.go index 7134c16c6..5c4bc8d9b 100755 --- a/cmd/workspace/instance-profiles/instance-profiles.go +++ b/cmd/workspace/instance-profiles/instance-profiles.go @@ -99,9 +99,15 @@ func newAdd() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = addJson.Unmarshal(&addReq) - if err != nil { - return err + diags := addJson.Unmarshal(&addReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -192,9 +198,15 @@ func newEdit() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = editJson.Unmarshal(&editReq) - if err != nil { - return err + diags := editJson.Unmarshal(&editReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -311,9 +323,15 @@ func newRemove() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = removeJson.Unmarshal(&removeReq) - if err != nil { - return err + diags := removeJson.Unmarshal(&removeReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index ec5958b5b..070f279ec 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -133,9 +133,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -414,9 +420,15 @@ func newReplace() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = replaceJson.Unmarshal(&replaceReq) - if err != nil { - return err + diags := replaceJson.Unmarshal(&replaceReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } replaceReq.IpAccessListId = args[0] @@ -510,9 +522,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 2d422fa8c..d4ceb0c28 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -116,9 +116,15 @@ func newCancelAllRuns() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = 
cancelAllRunsJson.Unmarshal(&cancelAllRunsReq) - if err != nil { - return err + diags := cancelAllRunsJson.Unmarshal(&cancelAllRunsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -193,9 +199,15 @@ func newCancelRun() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = cancelRunJson.Unmarshal(&cancelRunReq) - if err != nil { - return err + diags := cancelRunJson.Unmarshal(&cancelRunReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -291,9 +303,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -364,9 +382,15 @@ func newDelete() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteJson.Unmarshal(&deleteReq) - if err != nil { - return err + diags := deleteJson.Unmarshal(&deleteReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -457,9 +481,15 @@ func newDeleteRun() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteRunJson.Unmarshal(&deleteRunReq) - if err != nil { - return err + diags := deleteRunJson.Unmarshal(&deleteRunReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1143,9 +1173,15 @@ func newRepairRun() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = repairRunJson.Unmarshal(&repairRunReq) - if err != nil { - return err + diags := repairRunJson.Unmarshal(&repairRunReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1242,9 +1278,15 @@ func newReset() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = resetJson.Unmarshal(&resetReq) - if err != nil { - return err + diags := resetJson.Unmarshal(&resetReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -1332,9 +1374,15 @@ func newRunNow() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = runNowJson.Unmarshal(&runNowReq) - if err != nil { - return err + diags := runNowJson.Unmarshal(&runNowReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1436,9 +1484,15 @@ func newSetPermissions() 
*cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -1538,9 +1592,15 @@ func newSubmit() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = submitJson.Unmarshal(&submitReq) - if err != nil { - return err + diags := submitJson.Unmarshal(&submitReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -1632,9 +1692,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -1717,9 +1783,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index ef2d6845b..33a45c65f 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -108,9 +108,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -180,9 +186,15 @@ func newCreateSchedule() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createScheduleJson.Unmarshal(&createScheduleReq) - if err != nil { - return err + diags := createScheduleJson.Unmarshal(&createScheduleReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -250,9 +262,15 @@ func newCreateSubscription() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createSubscriptionJson.Unmarshal(&createSubscriptionReq) - if err != nil { - return err + diags := createSubscriptionJson.Unmarshal(&createSubscriptionReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -870,9 +888,15 @@ func newMigrate() *cobra.Command { w := 
root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = migrateJson.Unmarshal(&migrateReq) - if err != nil { - return err + diags := migrateJson.Unmarshal(&migrateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -941,9 +965,15 @@ func newPublish() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = publishJson.Unmarshal(&publishReq) - if err != nil { - return err + diags := publishJson.Unmarshal(&publishReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } publishReq.DashboardId = args[0] @@ -1128,9 +1158,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.DashboardId = args[0] @@ -1200,9 +1236,15 @@ func newUpdateSchedule() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateScheduleJson.Unmarshal(&updateScheduleReq) - if err != nil { - return err + diags := updateScheduleJson.Unmarshal(&updateScheduleReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index 2c10d8161..e6b332aef 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -190,9 +190,15 @@ func newInstall() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = installJson.Unmarshal(&installReq) - if err != nil { - return err + diags := installJson.Unmarshal(&installReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -251,9 +257,15 @@ func newUninstall() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = uninstallJson.Unmarshal(&uninstallReq) - if err != nil { - return err + diags := uninstallJson.Unmarshal(&uninstallReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 22bcd3dcf..563beb2f4 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -112,9 +112,15 @@ func newAssign() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = assignJson.Unmarshal(&assignReq) - if err != nil { - return err + diags := assignJson.Unmarshal(&assignReq) + if diags.HasError() { + return diags.Error() + } + if 
len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } _, err = fmt.Sscan(args[0], &assignReq.WorkspaceId) @@ -201,9 +207,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -606,9 +618,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -690,9 +708,15 @@ func newUpdateAssignment() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateAssignmentJson.Unmarshal(&updateAssignmentReq) - if err != nil { - return err + diags := updateAssignmentJson.Unmarshal(&updateAssignmentReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 41f06ac4d..b45d83e3d 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -141,9 +141,15 @@ func newApproveTransitionRequest() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = approveTransitionRequestJson.Unmarshal(&approveTransitionRequestReq) - if err != nil { - return err + diags := approveTransitionRequestJson.Unmarshal(&approveTransitionRequestReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -235,9 +241,15 @@ func newCreateComment() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createCommentJson.Unmarshal(&createCommentReq) - if err != nil { - return err + diags := createCommentJson.Unmarshal(&createCommentReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -322,9 +334,15 @@ func newCreateModel() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createModelJson.Unmarshal(&createModelReq) - if err != nil { - return err + diags := createModelJson.Unmarshal(&createModelReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -403,9 +421,15 @@ func newCreateModelVersion() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createModelVersionJson.Unmarshal(&createModelVersionReq) - if err != nil { - return err + diags := createModelVersionJson.Unmarshal(&createModelVersionReq) + if diags.HasError() { + return diags.Error() + } + if 
len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -493,9 +517,15 @@ func newCreateTransitionRequest() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createTransitionRequestJson.Unmarshal(&createTransitionRequestReq) - if err != nil { - return err + diags := createTransitionRequestJson.Unmarshal(&createTransitionRequestReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -570,9 +600,15 @@ func newCreateWebhook() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createWebhookJson.Unmarshal(&createWebhookReq) - if err != nil { - return err + diags := createWebhookJson.Unmarshal(&createWebhookReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -1079,9 +1115,15 @@ func newGetLatestVersions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = getLatestVersionsJson.Unmarshal(&getLatestVersionsReq) - if err != nil { - return err + diags := getLatestVersionsJson.Unmarshal(&getLatestVersionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1629,9 +1671,15 @@ func newRejectTransitionRequest() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = rejectTransitionRequestJson.Unmarshal(&rejectTransitionRequestReq) - if err != nil { - return err + diags := rejectTransitionRequestJson.Unmarshal(&rejectTransitionRequestReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1715,9 +1763,15 @@ func newRenameModel() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = renameModelJson.Unmarshal(&renameModelReq) - if err != nil { - return err + diags := renameModelJson.Unmarshal(&renameModelReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1907,9 +1961,15 @@ func newSetModelTag() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setModelTagJson.Unmarshal(&setModelTagReq) - if err != nil { - return err + diags := setModelTagJson.Unmarshal(&setModelTagReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -1996,9 +2056,15 @@ func newSetModelVersionTag() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setModelVersionTagJson.Unmarshal(&setModelVersionTagReq) - if err != nil { - return err + diags := setModelVersionTagJson.Unmarshal(&setModelVersionTagReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := 
cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -2076,9 +2142,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } setPermissionsReq.RegisteredModelId = args[0] @@ -2166,9 +2238,15 @@ func newTestRegistryWebhook() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = testRegistryWebhookJson.Unmarshal(&testRegistryWebhookReq) - if err != nil { - return err + diags := testRegistryWebhookJson.Unmarshal(&testRegistryWebhookReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -2259,9 +2337,15 @@ func newTransitionStage() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = transitionStageJson.Unmarshal(&transitionStageReq) - if err != nil { - return err + diags := transitionStageJson.Unmarshal(&transitionStageReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -2350,9 +2434,15 @@ func newUpdateComment() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateCommentJson.Unmarshal(&updateCommentReq) - if err != nil { - return err + diags := updateCommentJson.Unmarshal(&updateCommentReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -2430,9 +2520,15 @@ func newUpdateModel() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateModelJson.Unmarshal(&updateModelReq) - if err != nil { - return err + diags := updateModelJson.Unmarshal(&updateModelReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -2508,9 +2604,15 @@ func newUpdateModelVersion() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateModelVersionJson.Unmarshal(&updateModelVersionReq) - if err != nil { - return err + diags := updateModelVersionJson.Unmarshal(&updateModelVersionReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -2582,9 +2684,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updatePermissionsReq.RegisteredModelId = args[0] @@ -2663,9 
+2771,15 @@ func newUpdateWebhook() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateWebhookJson.Unmarshal(&updateWebhookReq) - if err != nil { - return err + diags := updateWebhookJson.Unmarshal(&updateWebhookReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index d2f054045..439e5f657 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -377,9 +377,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.FullName = args[0] diff --git a/cmd/workspace/notification-destinations/notification-destinations.go b/cmd/workspace/notification-destinations/notification-destinations.go index 5ad47cc95..470765879 100755 --- a/cmd/workspace/notification-destinations/notification-destinations.go +++ b/cmd/workspace/notification-destinations/notification-destinations.go @@ -84,9 +84,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -313,9 +319,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Id = args[0] diff --git a/cmd/workspace/online-tables/online-tables.go b/cmd/workspace/online-tables/online-tables.go index da2f8c041..1c25d1e26 100755 --- a/cmd/workspace/online-tables/online-tables.go +++ b/cmd/workspace/online-tables/online-tables.go @@ -79,9 +79,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } diff --git a/cmd/workspace/permission-migration/permission-migration.go b/cmd/workspace/permission-migration/permission-migration.go index 2e50b1231..15ff1b751 100755 --- a/cmd/workspace/permission-migration/permission-migration.go +++ b/cmd/workspace/permission-migration/permission-migration.go @@ -92,9 +92,15 @@ func newMigratePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = migratePermissionsJson.Unmarshal(&migratePermissionsReq) - if err != nil { - return err + diags := migratePermissionsJson.Unmarshal(&migratePermissionsReq) + if 
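How the three checks interact is easier to see on concrete values. The struct-literal fields below are assumptions about `libs/diag` (these hunks only exercise the methods), so treat them as illustrative:

```go
package example

import (
	"fmt"

	"github.com/databricks/cli/libs/diag"
)

// demoDiagnostics walks the three checks the generated block performs.
// Severity/Summary are assumptions about libs/diag; HasError, Error and
// len() are the only pieces this patch actually relies on.
func demoDiagnostics() {
	warnOnly := diag.Diagnostics{
		{Severity: diag.Warning, Summary: "unknown field: foo"},
	}
	fmt.Println(warnOnly.HasError()) // false: the command keeps going
	fmt.Println(len(warnOnly) > 0)   // true:  the warning is rendered to the error output

	withError := diag.Diagnostics{
		{Severity: diag.Warning, Summary: "unknown field: foo"},
		{Severity: diag.Error, Summary: "expected a map, found a string"},
	}
	fmt.Println(withError.HasError()) // true: RunE returns withError.Error() immediately
}
```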
diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index c6033e4a4..d007a425f 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -265,9 +265,15 @@ func newSet() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setJson.Unmarshal(&setReq) - if err != nil { - return err + diags := setJson.Unmarshal(&setReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } setReq.RequestObjectType = args[0] @@ -340,9 +346,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.RequestObjectType = args[0] diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index ac361e313..5bd94e0b6 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -98,9 +98,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -699,9 +705,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -788,9 +800,15 @@ func newStartUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = startUpdateJson.Unmarshal(&startUpdateReq) - if err != nil { - return err + diags := startUpdateJson.Unmarshal(&startUpdateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -977,9 +995,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -1058,9 +1082,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + 
diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go b/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go index 1274c8790..d128d80b8 100755 --- a/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go +++ b/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go @@ -110,9 +110,15 @@ func newEnforceCompliance() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = enforceComplianceJson.Unmarshal(&enforceComplianceReq) - if err != nil { - return err + diags := enforceComplianceJson.Unmarshal(&enforceComplianceReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go index d74caa572..384dab2c9 100755 --- a/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go +++ b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go @@ -104,9 +104,15 @@ func newEnforceCompliance() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = enforceComplianceJson.Unmarshal(&enforceComplianceReq) - if err != nil { - return err + diags := enforceComplianceJson.Unmarshal(&enforceComplianceReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go index a3f746214..fea836d20 100755 --- a/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go +++ b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go @@ -73,9 +73,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -259,9 +265,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/provider-exchanges/provider-exchanges.go b/cmd/workspace/provider-exchanges/provider-exchanges.go index b92403755..a96f0673e 100755 --- a/cmd/workspace/provider-exchanges/provider-exchanges.go +++ 
b/cmd/workspace/provider-exchanges/provider-exchanges.go @@ -91,9 +91,15 @@ func newAddListingToExchange() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = addListingToExchangeJson.Unmarshal(&addListingToExchangeReq) - if err != nil { - return err + diags := addListingToExchangeJson.Unmarshal(&addListingToExchangeReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -154,9 +160,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -546,9 +558,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/provider-files/provider-files.go b/cmd/workspace/provider-files/provider-files.go index 62dcb6de9..392ed2890 100755 --- a/cmd/workspace/provider-files/provider-files.go +++ b/cmd/workspace/provider-files/provider-files.go @@ -77,9 +77,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -273,9 +279,15 @@ func newList() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err + diags := listJson.Unmarshal(&listReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/provider-listings/provider-listings.go b/cmd/workspace/provider-listings/provider-listings.go index 18c99c53d..4c7c6c563 100755 --- a/cmd/workspace/provider-listings/provider-listings.go +++ b/cmd/workspace/provider-listings/provider-listings.go @@ -75,9 +75,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON 
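For context on where this block sits relative to the surrounding cobra plumbing, a rough sketch follows. The command name, request type, and flag help text are placeholders, and it assumes `flags.JsonFlag` implements `pflag.Value`, which the existing `--json` registration already relies on.

```go
package example

import (
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/flags"
	"github.com/spf13/cobra"
)

// createRequest stands in for a generated SDK request struct.
type createRequest struct {
	Name string `json:"name"`
}

func newCreateSketch() *cobra.Command {
	var createReq createRequest
	var createJson flags.JsonFlag

	cmd := &cobra.Command{Use: "create NAME"}
	// Placeholder help text; JsonFlag is registered as an ordinary pflag.Value.
	cmd.Flags().Var(&createJson, "json", "request body, inline JSON or @path/to/file")

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()
		if cmd.Flags().Changed("json") {
			diags := createJson.Unmarshal(&createReq)
			if diags.HasError() {
				return diags.Error()
			}
			if len(diags) > 0 {
				if err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags); err != nil {
					return err
				}
			}
		}
		if !cmd.Flags().Changed("json") {
			// Positional fallback, as in the generated commands.
			createReq.Name = args[0]
		}
		// ... issue the API call with createReq ...
		return nil
	}
	return cmd
}
```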
format by specifying the --json flag") @@ -326,9 +332,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go index d18e2e578..48c444f1f 100755 --- a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go +++ b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go @@ -142,9 +142,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.ListingId = args[0] diff --git a/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go index bb3ca9666..a8d151a20 100755 --- a/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go +++ b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go @@ -208,9 +208,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Id = args[0] diff --git a/cmd/workspace/provider-providers/provider-providers.go b/cmd/workspace/provider-providers/provider-providers.go index 94d12d6f0..3c9c024e8 100755 --- a/cmd/workspace/provider-providers/provider-providers.go +++ b/cmd/workspace/provider-providers/provider-providers.go @@ -74,9 +74,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -325,9 +331,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git 
a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index af2737a0f..504beac5e 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -97,9 +97,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -446,9 +452,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/quality-monitors/quality-monitors.go b/cmd/workspace/quality-monitors/quality-monitors.go index 1ff9b017e..58075aa54 100755 --- a/cmd/workspace/quality-monitors/quality-monitors.go +++ b/cmd/workspace/quality-monitors/quality-monitors.go @@ -196,9 +196,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } createReq.TableName = args[0] @@ -559,9 +565,15 @@ func newRegenerateDashboard() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = regenerateDashboardJson.Unmarshal(®enerateDashboardReq) - if err != nil { - return err + diags := regenerateDashboardJson.Unmarshal(®enerateDashboardReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } regenerateDashboardReq.TableName = args[0] @@ -724,9 +736,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.TableName = args[0] diff --git a/cmd/workspace/queries-legacy/queries-legacy.go b/cmd/workspace/queries-legacy/queries-legacy.go index fa78bb2b0..e35e1828b 100755 --- a/cmd/workspace/queries-legacy/queries-legacy.go +++ b/cmd/workspace/queries-legacy/queries-legacy.go @@ -96,9 +96,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -454,9 +460,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") 
{ - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index fea01451a..208f887da 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -85,9 +85,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -425,9 +431,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Id = args[0] diff --git a/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go b/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go index 4f45ab23e..f48acff1e 100755 --- a/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go +++ b/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go @@ -87,9 +87,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -222,9 +228,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/query-visualizations/query-visualizations.go b/cmd/workspace/query-visualizations/query-visualizations.go index 042594529..621661952 100755 --- a/cmd/workspace/query-visualizations/query-visualizations.go +++ b/cmd/workspace/query-visualizations/query-visualizations.go @@ -84,9 +84,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -217,9 +223,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil 
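Several of the provider-* commands and the queries-legacy `create` accept input only via `--json`; their unchanged `else` branch still rejects the call, and the new diagnostics handling nests inside the `if`. A hedged helper-style sketch of that shape (the generated code inlines it):

```go
package example

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/flags"
	"github.com/spf13/cobra"
)

// unmarshalRequiredJson mirrors the commands that have no positional
// fallback: a missing --json flag is rejected with the same message the
// generated code uses, and a successful parse still surfaces warnings.
func unmarshalRequiredJson(ctx context.Context, cmd *cobra.Command, j *flags.JsonFlag, req any) error {
	if !cmd.Flags().Changed("json") {
		return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
	}
	diags := j.Unmarshal(req)
	if diags.HasError() {
		return diags.Error()
	}
	if len(diags) > 0 {
		if err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags); err != nil {
			return err
		}
	}
	return nil
}
```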
{ - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Id = args[0] diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index f4472cf37..56abd2014 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -118,9 +118,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -404,9 +410,15 @@ func newRotateToken() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = rotateTokenJson.Unmarshal(&rotateTokenReq) - if err != nil { - return err + diags := rotateTokenJson.Unmarshal(&rotateTokenReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } rotateTokenReq.Name = args[0] @@ -554,9 +566,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index 5aa6cdf15..63f307a32 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -135,9 +135,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -509,9 +515,15 @@ func newSetAlias() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setAliasJson.Unmarshal(&setAliasReq) - if err != nil { - return err + diags := setAliasJson.Unmarshal(&setAliasReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } setAliasReq.FullName = args[0] @@ -589,9 +601,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/repos/overrides.go b/cmd/workspace/repos/overrides.go index 9546d1c1e..aad38ecc7 100644 --- a/cmd/workspace/repos/overrides.go +++ b/cmd/workspace/repos/overrides.go @@ 
-7,6 +7,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/workspace" @@ -35,9 +36,15 @@ func createOverride(createCmd *cobra.Command, createReq *workspace.CreateRepoReq ctx := cmd.Context() w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(createReq) - if err != nil { - return err + diags := createJson.Unmarshal(createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { createReq.Url = args[0] @@ -101,11 +108,18 @@ func updateOverride(updateCmd *cobra.Command, updateReq *workspace.UpdateRepoReq updateJson := updateCmd.Flag("json").Value.(*flags.JsonFlag) updateCmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() + var diags diag.Diagnostics w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags = updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { updateReq.RepoId, err = repoArgumentToRepoID(ctx, w, args) diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index f11dd3ace..b77347b06 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -111,9 +111,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -521,9 +527,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -604,9 +616,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -688,9 +706,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go 
b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go index 5e9f59d2c..5d0fba923 100755 --- a/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go +++ b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go @@ -197,9 +197,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index 3a398251f..3ce573bad 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -100,9 +100,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -386,9 +392,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/secrets/put_secret.go b/cmd/workspace/secrets/put_secret.go index e323c7a10..f24814f05 100644 --- a/cmd/workspace/secrets/put_secret.go +++ b/cmd/workspace/secrets/put_secret.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/spf13/cobra" @@ -60,6 +61,7 @@ func newPutSecret() *cobra.Command { cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() + var diags diag.Diagnostics w := root.WorkspaceClient(ctx) bytesValueChanged := cmd.Flags().Changed("bytes-value") @@ -69,9 +71,15 @@ func newPutSecret() *cobra.Command { } if cmd.Flags().Changed("json") { - err = putSecretJson.Unmarshal(&putSecretReq) - if err != nil { - return err + diags = putSecretJson.Unmarshal(&putSecretReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { putSecretReq.Scope = args[0] diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index f836a2670..e9547b621 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -110,9 +110,15 @@ func newCreateScope() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createScopeJson.Unmarshal(&createScopeReq) - if err != nil { - return err + diags := createScopeJson.Unmarshal(&createScopeReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := 
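Two call sites in this patch are hand-maintained rather than generated: `cmd/workspace/repos/overrides.go` and `cmd/workspace/secrets/put_secret.go`. Their `updateOverride` and `newPutSecret` closures use a named `err` return, so the diff pre-declares `var diags diag.Diagnostics` and assigns with `=` inside the JSON branch (`createOverride` keeps `:=` like the generated code). A minimal sketch of that shape, with a placeholder request type:

```go
package example

import (
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/flags"
	"github.com/spf13/cobra"
)

// repoUpdate stands in for the real SDK request type used by the override.
type repoUpdate struct {
	Branch string `json:"branch"`
}

func newUpdateOverrideSketch(updateJson *flags.JsonFlag) *cobra.Command {
	var updateReq repoUpdate
	cmd := &cobra.Command{Use: "update"}
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		// Declared up front, matching the named err return of these closures;
		// the JSON branch assigns to it with '=' below.
		var diags diag.Diagnostics
		if cmd.Flags().Changed("json") {
			diags = updateJson.Unmarshal(&updateReq)
			if diags.HasError() {
				return diags.Error()
			}
			if len(diags) > 0 {
				if err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags); err != nil {
					return err
				}
			}
		} else {
			// Placeholder for the non-JSON path that fills the request from args.
			updateReq.Branch = args[0]
		}
		return err
	}
	return cmd
}
```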
cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -191,9 +197,15 @@ func newDeleteAcl() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteAclJson.Unmarshal(&deleteAclReq) - if err != nil { - return err + diags := deleteAclJson.Unmarshal(&deleteAclReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -273,9 +285,15 @@ func newDeleteScope() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteScopeJson.Unmarshal(&deleteScopeReq) - if err != nil { - return err + diags := deleteScopeJson.Unmarshal(&deleteScopeReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -354,9 +372,15 @@ func newDeleteSecret() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteSecretJson.Unmarshal(&deleteSecretReq) - if err != nil { - return err + diags := deleteSecretJson.Unmarshal(&deleteSecretReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -759,9 +783,15 @@ func newPutAcl() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = putAclJson.Unmarshal(&putAclReq) - if err != nil { - return err + diags := putAclJson.Unmarshal(&putAclReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index 957cb1265..317779f30 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -95,9 +95,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -358,9 +364,15 @@ func newPatch() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = patchJson.Unmarshal(&patchReq) - if err != nil { - return err + diags := patchJson.Unmarshal(&patchReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -448,9 +460,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go 
b/cmd/workspace/serving-endpoints/serving-endpoints.go index 0837652db..363e9ea16 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -169,9 +169,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -706,9 +712,15 @@ func newPatch() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = patchJson.Unmarshal(&patchReq) - if err != nil { - return err + diags := patchJson.Unmarshal(&patchReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } patchReq.Name = args[0] @@ -777,9 +789,15 @@ func newPut() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = putJson.Unmarshal(&putReq) - if err != nil { - return err + diags := putJson.Unmarshal(&putReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } putReq.Name = args[0] @@ -850,9 +868,15 @@ func newPutAiGateway() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = putAiGatewayJson.Unmarshal(&putAiGatewayReq) - if err != nil { - return err + diags := putAiGatewayJson.Unmarshal(&putAiGatewayReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } putAiGatewayReq.Name = args[0] @@ -928,9 +952,15 @@ func newQuery() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = queryJson.Unmarshal(&queryReq) - if err != nil { - return err + diags := queryJson.Unmarshal(&queryReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } queryReq.Name = args[0] @@ -997,9 +1027,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } setPermissionsReq.ServingEndpointId = args[0] @@ -1076,9 +1112,15 @@ func newUpdateConfig() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateConfigJson.Unmarshal(&updateConfigReq) - if err != nil { - return err + diags := updateConfigJson.Unmarshal(&updateConfigReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateConfigReq.Name = args[0] @@ -1158,9 +1200,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = 
updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updatePermissionsReq.ServingEndpointId = args[0] diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index 67f870177..62c3407f4 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -100,9 +100,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -427,9 +433,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Name = args[0] @@ -501,9 +513,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updatePermissionsReq.Name = args[0] diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index f4ec5eb4f..2caf09041 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -111,9 +111,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -377,9 +383,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -478,9 +490,15 @@ func newValidate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = validateJson.Unmarshal(&validateReq) - if err != nil { - return err + diags := validateJson.Unmarshal(&validateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } diff --git a/cmd/workspace/table-constraints/table-constraints.go 
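One design point worth noting, inferred from the function name rather than stated anywhere in this diff: routing diagnostics through `cmdio.RenderDiagnosticsToErrorOut` keeps warnings on the error output, so commands that finish by rendering their response to stdout stay machine-readable. Roughly (`cmdio.Render` is assumed from the rest of the CLI; only `RenderDiagnosticsToErrorOut` appears in these hunks):

```go
package example

import (
	"context"

	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/diag"
)

// renderAll sketches the intended split: diagnostics (warnings) go to the
// error output, while the API response goes through the normal renderer.
func renderAll(ctx context.Context, diags diag.Diagnostics, response any) error {
	if len(diags) > 0 {
		if err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags); err != nil {
			return err
		}
	}
	return cmdio.Render(ctx, response)
}
```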
b/cmd/workspace/table-constraints/table-constraints.go index 166da146c..4ac7cb9fb 100755 --- a/cmd/workspace/table-constraints/table-constraints.go +++ b/cmd/workspace/table-constraints/table-constraints.go @@ -92,9 +92,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index ec297f294..35775f17f 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -479,9 +479,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/temporary-table-credentials/temporary-table-credentials.go b/cmd/workspace/temporary-table-credentials/temporary-table-credentials.go index 8718f7ba1..210a59f8e 100755 --- a/cmd/workspace/temporary-table-credentials/temporary-table-credentials.go +++ b/cmd/workspace/temporary-table-credentials/temporary-table-credentials.go @@ -94,9 +94,15 @@ func newGenerateTemporaryTableCredentials() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = generateTemporaryTableCredentialsJson.Unmarshal(&generateTemporaryTableCredentialsReq) - if err != nil { - return err + diags := generateTemporaryTableCredentialsJson.Unmarshal(&generateTemporaryTableCredentialsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index dea94edb0..6deb8d125 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -96,9 +96,15 @@ func newCreateOboToken() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createOboTokenJson.Unmarshal(&createOboTokenReq) - if err != nil { - return err + diags := createOboTokenJson.Unmarshal(&createOboTokenReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -458,9 +464,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -523,9 +535,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = 
updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } diff --git a/cmd/workspace/tokens/tokens.go b/cmd/workspace/tokens/tokens.go index afe4b9a03..5c9b4994d 100755 --- a/cmd/workspace/tokens/tokens.go +++ b/cmd/workspace/tokens/tokens.go @@ -84,9 +84,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -158,9 +164,15 @@ func newDelete() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteJson.Unmarshal(&deleteReq) - if err != nil { - return err + diags := deleteJson.Unmarshal(&deleteReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 53ba2e85c..b085ab413 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -107,9 +107,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -463,9 +469,15 @@ func newPatch() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = patchJson.Unmarshal(&patchReq) - if err != nil { - return err + diags := patchJson.Unmarshal(&patchReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -546,9 +558,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -619,9 +637,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -702,9 +726,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 
{ + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index dd9d57835..0cfb76172 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -95,9 +95,15 @@ func newCreateEndpoint() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createEndpointJson.Unmarshal(&createEndpointReq) - if err != nil { - return err + diags := createEndpointJson.Unmarshal(&createEndpointReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 158474770..832f4a6dc 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -114,9 +114,15 @@ func newCreateIndex() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createIndexJson.Unmarshal(&createIndexReq) - if err != nil { - return err + diags := createIndexJson.Unmarshal(&createIndexReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -195,9 +201,15 @@ func newDeleteDataVectorIndex() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteDataVectorIndexJson.Unmarshal(&deleteDataVectorIndexReq) - if err != nil { - return err + diags := deleteDataVectorIndexJson.Unmarshal(&deleteDataVectorIndexReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -443,9 +455,15 @@ func newQueryIndex() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = queryIndexJson.Unmarshal(&queryIndexReq) - if err != nil { - return err + diags := queryIndexJson.Unmarshal(&queryIndexReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") @@ -515,9 +533,15 @@ func newQueryNextPage() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = queryNextPageJson.Unmarshal(&queryNextPageReq) - if err != nil { - return err + diags := queryNextPageJson.Unmarshal(&queryNextPageReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } queryNextPageReq.IndexName = args[0] @@ -585,9 +609,15 @@ func newScanIndex() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = scanIndexJson.Unmarshal(&scanIndexReq) - if err != nil { - return err + diags := scanIndexJson.Unmarshal(&scanIndexReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 
{ + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } scanIndexReq.IndexName = args[0] @@ -718,9 +748,15 @@ func newUpsertDataVectorIndex() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = upsertDataVectorIndexJson.Unmarshal(&upsertDataVectorIndexReq) - if err != nil { - return err + diags := upsertDataVectorIndexJson.Unmarshal(&upsertDataVectorIndexReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } upsertDataVectorIndexReq.IndexName = args[0] diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 3fc1f447b..2f4555736 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -119,9 +119,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -427,9 +433,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index cdf106365..43d6c8ab9 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -109,9 +109,15 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -277,9 +283,15 @@ func newEdit() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = editJson.Unmarshal(&editReq) - if err != nil { - return err + diags := editJson.Unmarshal(&editReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -688,9 +700,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { @@ -779,9 +797,15 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setWorkspaceWarehouseConfigJson.Unmarshal(&setWorkspaceWarehouseConfigReq) - if err != nil { - return err + diags := setWorkspaceWarehouseConfigJson.Unmarshal(&setWorkspaceWarehouseConfigReq) + if 
diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } @@ -1030,9 +1054,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if len(args) == 0 { diff --git a/cmd/workspace/workspace-bindings/workspace-bindings.go b/cmd/workspace/workspace-bindings/workspace-bindings.go index 4993f1aff..20f54e1dc 100755 --- a/cmd/workspace/workspace-bindings/workspace-bindings.go +++ b/cmd/workspace/workspace-bindings/workspace-bindings.go @@ -226,9 +226,15 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) - if err != nil { - return err + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updateReq.Name = args[0] @@ -297,9 +303,15 @@ func newUpdateBindings() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updateBindingsJson.Unmarshal(&updateBindingsReq) - if err != nil { - return err + diags := updateBindingsJson.Unmarshal(&updateBindingsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } _, err = fmt.Sscan(args[0], &updateBindingsReq.SecurableType) diff --git a/cmd/workspace/workspace-conf/workspace-conf.go b/cmd/workspace/workspace-conf/workspace-conf.go index 92b2f0f3a..a17bc1630 100755 --- a/cmd/workspace/workspace-conf/workspace-conf.go +++ b/cmd/workspace/workspace-conf/workspace-conf.go @@ -127,9 +127,15 @@ func newSetStatus() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setStatusJson.Unmarshal(&setStatusReq) - if err != nil { - return err + diags := setStatusJson.Unmarshal(&setStatusReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 183cac898..21da478c4 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -106,9 +106,15 @@ func newDelete() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = deleteJson.Unmarshal(&deleteReq) - if err != nil { - return err + diags := deleteJson.Unmarshal(&deleteReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -482,9 +488,15 @@ func newImport() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = importJson.Unmarshal(&importReq) - if err != nil { - return err + diags := importJson.Unmarshal(&importReq) + if diags.HasError() { + return diags.Error() + } + if 
len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } if !cmd.Flags().Changed("json") { @@ -622,9 +634,15 @@ func newMkdirs() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = mkdirsJson.Unmarshal(&mkdirsReq) - if err != nil { - return err + diags := mkdirsJson.Unmarshal(&mkdirsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } else { if len(args) == 0 { @@ -710,9 +728,15 @@ func newSetPermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = setPermissionsJson.Unmarshal(&setPermissionsReq) - if err != nil { - return err + diags := setPermissionsJson.Unmarshal(&setPermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } setPermissionsReq.WorkspaceObjectType = args[0] @@ -781,9 +805,15 @@ func newUpdatePermissions() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) - if err != nil { - return err + diags := updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } } } updatePermissionsReq.WorkspaceObjectType = args[0] diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index 4114db5ca..72d95978a 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -13,6 +13,7 @@ import ( "text/template" "time" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/listing" "github.com/fatih/color" @@ -285,70 +286,72 @@ func RenderIteratorJson[T any](ctx context.Context, i listing.Iterator[T]) error return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, c.headerTemplate, c.template) } +var renderFuncMap = template.FuncMap{ + // we render colored output if stdout is TTY, otherwise we render text. + // in the future we'll check if we can explicitly check for stderr being + // a TTY + "header": color.BlueString, + "red": color.RedString, + "green": color.GreenString, + "blue": color.BlueString, + "yellow": color.YellowString, + "magenta": color.MagentaString, + "cyan": color.CyanString, + "bold": func(format string, a ...interface{}) string { + return color.New(color.Bold).Sprintf(format, a...) + }, + "italic": func(format string, a ...interface{}) string { + return color.New(color.Italic).Sprintf(format, a...) 
+ }, + "replace": strings.ReplaceAll, + "join": strings.Join, + "bool": func(v bool) string { + if v { + return color.GreenString("YES") + } + return color.RedString("NO") + }, + "pretty_json": func(in string) (string, error) { + var tmp any + err := json.Unmarshal([]byte(in), &tmp) + if err != nil { + return "", err + } + b, err := fancyJSON(tmp) + if err != nil { + return "", err + } + return string(b), nil + }, + "pretty_date": func(t time.Time) string { + return t.Format("2006-01-02T15:04:05Z") + }, + "b64_encode": func(in string) (string, error) { + var out bytes.Buffer + enc := base64.NewEncoder(base64.StdEncoding, &out) + _, err := enc.Write([]byte(in)) + if err != nil { + return "", err + } + err = enc.Close() + if err != nil { + return "", err + } + return out.String(), nil + }, + "b64_decode": func(in string) (string, error) { + dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(in)) + out, err := io.ReadAll(dec) + if err != nil { + return "", err + } + return string(out), nil + }, +} + func renderUsingTemplate(ctx context.Context, r templateRenderer, w io.Writer, headerTmpl, tmpl string) error { tw := tabwriter.NewWriter(w, 0, 4, 2, ' ', 0) - base := template.New("command").Funcs(template.FuncMap{ - // we render colored output if stdout is TTY, otherwise we render text. - // in the future we'll check if we can explicitly check for stderr being - // a TTY - "header": color.BlueString, - "red": color.RedString, - "green": color.GreenString, - "blue": color.BlueString, - "yellow": color.YellowString, - "magenta": color.MagentaString, - "cyan": color.CyanString, - "bold": func(format string, a ...interface{}) string { - return color.New(color.Bold).Sprintf(format, a...) - }, - "italic": func(format string, a ...interface{}) string { - return color.New(color.Italic).Sprintf(format, a...) 
- }, - "replace": strings.ReplaceAll, - "join": strings.Join, - "bool": func(v bool) string { - if v { - return color.GreenString("YES") - } - return color.RedString("NO") - }, - "pretty_json": func(in string) (string, error) { - var tmp any - err := json.Unmarshal([]byte(in), &tmp) - if err != nil { - return "", err - } - b, err := fancyJSON(tmp) - if err != nil { - return "", err - } - return string(b), nil - }, - "pretty_date": func(t time.Time) string { - return t.Format("2006-01-02T15:04:05Z") - }, - "b64_encode": func(in string) (string, error) { - var out bytes.Buffer - enc := base64.NewEncoder(base64.StdEncoding, &out) - _, err := enc.Write([]byte(in)) - if err != nil { - return "", err - } - err = enc.Close() - if err != nil { - return "", err - } - return out.String(), nil - }, - "b64_decode": func(in string) (string, error) { - dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(in)) - out, err := io.ReadAll(dec) - if err != nil { - return "", err - } - return string(out), nil - }, - }) + base := template.New("command").Funcs(renderFuncMap) if headerTmpl != "" { headerT, err := base.Parse(headerTmpl) if err != nil { @@ -388,3 +391,77 @@ func fancyJSON(v any) ([]byte, error) { return jsoncolor.MarshalIndentWithFormatter(v, "", " ", f) } + +const errorTemplate = `{{ "Error" | red }}: {{ .Summary }} +{{- range $index, $element := .Paths }} + {{ if eq $index 0 }}at {{else}} {{ end}}{{ $element.String | green }} +{{- end }} +{{- range $index, $element := .Locations }} + {{ if eq $index 0 }}in {{else}} {{ end}}{{ $element.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} +{{- range $index, $element := .Paths }} + {{ if eq $index 0 }}at {{else}} {{ end}}{{ $element.String | green }} +{{- end }} +{{- range $index, $element := .Locations }} + {{ if eq $index 0 }}in {{else}} {{ end}}{{ $element.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +const recommendationTemplate = `{{ "Recommendation" | blue }}: {{ .Summary }} +{{- range $index, $element := .Paths }} + {{ if eq $index 0 }}at {{else}} {{ end}}{{ $element.String | green }} +{{- end }} +{{- range $index, $element := .Locations }} + {{ if eq $index 0 }}in {{else}} {{ end}}{{ $element.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +func RenderDiagnosticsToErrorOut(ctx context.Context, diags diag.Diagnostics) error { + c := fromContext(ctx) + return RenderDiagnostics(c.err, diags) +} + +func RenderDiagnostics(out io.Writer, diags diag.Diagnostics) error { + errorT := template.Must(template.New("error").Funcs(renderFuncMap).Parse(errorTemplate)) + warningT := template.Must(template.New("warning").Funcs(renderFuncMap).Parse(warningTemplate)) + recommendationT := template.Must(template.New("recommendation").Funcs(renderFuncMap).Parse(recommendationTemplate)) + + // Print errors and warnings. + for _, d := range diags { + var t *template.Template + switch d.Severity { + case diag.Error: + t = errorT + case diag.Warning: + t = warningT + case diag.Recommendation: + t = recommendationT + } + + // Render the diagnostic with the appropriate template. 
+ err := t.Execute(out, d) + if err != nil { + return fmt.Errorf("failed to render template: %w", err) + } + } + + return nil +} diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index 839d0111a..e2aa5d263 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -221,10 +221,10 @@ func toTypedBool(dst reflect.Value, src dyn.Value) error { case dyn.KindString: // See https://github.com/go-yaml/yaml/blob/f6f7691b1fdeb513f56608cd2c32c51f8194bf51/decode.go#L684-L693. switch src.MustString() { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", "true": dst.SetBool(true) return nil - case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + case "n", "N", "no", "No", "NO", "off", "Off", "OFF", "false": dst.SetBool(false) return nil } @@ -246,6 +246,19 @@ func toTypedInt(dst reflect.Value, src dyn.Value) error { case dyn.KindInt: dst.SetInt(src.MustInt()) return nil + case dyn.KindFloat: + v := src.MustFloat() + if v == float64(int64(v)) { + // If the destination is smaller than int64, but the value to set is bigger + // then destination overflows and is set to -1 + dst.SetInt(int64(src.MustFloat())) + return nil + } + + return TypeError{ + value: src, + msg: fmt.Sprintf("expected an int, found a %s", src.Kind()), + } case dyn.KindString: if i64, err := strconv.ParseInt(src.MustString(), 10, 64); err == nil { dst.SetInt(i64) diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 37d85539c..78221c299 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -341,14 +341,14 @@ func TestToTypedBoolFromString(t *testing.T) { var out bool // True-ish - for _, v := range []string{"y", "yes", "on"} { + for _, v := range []string{"y", "yes", "on", "true"} { err := ToTyped(&out, dyn.V(v)) require.NoError(t, err) assert.Equal(t, true, out) } // False-ish - for _, v := range []string{"n", "no", "off"} { + for _, v := range []string{"n", "no", "off", "false"} { err := ToTyped(&out, dyn.V(v)) require.NoError(t, err) assert.Equal(t, false, out) @@ -428,6 +428,19 @@ func TestToTypedIntFromStringVariableReference(t *testing.T) { assert.Equal(t, int(0), out) } +func TestToTypedIntFromFloat(t *testing.T) { + var out int + err := ToTyped(&out, dyn.V(1.0)) + require.NoError(t, err) + assert.Equal(t, int(1), out) +} + +func TestToTypedIntFromFloatError(t *testing.T) { + var out int + err := ToTyped(&out, dyn.V(1.2)) + require.ErrorContains(t, err, "expected an int, found a float") +} + func TestToTypedFloat32(t *testing.T) { var out float32 err := ToTyped(&out, dyn.V(float32(1.0))) diff --git a/libs/dyn/jsonloader/json.go b/libs/dyn/jsonloader/json.go new file mode 100644 index 000000000..cbf539263 --- /dev/null +++ b/libs/dyn/jsonloader/json.go @@ -0,0 +1,107 @@ +package jsonloader + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + + "github.com/databricks/cli/libs/dyn" +) + +func LoadJSON(data []byte, source string) (dyn.Value, error) { + offset := BuildLineOffsets(data) + offset.SetSource(source) + + reader := bytes.NewReader(data) + decoder := json.NewDecoder(reader) + + // Start decoding from the top-level value + value, err := decodeValue(decoder, &offset) + if err != nil { + if err == io.EOF { + err = fmt.Errorf("unexpected end of JSON input") + } + return dyn.InvalidValue, fmt.Errorf("error decoding JSON at %s: %v", value.Location(), err) + } + return value, nil +} + +func decodeValue(decoder *json.Decoder, o *Offset) (dyn.Value, error) 
{ + // Read the next JSON token + token, err := decoder.Token() + if err != nil { + return dyn.InvalidValue, err + } + + // Get the current byte offset and the location. + // We will later use this location to store the location of the value in the file + // For objects and arrays, we will store the location of the opening '{' or '[' + // For primitive types, we will store the location of the value itself (end of the value) + // We can't reliably calculate the beginning of the value for primitive types because + // the decoder doesn't provide the offset of the beginning of the value and the value might or might not be quoted. + offset := decoder.InputOffset() + location := o.GetPosition(offset) + + switch tok := token.(type) { + case json.Delim: + if tok == '{' { + location = o.GetPosition(offset - 1) + // Decode JSON object + obj := dyn.NewMapping() + for decoder.More() { + // Decode the key + keyToken, err := decoder.Token() + if err != nil { + return invalidValueWithLocation(decoder, o), err + } + key, ok := keyToken.(string) + if !ok { + return invalidValueWithLocation(decoder, o), fmt.Errorf("expected string for object key") + } + + // Get the offset of the key by subtracting the length of the key and the '"' character + keyOffset := decoder.InputOffset() - int64(len(key)+1) + keyVal := dyn.NewValue(key, []dyn.Location{o.GetPosition(keyOffset)}) + + // Decode the value recursively + val, err := decodeValue(decoder, o) + if err != nil { + return invalidValueWithLocation(decoder, o), err + } + + obj.Set(keyVal, val) + } + // Consume the closing '}' + if _, err := decoder.Token(); err != nil { + return invalidValueWithLocation(decoder, o), err + } + return dyn.NewValue(obj, []dyn.Location{location}), nil + } else if tok == '[' { + location = o.GetPosition(offset - 1) + // Decode JSON array + var arr []dyn.Value + for decoder.More() { + val, err := decodeValue(decoder, o) + if err != nil { + return invalidValueWithLocation(decoder, o), err + } + arr = append(arr, val) + } + // Consume the closing ']' + if _, err := decoder.Token(); err != nil { + return invalidValueWithLocation(decoder, o), err + } + return dyn.NewValue(arr, []dyn.Location{location}), nil + } + default: + return dyn.NewValue(tok, []dyn.Location{location}), nil + } + + return invalidValueWithLocation(decoder, o), fmt.Errorf("unexpected token: %v", token) +} + +func invalidValueWithLocation(decoder *json.Decoder, o *Offset) dyn.Value { + location := o.GetPosition(decoder.InputOffset()) + return dyn.InvalidValue.WithLocations([]dyn.Location{location}) +} diff --git a/libs/dyn/jsonloader/json_test.go b/libs/dyn/jsonloader/json_test.go new file mode 100644 index 000000000..7fdc4d7f5 --- /dev/null +++ b/libs/dyn/jsonloader/json_test.go @@ -0,0 +1,93 @@ +package jsonloader + +import ( + "testing" + + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +const jsonData = ` +{ + "job_id": 123, + "new_settings": { + "name": "xxx", + "email_notifications": { + "on_start": [], + "on_success": [], + "on_failure": [] + }, + "webhook_notifications": { + "on_start": [], + "on_failure": [] + }, + "notification_settings": { + "no_alert_for_skipped_runs": true, + "no_alert_for_canceled_runs": true + }, + "timeout_seconds": 0, + "max_concurrent_runs": 1, + "tasks": [ + { + "task_key": "xxx", + "email_notifications": {}, + "notification_settings": {}, + "timeout_seconds": 0, + "max_retries": 0, + "min_retry_interval_millis": 0, + 
"retry_on_timeout": "true" + } + ] + } +} +` + +func TestJsonLoader(t *testing.T) { + v, err := LoadJSON([]byte(jsonData), "(inline)") + dynassert.NoError(t, err) + + var r jobs.ResetJob + err = convert.ToTyped(&r, v) + dynassert.NoError(t, err) +} + +const malformedMap = ` +{ + "job_id": 123, + "new_settings": { + "name": "xxx", + "wrong", + } +} +` + +func TestJsonLoaderMalformedMap(t *testing.T) { + _, err := LoadJSON([]byte(malformedMap), "(inline)") + dynassert.ErrorContains(t, err, "error decoding JSON at (inline):6:16: invalid character ',' after object key") +} + +const malformedArray = ` +{ + "job_id": 123, + "new_settings": { + "name": "xxx", + "tasks": [1, "asd",] + } +}` + +func TestJsonLoaderMalformedArray(t *testing.T) { + _, err := LoadJSON([]byte(malformedArray), "path/to/file.json") + dynassert.ErrorContains(t, err, "error decoding JSON at path/to/file.json:6:28: invalid character ']' looking for beginning of value") +} + +const eofData = ` +{ + "job_id": 123, + "new_settings": { + "name": "xxx",` + +func TestJsonLoaderEOF(t *testing.T) { + _, err := LoadJSON([]byte(eofData), "path/to/file.json") + dynassert.ErrorContains(t, err, "unexpected end of JSON input") +} diff --git a/libs/dyn/jsonloader/locations.go b/libs/dyn/jsonloader/locations.go new file mode 100644 index 000000000..a692c7d03 --- /dev/null +++ b/libs/dyn/jsonloader/locations.go @@ -0,0 +1,53 @@ +package jsonloader + +import ( + "sort" + + "github.com/databricks/cli/libs/dyn" +) + +type LineOffset struct { + Line int + Start int64 +} + +type Offset struct { + offsets []LineOffset + source string +} + +// buildLineOffsets scans the input data and records the starting byte offset of each line. +func BuildLineOffsets(data []byte) Offset { + offsets := []LineOffset{{Line: 1, Start: 0}} + line := 1 + for i, b := range data { + if b == '\n' { + line++ + offsets = append(offsets, LineOffset{Line: line, Start: int64(i + 1)}) + } + } + return Offset{offsets: offsets} +} + +// GetPosition maps a byte offset to its corresponding line and column numbers. +func (o Offset) GetPosition(offset int64) dyn.Location { + // Binary search to find the line + idx := sort.Search(len(o.offsets), func(i int) bool { + return o.offsets[i].Start > offset + }) - 1 + + if idx < 0 { + idx = 0 + } + + lineOffset := o.offsets[idx] + return dyn.Location{ + File: o.source, + Line: lineOffset.Line, + Column: int(offset-lineOffset.Start) + 1, + } +} + +func (o *Offset) SetSource(source string) { + o.source = source +} diff --git a/libs/flags/json_flag.go b/libs/flags/json_flag.go index 8dbc3b2d9..0ed8be74a 100644 --- a/libs/flags/json_flag.go +++ b/libs/flags/json_flag.go @@ -4,10 +4,16 @@ import ( "encoding/json" "fmt" "os" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/jsonloader" + "github.com/databricks/databricks-sdk-go/marshal" ) type JsonFlag struct { - raw []byte + raw []byte + source string } func (j *JsonFlag) String() string { @@ -19,21 +25,52 @@ func (j *JsonFlag) Set(v string) error { // Load request from file if it starts with '@' (like curl). 
if v[0] != '@' { j.raw = []byte(v) + j.source = "(inline)" return nil } - buf, err := os.ReadFile(v[1:]) + filePath := v[1:] + buf, err := os.ReadFile(filePath) + j.source = filePath if err != nil { - return fmt.Errorf("read %s: %w", v, err) + return fmt.Errorf("read %s: %w", filePath, err) } j.raw = buf return nil } -func (j *JsonFlag) Unmarshal(v any) error { +func (j *JsonFlag) Unmarshal(v any) diag.Diagnostics { if j.raw == nil { return nil } - return json.Unmarshal(j.raw, v) + + dv, err := jsonloader.LoadJSON(j.raw, j.source) + if err != nil { + return diag.FromErr(err) + } + + // First normalize the input data. + // It will convert all the values to the correct types. + // For example string literals for booleans and integers will be converted to the correct types. + nv, diags := convert.Normalize(v, dv) + if diags.HasError() { + return diags + } + + // Then marshal the normalized data to the output. + // It will serialize all set data with the correct types. + data, err := json.Marshal(nv.AsAny()) + if err != nil { + return diags.Extend(diag.FromErr(err)) + } + + // Finally unmarshal the normalized data to the output. + // It will fill in the ForceSendFields field if the struct contains it. + err = marshal.Unmarshal(data, v) + if err != nil { + return diags.Extend(diag.FromErr(err)) + } + + return diags } func (j *JsonFlag) Type() string { diff --git a/libs/flags/json_flag_test.go b/libs/flags/json_flag_test.go index 2a8170fe6..16769f4e1 100644 --- a/libs/flags/json_flag_test.go +++ b/libs/flags/json_flag_test.go @@ -6,18 +6,26 @@ import ( "path" "testing" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +type requestType struct { + Foo string `json:"foo"` +} + func TestJsonFlagEmpty(t *testing.T) { var body JsonFlag var request any - err := body.Unmarshal(&request) + diags := body.Unmarshal(&request) assert.Equal(t, "JSON (0 bytes)", body.String()) - assert.NoError(t, err) + assert.NoError(t, diags.Error()) + assert.Empty(t, diags) assert.Nil(t, request) } @@ -27,12 +35,13 @@ func TestJsonFlagInline(t *testing.T) { err := body.Set(`{"foo": "bar"}`) assert.NoError(t, err) - var request any - err = body.Unmarshal(&request) - assert.NoError(t, err) + var request requestType + diags := body.Unmarshal(&request) + assert.NoError(t, diags.Error()) + assert.Empty(t, diags) assert.Equal(t, "JSON (14 bytes)", body.String()) - assert.Equal(t, map[string]any{"foo": "bar"}, request) + assert.Equal(t, requestType{"bar"}, request) } func TestJsonFlagError(t *testing.T) { @@ -41,18 +50,18 @@ func TestJsonFlagError(t *testing.T) { err := body.Set(`{"foo":`) assert.NoError(t, err) - var request any - err = body.Unmarshal(&request) - assert.EqualError(t, err, "unexpected end of JSON input") + var request requestType + diags := body.Unmarshal(&request) + assert.EqualError(t, diags.Error(), "error decoding JSON at (inline):1:8: unexpected end of JSON input") assert.Equal(t, "JSON (7 bytes)", body.String()) } func TestJsonFlagFile(t *testing.T) { var body JsonFlag - var request any + var request requestType var fpath string - var payload = []byte(`"hello world"`) + var payload = []byte(`{"foo": "bar"}`) { f, err := os.Create(path.Join(t.TempDir(), "file")) @@ -65,8 +74,217 @@ func TestJsonFlagFile(t *testing.T) { err := body.Set(fmt.Sprintf("@%s", fpath)) require.NoError(t, err) - err = body.Unmarshal(&request) + diags := 
body.Unmarshal(&request) + assert.NoError(t, diags.Error()) + assert.Empty(t, diags) + + assert.Equal(t, requestType{"bar"}, request) +} + +const jsonData = ` +{ + "job_id": 123, + "new_settings": { + "name": "new job", + "email_notifications": { + "on_start": [], + "on_success": [], + "on_failure": [] + }, + "notification_settings": { + "no_alert_for_canceled_runs": false + }, + "timeout_seconds": 0, + "max_concurrent_runs": 1, + "tasks": [ + { + "task_key": "new task", + "email_notifications": {}, + "notification_settings": {}, + "timeout_seconds": 0, + "max_retries": 0, + "min_retry_interval_millis": 0, + "retry_on_timeout": "true" + } + ] + } +} +` + +func TestJsonUnmarshalForRequest(t *testing.T) { + var body JsonFlag + + var r jobs.ResetJob + err := body.Set(jsonData) require.NoError(t, err) - assert.Equal(t, "hello world", request) + diags := body.Unmarshal(&r) + assert.NoError(t, diags.Error()) + assert.Empty(t, diags) + + assert.Equal(t, int64(123), r.JobId) + assert.Equal(t, "new job", r.NewSettings.Name) + assert.Equal(t, 0, r.NewSettings.TimeoutSeconds) + assert.Equal(t, 1, r.NewSettings.MaxConcurrentRuns) + assert.Equal(t, 1, len(r.NewSettings.Tasks)) + assert.Equal(t, "new task", r.NewSettings.Tasks[0].TaskKey) + assert.Equal(t, 0, r.NewSettings.Tasks[0].TimeoutSeconds) + assert.Equal(t, 0, r.NewSettings.Tasks[0].MaxRetries) + assert.Equal(t, 0, r.NewSettings.Tasks[0].MinRetryIntervalMillis) + assert.Equal(t, true, r.NewSettings.Tasks[0].RetryOnTimeout) +} + +const incorrectJsonData = `{ + "job_id": 123, + "settings": { + "name": "new job", + "email_notifications": { + "on_start": [], + "on_success": [], + "on_failure": [] + }, + "notification_settings": { + "no_alert_for_skipped_runs": true, + "no_alert_for_canceled_runs": true + }, + "timeout_seconds": {}, + "max_concurrent_runs": {}, + "tasks": [ + { + "task_key": "new task", + "email_notifications": {}, + "notification_settings": {}, + "timeout_seconds": 0, + "max_retries": 0, + "min_retry_interval_millis": 0, + "retry_on_timeout": "true" + } + ] + } +} +` + +func TestJsonUnmarshalRequestMismatch(t *testing.T) { + var body JsonFlag + + var r jobs.ResetJob + err := body.Set(incorrectJsonData) + require.NoError(t, err) + + diags := body.Unmarshal(&r) + assert.NoError(t, diags.Error()) + assert.NotEmpty(t, diags) + + assert.Contains(t, diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "unknown field: settings", + Locations: []dyn.Location{ + { + File: "(inline)", + Line: 3, + Column: 6, + }, + }, + Paths: []dyn.Path{{}}, + }) +} + +const wrontTypeJsonData = `{ + "job_id": 123, + "new_settings": { + "name": "new job", + "email_notifications": { + "on_start": [], + "on_success": [], + "on_failure": [] + }, + "notification_settings": { + "no_alert_for_skipped_runs": true, + "no_alert_for_canceled_runs": true + }, + "timeout_seconds": "wrong_type", + "max_concurrent_runs": {}, + "tasks": [ + { + "task_key": "new task", + "email_notifications": {}, + "notification_settings": {}, + "timeout_seconds": 0, + "max_retries": 0, + "min_retry_interval_millis": 0, + "retry_on_timeout": "true" + } + ] + } +} +` + +func TestJsonUnmarshalWrongTypeReportsCorrectLocation(t *testing.T) { + var body JsonFlag + + var r jobs.ResetJob + err := body.Set(`{ + "job_id": [1, 2, 3] +} +`) + require.NoError(t, err) + + diags := body.Unmarshal(&r) + assert.NoError(t, diags.Error()) + assert.NotEmpty(t, diags) + + assert.Contains(t, diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "expected int, found sequence", + Locations: 
[]dyn.Location{ + { + File: "(inline)", + Line: 2, + Column: 15, + }, + }, + Paths: []dyn.Path{dyn.NewPath(dyn.Key("job_id"))}, + }) +} + +func TestJsonUnmarshalArrayInsteadOfIntReportsCorrectLocation(t *testing.T) { + var body JsonFlag + + var r jobs.ResetJob + err := body.Set(wrontTypeJsonData) + require.NoError(t, err) + + diags := body.Unmarshal(&r) + assert.NoError(t, diags.Error()) + assert.NotEmpty(t, diags) + + assert.Contains(t, diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "cannot parse \"wrong_type\" as an integer", + Locations: []dyn.Location{ + { + File: "(inline)", + Line: 14, + Column: 40, + }, + }, + Paths: []dyn.Path{dyn.NewPath(dyn.Key("new_settings"), dyn.Key("timeout_seconds"))}, + }) +} + +func TestJsonUnmarshalForRequestWithForceSendFields(t *testing.T) { + var body JsonFlag + + var r jobs.ResetJob + err := body.Set(jsonData) + require.NoError(t, err) + + diags := body.Unmarshal(&r) + assert.NoError(t, diags.Error()) + assert.Empty(t, diags) + + assert.Equal(t, false, r.NewSettings.NotificationSettings.NoAlertForSkippedRuns) + assert.Equal(t, false, r.NewSettings.NotificationSettings.NoAlertForCanceledRuns) + assert.NotContains(t, r.NewSettings.NotificationSettings.ForceSendFields, "NoAlertForSkippedRuns") + assert.Contains(t, r.NewSettings.NotificationSettings.ForceSendFields, "NoAlertForCanceledRuns") } From ab206242060a05bd7a3b5073cc3a6869973446f3 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 14 Oct 2024 21:49:48 +0530 Subject: [PATCH 06/25] Assert SDK version is consistent in the CLI generation process (#1814) ## Changes Followup from https://github.com/databricks/cli/pull/1809#discussion_r1790045086. User will see the following error if the SDK version does not match during CLI code generation. ``` --- FAIL: TestConsistentDatabricksSdkVersion (1.34s) info_test.go:53: Error Trace: /Users/shreyas.goenka/cli/internal/build/info_test.go:53 Error: Not equal: expected: "0c86ea6dbd9a730c24ff0d4e509603e476955ac5" actual : "6f6b1371e640f2dfeba72d365ac566368656f6b6" Diff: --- Expected +++ Actual @@ -1 +1 @@ -0c86ea6dbd9a730c24ff0d4e509603e476955ac5 +6f6b1371e640f2dfeba72d365ac566368656f6b6 Test: TestConsistentDatabricksSdkVersion Messages: please update the SDK version before generating the CLI ``` ## Tests Manually asserted that: 1. Generating the CLI without the SDK bump causes an error. 2. Generating the CLI after the SDK bump works as expected. 3. 
The test works even when the SDK is pinned to a specific commit like `v0.47.1-0.20241002195128-6cecc224cbf7` --- .codegen.json | 1 + internal/build/sdk_consistency_test.go | 73 ++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) create mode 100644 internal/build/sdk_consistency_test.go diff --git a/.codegen.json b/.codegen.json index 077d072bb..4524ab55d 100644 --- a/.codegen.json +++ b/.codegen.json @@ -11,6 +11,7 @@ "toolchain": { "required": ["go"], "post_generate": [ + "go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build", "go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json", "echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes", "echo 'go.sum linguist-generated=true' >> ./.gitattributes", diff --git a/internal/build/sdk_consistency_test.go b/internal/build/sdk_consistency_test.go new file mode 100644 index 000000000..37f60250c --- /dev/null +++ b/internal/build/sdk_consistency_test.go @@ -0,0 +1,73 @@ +package build + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/mod/modfile" +) + +// This test ensures that the OpenAPI SHA the CLI is being generated from matches +// the OpenAPI SHA of the Go SDK version used in the CLI. We should always upgrade +// the Go SDK version before generating the CLI because downstream generated assets +// like the bundle schema depend on the Go SDK itself. +func TestConsistentDatabricksSdkVersion(t *testing.T) { + // Read the go.mod file + b, err := os.ReadFile("../../go.mod") + require.NoError(t, err) + + // Parse the go.mod file to get the databricks-sdk version + modFile, err := modfile.Parse("../../go.mod", b, nil) + require.NoError(t, err) + + modulePath := "github.com/databricks/databricks-sdk-go" + var version string + for _, r := range modFile.Require { + if r.Mod.Path == modulePath { + version = r.Mod.Version + } + } + require.NotEmpty(t, version) + + // Full path of the package. For example: github.com/databricks/databricks-sdk-go@v0.47.1-0.20241002195128-6cecc224cbf7 + fullPath := fmt.Sprintf("%s@%s", modulePath, version) + + type goListResponse struct { + Origin struct { + Hash string + } + } + + // Using the go CLI query for the git hash corresponding to the databricks-sdk-go version + cmd := exec.Command("go", "list", "-m", "-json", "-mod=readonly", fullPath) + out, err := cmd.Output() + require.NoError(t, err) + parsedOutput := new(goListResponse) + err = json.Unmarshal(out, parsedOutput) + require.NoError(t, err) + hash := parsedOutput.Origin.Hash + require.NotEmpty(t, hash) + + // Read the OpenAPI SHA from the Go SDK. 
+ url := fmt.Sprintf("https://raw.githubusercontent.com/databricks/databricks-sdk-go/%s/.codegen/_openapi_sha", hash) + resp, err := http.Get(url) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + + sdkSha, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + cliSha, err := os.ReadFile("../../.codegen/_openapi_sha") + require.NoError(t, err) + + assert.Equal(t, strings.TrimSpace(string(cliSha)), strings.TrimSpace(string(sdkSha)), "please update the SDK version before generating the CLI") +} From 0753dfe2f488c834724e2bfdf1618ebe8db271ab Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 15 Oct 2024 14:10:02 +0200 Subject: [PATCH 07/25] Fixed unmarshalling json input into `interface{}` type (#1832) ## Changes Fixed unmarshalling json input into `interface{}` type Commands like `api post` support free form request input, so it should be unmarshaled correctly ## Tests Added regression test + E2E test pass --- libs/flags/json_flag.go | 24 +++++++++++++++++++----- libs/flags/json_flag_test.go | 14 +++++--------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/libs/flags/json_flag.go b/libs/flags/json_flag.go index 0ed8be74a..0324f1145 100644 --- a/libs/flags/json_flag.go +++ b/libs/flags/json_flag.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "reflect" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn/convert" @@ -63,11 +64,24 @@ func (j *JsonFlag) Unmarshal(v any) diag.Diagnostics { return diags.Extend(diag.FromErr(err)) } - // Finally unmarshal the normalized data to the output. - // It will fill in the ForceSendFields field if the struct contains it. - err = marshal.Unmarshal(data, v) - if err != nil { - return diags.Extend(diag.FromErr(err)) + kind := reflect.ValueOf(v).Kind() + if kind == reflect.Ptr { + kind = reflect.ValueOf(v).Elem().Kind() + } + + if kind == reflect.Struct { + // Finally unmarshal the normalized data to the output. + // It will fill in the ForceSendFields field if the struct contains it. + err = marshal.Unmarshal(data, v) + if err != nil { + return diags.Extend(diag.FromErr(err)) + } + } else { + // If the output is not a struct, just unmarshal the data to the output. 
+ err = json.Unmarshal(data, v) + if err != nil { + return diags.Extend(diag.FromErr(err)) + } } return diags diff --git a/libs/flags/json_flag_test.go b/libs/flags/json_flag_test.go index 16769f4e1..77530086a 100644 --- a/libs/flags/json_flag_test.go +++ b/libs/flags/json_flag_test.go @@ -13,10 +13,6 @@ import ( "github.com/stretchr/testify/require" ) -type requestType struct { - Foo string `json:"foo"` -} - func TestJsonFlagEmpty(t *testing.T) { var body JsonFlag @@ -35,13 +31,13 @@ func TestJsonFlagInline(t *testing.T) { err := body.Set(`{"foo": "bar"}`) assert.NoError(t, err) - var request requestType + var request any diags := body.Unmarshal(&request) assert.NoError(t, diags.Error()) assert.Empty(t, diags) assert.Equal(t, "JSON (14 bytes)", body.String()) - assert.Equal(t, requestType{"bar"}, request) + assert.Equal(t, map[string]any{"foo": "bar"}, request) } func TestJsonFlagError(t *testing.T) { @@ -50,7 +46,7 @@ func TestJsonFlagError(t *testing.T) { err := body.Set(`{"foo":`) assert.NoError(t, err) - var request requestType + var request any diags := body.Unmarshal(&request) assert.EqualError(t, diags.Error(), "error decoding JSON at (inline):1:8: unexpected end of JSON input") assert.Equal(t, "JSON (7 bytes)", body.String()) @@ -58,7 +54,7 @@ func TestJsonFlagError(t *testing.T) { func TestJsonFlagFile(t *testing.T) { var body JsonFlag - var request requestType + var request any var fpath string var payload = []byte(`{"foo": "bar"}`) @@ -78,7 +74,7 @@ func TestJsonFlagFile(t *testing.T) { assert.NoError(t, diags.Error()) assert.Empty(t, diags) - assert.Equal(t, requestType{"bar"}, request) + assert.Equal(t, map[string]any{"foo": "bar"}, request) } const jsonData = ` From cc112961ce8aa3ced65635d5174aa38d5a27be8a Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 16 Oct 2024 18:20:17 +0530 Subject: [PATCH 08/25] Fix `TestAccFsMkdirWhenFileExistsAtPath` in isolated Azure environments (#1833) ## Changes This test passes on normal `azure-prod` but started to fail on `azure-prod-is`, which is the isolated version of azure-prod. This PR patches the test to include the error returned from the cloud setup in `azure-prod-is`. ## Tests The test passes now on `azure-prod-is`. --- internal/fs_mkdir_test.go | 4 ++-- internal/helpers.go | 10 ++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/internal/fs_mkdir_test.go b/internal/fs_mkdir_test.go index dd75c7c32..9191f6143 100644 --- a/internal/fs_mkdir_test.go +++ b/internal/fs_mkdir_test.go @@ -112,8 +112,8 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { // assert mkdir fails _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) - // Different cloud providers return different errors. - regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`) + // Different cloud providers or cloud configurations return different errors. 
+ regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$|(^|: )"The specified path already exists.".*$`) assert.Regexp(t, regex, err.Error()) }) diff --git a/internal/helpers.go b/internal/helpers.go index 9387706bb..3bf387757 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -20,6 +20,7 @@ import ( "time" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/cmd" @@ -591,13 +592,10 @@ func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { } func setupDbfsFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + _, wt := acc.WorkspaceTest(t) - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - f, err := filer.NewDbfsClient(w, tmpDir) + tmpDir := TemporaryDbfsDir(t, wt.W) + f, err := filer.NewDbfsClient(wt.W, tmpDir) require.NoError(t, err) return f, path.Join("dbfs:/", tmpDir) From e4d039a1aa6fb7b29dd029a58aeae9b137fdb179 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 17 Oct 2024 12:00:40 +0200 Subject: [PATCH 09/25] Handle normalization of `dyn.KindTime` into an any type (#1836) ## Changes The issue reported in #1828 illustrates how using a YAML timestamp-like value (a date in this case) causes an issue during conversion to and from the typed configuration tree. We use the `AsAny()` function on the `dyn.Value` when normalizing for the `any` type. We only use the `any` type for variable values, because they can assume every type. The `AsAny()` function returns a `time.Time` for the time value during conversion **to** the typed configuration tree. Upon conversion **from** the typed configuration tree back into the dynamic configuration tree, we cannot distinguish a `time.Time` struct from any other struct. To address this, we use the underlying string value of the time value when we normalize for the `any` type. Fixes #1828. ## Tests Existing unit tests pass --- bundle/tests/issue_1828/databricks.yml | 33 +++++++++++++++++ bundle/tests/issue_1828_test.go | 48 +++++++++++++++++++++++++ libs/dyn/convert/normalize.go | 30 +++++++++++++++- libs/dyn/convert/normalize_test.go | 50 +++++++++++++++++--------- 4 files changed, 143 insertions(+), 18 deletions(-) create mode 100644 bundle/tests/issue_1828/databricks.yml create mode 100644 bundle/tests/issue_1828_test.go diff --git a/bundle/tests/issue_1828/databricks.yml b/bundle/tests/issue_1828/databricks.yml new file mode 100644 index 000000000..d5f60ce73 --- /dev/null +++ b/bundle/tests/issue_1828/databricks.yml @@ -0,0 +1,33 @@ +bundle: + name: issue_1828 + +variables: + # One entry for each of the underlying YAML (or [dyn.Kind]) types. + # The test confirms we can convert to and from the typed configuration without losing information. 
+ + map: + default: + foo: bar + + sequence: + default: + - foo + - bar + + string: + default: foo + + bool: + default: true + + int: + default: 42 + + float: + default: 3.14 + + time: + default: 2021-01-01 + + nil: + default: diff --git a/bundle/tests/issue_1828_test.go b/bundle/tests/issue_1828_test.go new file mode 100644 index 000000000..5f2becce5 --- /dev/null +++ b/bundle/tests/issue_1828_test.go @@ -0,0 +1,48 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIssue1828(t *testing.T) { + b := load(t, "./issue_1828") + + if assert.Contains(t, b.Config.Variables, "map") { + assert.Equal(t, map[string]any{ + "foo": "bar", + }, b.Config.Variables["map"].Default) + } + + if assert.Contains(t, b.Config.Variables, "sequence") { + assert.Equal(t, []any{ + "foo", + "bar", + }, b.Config.Variables["sequence"].Default) + } + + if assert.Contains(t, b.Config.Variables, "string") { + assert.Equal(t, "foo", b.Config.Variables["string"].Default) + } + + if assert.Contains(t, b.Config.Variables, "bool") { + assert.Equal(t, true, b.Config.Variables["bool"].Default) + } + + if assert.Contains(t, b.Config.Variables, "int") { + assert.Equal(t, 42, b.Config.Variables["int"].Default) + } + + if assert.Contains(t, b.Config.Variables, "float") { + assert.Equal(t, 3.14, b.Config.Variables["float"].Default) + } + + if assert.Contains(t, b.Config.Variables, "time") { + assert.Equal(t, "2021-01-01", b.Config.Variables["time"].Default) + } + + if assert.Contains(t, b.Config.Variables, "nil") { + assert.Equal(t, nil, b.Config.Variables["nil"].Default) + } +} diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index bc80a150a..106add35d 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -398,6 +398,34 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path d return dyn.NewValue(out, src.Locations()), diags } -func (n normalizeOptions) normalizeInterface(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeInterface(_ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { + // Deal with every [dyn.Kind] here to ensure completeness. + switch src.Kind() { + case dyn.KindMap: + // Fall through + case dyn.KindSequence: + // Fall through + case dyn.KindString: + // Fall through + case dyn.KindBool: + // Fall through + case dyn.KindInt: + // Fall through + case dyn.KindFloat: + // Fall through + case dyn.KindTime: + // Conversion of a time value to an interface{}. + // The [dyn.Value.AsAny] equivalent for this kind is the [time.Time] struct. + // If we convert to a typed representation and back again, we cannot distinguish + // a [time.Time] struct from any other struct. + // + // Therefore, we normalize the time value to a string. 
+ return dyn.NewValue(src.MustTime().String(), src.Locations()), nil + case dyn.KindNil: + // Fall through + default: + return dyn.InvalidValue, diag.Errorf("unsupported kind: %s", src.Kind()) + } + return src, nil } diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 4b2a3c189..ab0a1cec1 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -858,23 +858,7 @@ func TestNormalizeAnchors(t *testing.T) { }, vout.AsAny()) } -func TestNormalizeBoolToAny(t *testing.T) { - var typ any - vin := dyn.NewValue(false, []dyn.Location{{File: "file", Line: 1, Column: 1}}) - vout, err := Normalize(&typ, vin) - assert.Len(t, err, 0) - assert.Equal(t, dyn.NewValue(false, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) -} - -func TestNormalizeIntToAny(t *testing.T) { - var typ any - vin := dyn.NewValue(10, []dyn.Location{{File: "file", Line: 1, Column: 1}}) - vout, err := Normalize(&typ, vin) - assert.Len(t, err, 0) - assert.Equal(t, dyn.NewValue(10, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) -} - -func TestNormalizeSliceToAny(t *testing.T) { +func TestNormalizeAnyFromSlice(t *testing.T) { var typ any v1 := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) v2 := dyn.NewValue(2, []dyn.Location{{File: "file", Line: 1, Column: 1}}) @@ -883,3 +867,35 @@ func TestNormalizeSliceToAny(t *testing.T) { assert.Len(t, err, 0) assert.Equal(t, dyn.NewValue([]dyn.Value{v1, v2}, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) } + +func TestNormalizeAnyFromString(t *testing.T) { + var typ any + vin := dyn.NewValue("string", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue("string", []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) +} + +func TestNormalizeAnyFromBool(t *testing.T) { + var typ any + vin := dyn.NewValue(false, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue(false, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) +} + +func TestNormalizeAnyFromInt(t *testing.T) { + var typ any + vin := dyn.NewValue(10, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue(10, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) +} + +func TestNormalizeAnyFromTime(t *testing.T) { + var typ any + vin := dyn.NewValue(dyn.MustTime("2024-08-29"), []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, dyn.NewValue("2024-08-29", vin.Locations()), vout) +} From dedec58e419463f71b6e6a727d0e7ba20572b3d2 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 17 Oct 2024 15:13:30 +0200 Subject: [PATCH 10/25] Add behavioral tests for examples from the YAML spec (#1835) ## Changes I took the examples from https://yaml.org/spec/1.2.2. The required modifications to the loader are: * Correctly parse floating point infinities and NaN * Correctly parse octal numbers per the YAML 1.2 spec * Treat "null" keys in a map as valid ## Tests Existing and new unit tests pass. 
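As context for the loader changes below, here is a self-contained sketch (standard library only; `parseScalar` is a hypothetical helper, not the package's API) of how the affected `!!int` and `!!float` scalars are expected to resolve; the `"null"`-key change is not shown here since it depends on the YAML node type:

```
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// parseScalar illustrates the scalar rules described above for a raw YAML
// value that yaml.v3 has already tagged as !!int or !!float.
func parseScalar(tag, raw string) (any, error) {
	switch tag {
	case "!!int":
		// Try plain base 10 first (leading zeros trimmed, mirroring the
		// loader), so "010" is read as decimal 10 and never reaches the
		// base-0 fallback where a bare "0" prefix means octal.
		if i, err := strconv.ParseInt(strings.TrimLeft(raw, "0"), 10, 64); err == nil {
			return i, nil
		}
		// Base 0 lets ParseInt detect YAML 1.2 forms like 0o14 and 0xC.
		return strconv.ParseInt(raw, 0, 64)
	case "!!float":
		if f, err := strconv.ParseFloat(raw, 64); err == nil {
			return f, nil
		}
		// ParseFloat rejects .inf / +.inf / -.inf / .nan, so handle them here.
		switch strings.ToLower(strings.TrimPrefix(raw, "+")) {
		case ".inf":
			return math.Inf(1), nil
		case "-.inf":
			return math.Inf(-1), nil
		case ".nan":
			return math.NaN(), nil
		}
		return nil, fmt.Errorf("invalid float value: %v", raw)
	}
	return nil, fmt.Errorf("unsupported tag: %v", tag)
}

func main() {
	inputs := [][2]string{
		{"!!int", "0o14"},    // 12
		{"!!int", "0xC"},     // 12
		{"!!float", ".inf"},  // +Inf
		{"!!float", "-.inf"}, // -Inf
		{"!!float", ".nan"},  // NaN
	}
	for _, in := range inputs {
		v, err := parseScalar(in[0], in[1])
		fmt.Println(in[1], "=>", v, err)
	}
}
```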
--- libs/dyn/dynassert/dump.go | 60 ++ libs/dyn/yamlloader/loader.go | 60 +- .../yamlloader/testdata/spec_example_2.1.yml | 5 + .../yamlloader/testdata/spec_example_2.10.yml | 10 + .../yamlloader/testdata/spec_example_2.11.yml | 10 + .../yamlloader/testdata/spec_example_2.12.yml | 10 + .../yamlloader/testdata/spec_example_2.13.yml | 6 + .../yamlloader/testdata/spec_example_2.14.yml | 6 + .../yamlloader/testdata/spec_example_2.15.yml | 10 + .../yamlloader/testdata/spec_example_2.16.yml | 9 + .../yamlloader/testdata/spec_example_2.17.yml | 9 + .../yamlloader/testdata/spec_example_2.18.yml | 8 + .../yamlloader/testdata/spec_example_2.19.yml | 15 + .../yamlloader/testdata/spec_example_2.2.yml | 5 + .../yamlloader/testdata/spec_example_2.20.yml | 7 + .../yamlloader/testdata/spec_example_2.21.yml | 5 + .../yamlloader/testdata/spec_example_2.22.yml | 6 + .../yamlloader/testdata/spec_example_2.23.yml | 15 + .../yamlloader/testdata/spec_example_2.24.yml | 16 + .../yamlloader/testdata/spec_example_2.25.yml | 9 + .../yamlloader/testdata/spec_example_2.26.yml | 9 + .../yamlloader/testdata/spec_example_2.27.yml | 31 + .../yamlloader/testdata/spec_example_2.28.yml | 28 + .../yamlloader/testdata/spec_example_2.3.yml | 10 + .../yamlloader/testdata/spec_example_2.4.yml | 10 + .../yamlloader/testdata/spec_example_2.5.yml | 5 + .../yamlloader/testdata/spec_example_2.6.yml | 7 + .../yamlloader/testdata/spec_example_2.7.yml | 12 + .../yamlloader/testdata/spec_example_2.8.yml | 12 + .../yamlloader/testdata/spec_example_2.9.yml | 10 + libs/dyn/yamlloader/yaml_spec_test.go | 821 ++++++++++++++++++ 31 files changed, 1225 insertions(+), 11 deletions(-) create mode 100644 libs/dyn/dynassert/dump.go create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.1.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.10.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.11.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.12.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.13.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.14.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.15.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.16.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.17.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.18.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.19.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.2.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.20.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.21.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.22.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.23.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.24.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.25.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.26.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.27.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.28.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.3.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.4.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.5.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.6.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.7.yml create mode 100644 
libs/dyn/yamlloader/testdata/spec_example_2.8.yml create mode 100644 libs/dyn/yamlloader/testdata/spec_example_2.9.yml create mode 100644 libs/dyn/yamlloader/yaml_spec_test.go diff --git a/libs/dyn/dynassert/dump.go b/libs/dyn/dynassert/dump.go new file mode 100644 index 000000000..be0c72425 --- /dev/null +++ b/libs/dyn/dynassert/dump.go @@ -0,0 +1,60 @@ +package dynassert + +import ( + "fmt" + "strings" + + "github.com/databricks/cli/libs/dyn" +) + +// Dump returns the Go code to recreate the given value. +func Dump(v dyn.Value) string { + var sb strings.Builder + dump(v, &sb) + return sb.String() +} + +func dump(v dyn.Value, sb *strings.Builder) { + sb.WriteString("dyn.NewValue(\n") + + switch v.Kind() { + case dyn.KindMap: + sb.WriteString("map[string]dyn.Value{") + m := v.MustMap() + for _, p := range m.Pairs() { + sb.WriteString(fmt.Sprintf("\n%q: ", p.Key.MustString())) + dump(p.Value, sb) + sb.WriteByte(',') + } + sb.WriteString("\n},\n") + case dyn.KindSequence: + sb.WriteString("[]dyn.Value{\n") + for _, e := range v.MustSequence() { + dump(e, sb) + sb.WriteByte(',') + } + sb.WriteString("},\n") + case dyn.KindString: + sb.WriteString(fmt.Sprintf("%q,\n", v.MustString())) + case dyn.KindBool: + sb.WriteString(fmt.Sprintf("%t,\n", v.MustBool())) + case dyn.KindInt: + sb.WriteString(fmt.Sprintf("%d,\n", v.MustInt())) + case dyn.KindFloat: + sb.WriteString(fmt.Sprintf("%f,\n", v.MustFloat())) + case dyn.KindTime: + sb.WriteString(fmt.Sprintf("dyn.NewTime(%q),\n", v.MustTime().String())) + case dyn.KindNil: + sb.WriteString("nil,\n") + default: + panic(fmt.Sprintf("unhandled kind: %v", v.Kind())) + } + + // Add location + sb.WriteString("[]dyn.Location{") + for _, l := range v.Locations() { + sb.WriteString(fmt.Sprintf("{File: %q, Line: %d, Column: %d},", l.File, l.Line, l.Column)) + } + sb.WriteString("},\n") + sb.WriteString(")") +} diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index c3e8d0810..b4aaf0a74 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -105,6 +105,9 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro switch st { case "!!str": // OK + case "!!null": + // A literal unquoted "null" is treated as a null value by the YAML parser. + // However, when used as a key, it is treated as the string "null". case "!!merge": if merge != nil { panic("merge node already set") @@ -115,10 +118,11 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro return dyn.InvalidValue, errorf(loc, "invalid key tag: %v", st) } - k, err := d.load(key) - if err != nil { - return dyn.InvalidValue, err - } + k := dyn.NewValue(key.Value, []dyn.Location{{ + File: d.path, + Line: key.Line, + Column: key.Column, + }}) v, err := d.load(val) if err != nil { @@ -173,6 +177,14 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro return dyn.NewValue(out, []dyn.Location{loc}), nil } +func newIntValue(i64 int64, loc dyn.Location) dyn.Value { + // Use regular int type instead of int64 if possible. 
+ if i64 >= math.MinInt32 && i64 <= math.MaxInt32 { + return dyn.NewValue(int(i64), []dyn.Location{loc}) + } + return dyn.NewValue(i64, []dyn.Location{loc}) +} + func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { st := node.ShortTag() switch st { @@ -188,18 +200,44 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error return dyn.InvalidValue, errorf(loc, "invalid bool value: %v", node.Value) } case "!!int": - i64, err := strconv.ParseInt(node.Value, 10, 64) - if err != nil { - return dyn.InvalidValue, errorf(loc, "invalid int value: %v", node.Value) + // Try to parse the an integer value in base 10. + // We trim leading zeros to avoid octal parsing of the "0" prefix. + // See "testdata/spec_example_2.19.yml" for background. + i64, err := strconv.ParseInt(strings.TrimLeft(node.Value, "0"), 10, 64) + if err == nil { + return newIntValue(i64, loc), nil } - // Use regular int type instead of int64 if possible. - if i64 >= math.MinInt32 && i64 <= math.MaxInt32 { - return dyn.NewValue(int(i64), []dyn.Location{loc}), nil + // Let the [ParseInt] function figure out the base. + i64, err = strconv.ParseInt(node.Value, 0, 64) + if err == nil { + return newIntValue(i64, loc), nil } - return dyn.NewValue(i64, []dyn.Location{loc}), nil + return dyn.InvalidValue, errorf(loc, "invalid int value: %v", node.Value) case "!!float": f64, err := strconv.ParseFloat(node.Value, 64) if err != nil { + // Deal with infinity prefixes. + v := strings.ToLower(node.Value) + switch { + case strings.HasPrefix(v, "+"): + v = strings.TrimPrefix(v, "+") + f64 = math.Inf(1) + case strings.HasPrefix(v, "-"): + v = strings.TrimPrefix(v, "-") + f64 = math.Inf(-1) + default: + // No prefix. + f64 = math.Inf(1) + } + + // Deal with infinity and NaN values. + switch v { + case ".inf": + return dyn.NewValue(f64, []dyn.Location{loc}), nil + case ".nan": + return dyn.NewValue(math.NaN(), []dyn.Location{loc}), nil + } + return dyn.InvalidValue, errorf(loc, "invalid float value: %v", node.Value) } return dyn.NewValue(f64, []dyn.Location{loc}), nil diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.1.yml b/libs/dyn/yamlloader/testdata/spec_example_2.1.yml new file mode 100644 index 000000000..c9e26274c --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.1.yml @@ -0,0 +1,5 @@ +# Example 2.1 Sequence of Scalars (ball players) + +- Mark McGwire +- Sammy Sosa +- Ken Griffey diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.10.yml b/libs/dyn/yamlloader/testdata/spec_example_2.10.yml new file mode 100644 index 000000000..a3459dedf --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.10.yml @@ -0,0 +1,10 @@ +# Example 2.10 Node for “Sammy Sosa” appears twice in this document + +--- +hr: +- Mark McGwire +# Following node labeled SS +- &SS Sammy Sosa +rbi: +- *SS # Subsequent occurrence +- Ken Griffey diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.11.yml b/libs/dyn/yamlloader/testdata/spec_example_2.11.yml new file mode 100644 index 000000000..e3c5c115e --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.11.yml @@ -0,0 +1,10 @@ +# Example 2.11 Mapping between Sequences + +? - Detroit Tigers + - Chicago cubs +: - 2001-07-23 + +? 
[ New York Yankees, + Atlanta Braves ] +: [ 2001-07-02, 2001-08-12, + 2001-08-14 ] diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.12.yml b/libs/dyn/yamlloader/testdata/spec_example_2.12.yml new file mode 100644 index 000000000..eb4a526f6 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.12.yml @@ -0,0 +1,10 @@ +# Example 2.12 Compact Nested Mapping + +--- +# Products purchased +- item : Super Hoop + quantity: 1 +- item : Basketball + quantity: 4 +- item : Big Shoes + quantity: 1 diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.13.yml b/libs/dyn/yamlloader/testdata/spec_example_2.13.yml new file mode 100644 index 000000000..e55abff10 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.13.yml @@ -0,0 +1,6 @@ +# Example 2.13 In literals, newlines are preserved + +# ASCII Art +--- | + \//||\/|| + // || ||__ diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.14.yml b/libs/dyn/yamlloader/testdata/spec_example_2.14.yml new file mode 100644 index 000000000..439fca30c --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.14.yml @@ -0,0 +1,6 @@ +# Example 2.14 In the folded scalars, newlines become spaces + +--- > + Mark McGwire's + year was crippled + by a knee injury. diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.15.yml b/libs/dyn/yamlloader/testdata/spec_example_2.15.yml new file mode 100644 index 000000000..266e7ce47 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.15.yml @@ -0,0 +1,10 @@ +# Example 2.15 Folded newlines are preserved for “more indented” and blank lines + +--- > + Sammy Sosa completed another + fine season with great stats. + + 63 Home Runs + 0.288 Batting Average + + What a year! diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.16.yml b/libs/dyn/yamlloader/testdata/spec_example_2.16.yml new file mode 100644 index 000000000..6db6b0878 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.16.yml @@ -0,0 +1,9 @@ +# Example 2.16 Indentation determines scope + +name: Mark McGwire +accomplishment: > + Mark set a major league + home run record in 1998. +stats: | + 65 Home Runs + 0.278 Batting Average diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.17.yml b/libs/dyn/yamlloader/testdata/spec_example_2.17.yml new file mode 100644 index 000000000..af0777aba --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.17.yml @@ -0,0 +1,9 @@ +# Example 2.17 Quoted Scalars + +unicode: "Sosa did fine.\u263A" +control: "\b1998\t1999\t2000\n" +hex esc: "\x0d\x0a is \r\n" + +single: '"Howdy!" he cried.' +quoted: ' # Not a ''comment''.' +tie-fighter: '|\-*-/|' diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.18.yml b/libs/dyn/yamlloader/testdata/spec_example_2.18.yml new file mode 100644 index 000000000..741bcd8c0 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.18.yml @@ -0,0 +1,8 @@ +# Example 2.18 Multi-line Flow Scalars + +plain: + This unquoted scalar + spans many lines. + +quoted: "So does this + quoted scalar.\n" diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.19.yml b/libs/dyn/yamlloader/testdata/spec_example_2.19.yml new file mode 100644 index 000000000..6ed95e092 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.19.yml @@ -0,0 +1,15 @@ +# Example 2.19 Integers + +canonical: 12345 +decimal: +12345 +octal: 0o14 +hexadecimal: 0xC + +# Note: this example is not part of the spec but added for completeness. 
+# +# Octal numbers: +# - YAML 1.1: prefix is "0" +# - YAML 1.2: prefix is "0o" +# The "gopkg.in/yaml.v3" package accepts both for backwards compat. +# We accept only the YAML 1.2 prefix "0o". +octal11: 012345 diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.2.yml b/libs/dyn/yamlloader/testdata/spec_example_2.2.yml new file mode 100644 index 000000000..29c16105e --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.2.yml @@ -0,0 +1,5 @@ +# Example 2.2 Mapping Scalars to Scalars (player statistics) + +hr: 65 # Home runs +avg: 0.278 # Batting average +rbi: 147 # Runs Batted In diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.20.yml b/libs/dyn/yamlloader/testdata/spec_example_2.20.yml new file mode 100644 index 000000000..77a79a0c3 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.20.yml @@ -0,0 +1,7 @@ +# Example 2.20 Floating Point + +canonical: 1.23015e+3 +exponential: 12.3015e+02 +fixed: 1230.15 +negative infinity: -.inf +not a number: .nan diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.21.yml b/libs/dyn/yamlloader/testdata/spec_example_2.21.yml new file mode 100644 index 000000000..cdb423c56 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.21.yml @@ -0,0 +1,5 @@ +# Example 2.21 Miscellaneous + +null: +booleans: [ true, false ] +string: '012345' diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.22.yml b/libs/dyn/yamlloader/testdata/spec_example_2.22.yml new file mode 100644 index 000000000..bef2addfa --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.22.yml @@ -0,0 +1,6 @@ +# Example 2.22 Timestamps + +canonical: 2001-12-15T02:59:43.1Z +iso8601: 2001-12-14t21:59:43.10-05:00 +spaced: 2001-12-14 21:59:43.10 -5 +date: 2002-12-14 diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.23.yml b/libs/dyn/yamlloader/testdata/spec_example_2.23.yml new file mode 100644 index 000000000..56e9898e2 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.23.yml @@ -0,0 +1,15 @@ +# Example 2.23 Various Explicit Tags + +--- +not-date: !!str 2002-04-28 + +picture: !!binary | + R0lGODlhDAAMAIQAAP//9/X + 17unp5WZmZgAAAOfn515eXv + Pz7Y6OjuDg4J+fn5OTk6enp + 56enmleECcgggoBADs= + +application specific tag: !something | + The semantics of the tag + above may be different for + different documents. diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.24.yml b/libs/dyn/yamlloader/testdata/spec_example_2.24.yml new file mode 100644 index 000000000..f7c11d0fb --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.24.yml @@ -0,0 +1,16 @@ +# Example 2.24 Global Tags + +%TAG ! tag:clarkevans.com,2002: +--- !shape + # Use the ! handle for presenting + # tag:clarkevans.com,2002:circle +- !circle + center: &ORIGIN {x: 73, y: 129} + radius: 7 +- !line + start: *ORIGIN + finish: { x: 89, y: 102 } +- !label + start: *ORIGIN + color: 0xFFEEBB + text: Pretty vector drawing. diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.25.yml b/libs/dyn/yamlloader/testdata/spec_example_2.25.yml new file mode 100644 index 000000000..73bac8627 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.25.yml @@ -0,0 +1,9 @@ +# Example 2.25 Unordered Sets + +# Sets are represented as a +# Mapping where each key is +# associated with a null value +--- !!set +? Mark McGwire +? Sammy Sosa +? 
Ken Griffey diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.26.yml b/libs/dyn/yamlloader/testdata/spec_example_2.26.yml new file mode 100644 index 000000000..00863a6b8 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.26.yml @@ -0,0 +1,9 @@ +# Example 2.26 Ordered Mappings + +# Ordered maps are represented as +# A sequence of mappings, with +# each mapping having one key +--- !!omap +- Mark McGwire: 65 +- Sammy Sosa: 63 +- Ken Griffey: 58 diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.27.yml b/libs/dyn/yamlloader/testdata/spec_example_2.27.yml new file mode 100644 index 000000000..fc9b460c2 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.27.yml @@ -0,0 +1,31 @@ +# Example 2.27 Invoice + +--- ! +invoice: 34843 +date : 2001-01-23 +bill-to: &id001 + given : Chris + family : Dumars + address: + lines: | + 458 Walkman Dr. + Suite #292 + city : Royal Oak + state : MI + postal : 48046 +ship-to: *id001 +product: +- sku : BL394D + quantity : 4 + description : Basketball + price : 450.00 +- sku : BL4438H + quantity : 1 + description : Super Hoop + price : 2392.00 +tax : 251.42 +total: 4443.52 +comments: + Late afternoon is best. + Backup contact is Nancy + Billsmer @ 338-4338. diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.28.yml b/libs/dyn/yamlloader/testdata/spec_example_2.28.yml new file mode 100644 index 000000000..353694729 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.28.yml @@ -0,0 +1,28 @@ +# Example 2.28 Log File + +--- +Time: 2001-11-23 15:01:42 -5 +User: ed +Warning: + This is an error message + for the log file +--- +Time: 2001-11-23 15:02:31 -5 +User: ed +Warning: + A slightly different error + message. +--- +Date: 2001-11-23 15:03:17 -5 +User: ed +Fatal: + Unknown variable "bar" +Stack: +- file: TopClass.py + line: 23 + code: | + x = MoreObject("345\n") +- file: MoreClass.py + line: 58 + code: |- + foo = bar diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.3.yml b/libs/dyn/yamlloader/testdata/spec_example_2.3.yml new file mode 100644 index 000000000..70cbe07d3 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.3.yml @@ -0,0 +1,10 @@ +# Example 2.3 Mapping Scalars to Sequences (ball clubs in each league) + +american: +- Boston Red Sox +- Detroit Tigers +- New York Yankees +national: +- New York Mets +- Chicago Cubs +- Atlanta Braves diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.4.yml b/libs/dyn/yamlloader/testdata/spec_example_2.4.yml new file mode 100644 index 000000000..cce28625f --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.4.yml @@ -0,0 +1,10 @@ +# Example 2.4 Sequence of Mappings (players’ statistics) + +- + name: Mark McGwire + hr: 65 + avg: 0.278 +- + name: Sammy Sosa + hr: 63 + avg: 0.288 diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.5.yml b/libs/dyn/yamlloader/testdata/spec_example_2.5.yml new file mode 100644 index 000000000..a585faee7 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.5.yml @@ -0,0 +1,5 @@ +# Example 2.5 Sequence of Sequences + +- [name , hr, avg ] +- [Mark McGwire, 65, 0.278] +- [Sammy Sosa , 63, 0.288] diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.6.yml b/libs/dyn/yamlloader/testdata/spec_example_2.6.yml new file mode 100644 index 000000000..cc137e5d5 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.6.yml @@ -0,0 +1,7 @@ +# Example 2.6 Mapping of Mappings + +Mark McGwire: {hr: 65, avg: 0.278} +Sammy Sosa: { + hr: 63, + avg: 0.288, + } diff --git 
a/libs/dyn/yamlloader/testdata/spec_example_2.7.yml b/libs/dyn/yamlloader/testdata/spec_example_2.7.yml new file mode 100644 index 000000000..35c2541d6 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.7.yml @@ -0,0 +1,12 @@ +# Example 2.7 Two Documents in a Stream (each with a leading comment) + +# Ranking of 1998 home runs +--- +- Mark McGwire +- Sammy Sosa +- Ken Griffey + +# Team ranking +--- +- Chicago Cubs +- St Louis Cardinals diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.8.yml b/libs/dyn/yamlloader/testdata/spec_example_2.8.yml new file mode 100644 index 000000000..ae6e8bf27 --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.8.yml @@ -0,0 +1,12 @@ +# Example 2.8 Play by Play Feed from a Game + +--- +time: 20:03:20 +player: Sammy Sosa +action: strike (miss) +... +--- +time: 20:03:47 +player: Sammy Sosa +action: grand slam +... diff --git a/libs/dyn/yamlloader/testdata/spec_example_2.9.yml b/libs/dyn/yamlloader/testdata/spec_example_2.9.yml new file mode 100644 index 000000000..75217b25e --- /dev/null +++ b/libs/dyn/yamlloader/testdata/spec_example_2.9.yml @@ -0,0 +1,10 @@ +# Example 2.9 Single Document with Two Comments + +--- +hr: # 1998 hr ranking +- Mark McGwire +- Sammy Sosa +# 1998 rbi ranking +rbi: +- Sammy Sosa +- Ken Griffey diff --git a/libs/dyn/yamlloader/yaml_spec_test.go b/libs/dyn/yamlloader/yaml_spec_test.go new file mode 100644 index 000000000..2a5ae817f --- /dev/null +++ b/libs/dyn/yamlloader/yaml_spec_test.go @@ -0,0 +1,821 @@ +package yamlloader_test + +import ( + "bytes" + "math" + "os" + "testing" + + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/stretchr/testify/require" +) + +const NL = "\n" + +func loadExample(t *testing.T, file string) dyn.Value { + input, err := os.ReadFile(file) + require.NoError(t, err) + self, err := yamlloader.LoadYAML(file, bytes.NewBuffer(input)) + require.NoError(t, err) + return self +} + +func TestYAMLSpecExample_2_1(t *testing.T) { + file := "testdata/spec_example_2.1.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + []dyn.Value{ + dyn.NewValue("Mark McGwire", []dyn.Location{{File: file, Line: 3, Column: 3}}), + dyn.NewValue("Sammy Sosa", []dyn.Location{{File: file, Line: 4, Column: 3}}), + dyn.NewValue("Ken Griffey", []dyn.Location{{File: file, Line: 5, Column: 3}}), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_2(t *testing.T) { + file := "testdata/spec_example_2.2.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "hr": dyn.NewValue(65, []dyn.Location{{File: file, Line: 3, Column: 6}}), + "avg": dyn.NewValue(0.278, []dyn.Location{{File: file, Line: 4, Column: 6}}), + "rbi": dyn.NewValue(147, []dyn.Location{{File: file, Line: 5, Column: 6}}), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_3(t *testing.T) { + file := "testdata/spec_example_2.3.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "american": dyn.NewValue( + []dyn.Value{ + dyn.NewValue("Boston Red Sox", []dyn.Location{{File: file, Line: 4, Column: 3}}), + dyn.NewValue("Detroit Tigers", []dyn.Location{{File: file, Line: 5, Column: 3}}), + dyn.NewValue("New York Yankees", []dyn.Location{{File: file, Line: 6, Column: 3}}), + }, + []dyn.Location{{File: file, Line: 4, Column: 1}}, + ), + "national": 
dyn.NewValue( + []dyn.Value{ + dyn.NewValue("New York Mets", []dyn.Location{{File: file, Line: 8, Column: 3}}), + dyn.NewValue("Chicago Cubs", []dyn.Location{{File: file, Line: 9, Column: 3}}), + dyn.NewValue("Atlanta Braves", []dyn.Location{{File: file, Line: 10, Column: 3}}), + }, + []dyn.Location{{File: file, Line: 8, Column: 1}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_4(t *testing.T) { + file := "testdata/spec_example_2.4.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + []dyn.Value{ + dyn.NewValue( + map[string]dyn.Value{ + "name": dyn.NewValue("Mark McGwire", []dyn.Location{{File: file, Line: 4, Column: 9}}), + "hr": dyn.NewValue(65, []dyn.Location{{File: file, Line: 5, Column: 9}}), + "avg": dyn.NewValue(0.278, []dyn.Location{{File: file, Line: 6, Column: 9}}), + }, + []dyn.Location{{File: file, Line: 4, Column: 3}}, + ), + dyn.NewValue( + map[string]dyn.Value{ + "name": dyn.NewValue("Sammy Sosa", []dyn.Location{{File: file, Line: 8, Column: 9}}), + "hr": dyn.NewValue(63, []dyn.Location{{File: file, Line: 9, Column: 9}}), + "avg": dyn.NewValue(0.288, []dyn.Location{{File: file, Line: 10, Column: 9}}), + }, + []dyn.Location{{File: file, Line: 8, Column: 3}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_5(t *testing.T) { + file := "testdata/spec_example_2.5.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + []dyn.Value{ + dyn.NewValue( + []dyn.Value{ + dyn.NewValue("name", []dyn.Location{{File: file, Line: 3, Column: 4}}), + dyn.NewValue("hr", []dyn.Location{{File: file, Line: 3, Column: 18}}), + dyn.NewValue("avg", []dyn.Location{{File: file, Line: 3, Column: 22}}), + }, + []dyn.Location{{File: file, Line: 3, Column: 3}}, + ), + dyn.NewValue( + []dyn.Value{ + dyn.NewValue("Mark McGwire", []dyn.Location{{File: file, Line: 4, Column: 4}}), + dyn.NewValue(65, []dyn.Location{{File: file, Line: 4, Column: 18}}), + dyn.NewValue(0.278, []dyn.Location{{File: file, Line: 4, Column: 22}}), + }, + []dyn.Location{{File: file, Line: 4, Column: 3}}, + ), + dyn.NewValue( + []dyn.Value{ + dyn.NewValue("Sammy Sosa", []dyn.Location{{File: file, Line: 5, Column: 4}}), + dyn.NewValue(63, []dyn.Location{{File: file, Line: 5, Column: 18}}), + dyn.NewValue(0.288, []dyn.Location{{File: file, Line: 5, Column: 22}}), + }, + []dyn.Location{{File: file, Line: 5, Column: 3}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_6(t *testing.T) { + file := "testdata/spec_example_2.6.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "Mark McGwire": dyn.NewValue( + map[string]dyn.Value{ + "hr": dyn.NewValue(65, []dyn.Location{{File: file, Line: 3, Column: 20}}), + "avg": dyn.NewValue(0.278, []dyn.Location{{File: file, Line: 3, Column: 29}}), + }, + []dyn.Location{{File: file, Line: 3, Column: 15}}, + ), + "Sammy Sosa": dyn.NewValue( + map[string]dyn.Value{ + "hr": dyn.NewValue(63, []dyn.Location{{File: file, Line: 5, Column: 9}}), + "avg": dyn.NewValue(0.288, []dyn.Location{{File: file, Line: 6, Column: 10}}), + }, + []dyn.Location{{File: file, Line: 4, Column: 13}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_7(t *testing.T) { + file := "testdata/spec_example_2.7.yml" + self := loadExample(t, file) + + // Note: we do not support multiple documents in a single YAML file. 
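+ // The loader returns only the first document in the stream, so the assertion below
+ // covers the 1998 home run ranking and not the second ("Team ranking") document.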
+ + assert.Equal(t, dyn.NewValue( + []dyn.Value{ + dyn.NewValue( + "Mark McGwire", + []dyn.Location{{File: file, Line: 5, Column: 3}}, + ), + dyn.NewValue( + "Sammy Sosa", + []dyn.Location{{File: file, Line: 6, Column: 3}}, + ), + dyn.NewValue( + "Ken Griffey", + []dyn.Location{{File: file, Line: 7, Column: 3}}, + ), + }, + []dyn.Location{{File: file, Line: 5, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_8(t *testing.T) { + file := "testdata/spec_example_2.8.yml" + self := loadExample(t, file) + + // Note: we do not support multiple documents in a single YAML file. + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "time": dyn.NewValue("20:03:20", []dyn.Location{{File: file, Line: 4, Column: 7}}), + "player": dyn.NewValue("Sammy Sosa", []dyn.Location{{File: file, Line: 5, Column: 9}}), + "action": dyn.NewValue("strike (miss)", []dyn.Location{{File: file, Line: 6, Column: 9}}), + }, + []dyn.Location{{File: file, Line: 4, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_9(t *testing.T) { + file := "testdata/spec_example_2.9.yml" + self := loadExample(t, file) + + // Note: we do not support multiple documents in a single YAML file. + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "hr": dyn.NewValue( + []dyn.Value{ + dyn.NewValue("Mark McGwire", []dyn.Location{{File: file, Line: 5, Column: 3}}), + dyn.NewValue("Sammy Sosa", []dyn.Location{{File: file, Line: 6, Column: 3}}), + }, + []dyn.Location{{File: file, Line: 5, Column: 1}}, + ), + "rbi": dyn.NewValue( + []dyn.Value{ + dyn.NewValue("Sammy Sosa", []dyn.Location{{File: file, Line: 9, Column: 3}}), + dyn.NewValue("Ken Griffey", []dyn.Location{{File: file, Line: 10, Column: 3}}), + }, + []dyn.Location{{File: file, Line: 9, Column: 1}}, + ), + }, + []dyn.Location{{File: file, Line: 4, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_10(t *testing.T) { + file := "testdata/spec_example_2.10.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "hr": dyn.NewValue( + []dyn.Value{ + dyn.NewValue("Mark McGwire", []dyn.Location{{File: file, Line: 5, Column: 3}}), + dyn.NewValue("Sammy Sosa", []dyn.Location{{File: file, Line: 7, Column: 3}}), + }, + []dyn.Location{{File: file, Line: 5, Column: 1}}, + ), + "rbi": dyn.NewValue( + []dyn.Value{ + // The location for an anchored value refers to the anchor, not the reference. + // This is the same location as the anchor that appears in the "hr" mapping. + dyn.NewValue("Sammy Sosa", []dyn.Location{{File: file, Line: 7, Column: 3}}), + dyn.NewValue("Ken Griffey", []dyn.Location{{File: file, Line: 10, Column: 3}}), + }, + []dyn.Location{{File: file, Line: 9, Column: 1}}, + ), + }, + []dyn.Location{{File: file, Line: 4, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_11(t *testing.T) { + file := "testdata/spec_example_2.11.yml" + input, err := os.ReadFile(file) + require.NoError(t, err) + + // Note: non-string mapping keys are not supported by "gopkg.in/yaml.v3". 
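+ // Example 2.11 uses sequences as explicit mapping keys ("? - Detroit Tigers"),
+ // so loading is expected to fail with the "key is not a scalar" error asserted below.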
+ _, err = yamlloader.LoadYAML(file, bytes.NewBuffer(input)) + assert.ErrorContains(t, err, `: key is not a scalar`) +} + +func TestYAMLSpecExample_2_12(t *testing.T) { + file := "testdata/spec_example_2.12.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + []dyn.Value{ + dyn.NewValue( + map[string]dyn.Value{ + "item": dyn.NewValue("Super Hoop", []dyn.Location{{File: file, Line: 5, Column: 13}}), + "quantity": dyn.NewValue(1, []dyn.Location{{File: file, Line: 6, Column: 13}}), + }, + []dyn.Location{{File: file, Line: 5, Column: 3}}, + ), + dyn.NewValue( + map[string]dyn.Value{ + "item": dyn.NewValue("Basketball", []dyn.Location{{File: file, Line: 7, Column: 13}}), + "quantity": dyn.NewValue(4, []dyn.Location{{File: file, Line: 8, Column: 13}}), + }, + []dyn.Location{{File: file, Line: 7, Column: 3}}, + ), + dyn.NewValue( + map[string]dyn.Value{ + "item": dyn.NewValue("Big Shoes", []dyn.Location{{File: file, Line: 9, Column: 13}}), + "quantity": dyn.NewValue(1, []dyn.Location{{File: file, Line: 10, Column: 13}}), + }, + []dyn.Location{{File: file, Line: 9, Column: 3}}, + ), + }, + []dyn.Location{{File: file, Line: 5, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_13(t *testing.T) { + file := "testdata/spec_example_2.13.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + ``+ + `\//||\/||`+NL+ + "// || ||__"+NL, + []dyn.Location{{File: file, Line: 4, Column: 5}}, + ), self) +} + +func TestYAMLSpecExample_2_14(t *testing.T) { + file := "testdata/spec_example_2.14.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + `Mark McGwire's year was crippled by a knee injury.`+NL, + []dyn.Location{{File: file, Line: 3, Column: 5}}, + ), self) +} + +func TestYAMLSpecExample_2_15(t *testing.T) { + file := "testdata/spec_example_2.15.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + ``+ + `Sammy Sosa completed another fine season with great stats.`+NL+ + NL+ + ` 63 Home Runs`+NL+ + ` 0.288 Batting Average`+NL+ + NL+ + `What a year!`+NL, + []dyn.Location{{File: file, Line: 3, Column: 5}}, + ), self) +} + +func TestYAMLSpecExample_2_16(t *testing.T) { + file := "testdata/spec_example_2.16.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "name": dyn.NewValue( + "Mark McGwire", + []dyn.Location{{File: file, Line: 3, Column: 7}}, + ), + "accomplishment": dyn.NewValue( + `Mark set a major league home run record in 1998.`+NL, + []dyn.Location{{File: file, Line: 4, Column: 17}}, + ), + "stats": dyn.NewValue( + ``+ + `65 Home Runs`+NL+ + `0.278 Batting Average`+NL, + []dyn.Location{{File: file, Line: 7, Column: 8}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_17(t *testing.T) { + file := "testdata/spec_example_2.17.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "unicode": dyn.NewValue( + `Sosa did fine.`+"\u263A", + []dyn.Location{{File: file, Line: 3, Column: 10}}, + ), + "control": dyn.NewValue( + "\b1998\t1999\t2000\n", + []dyn.Location{{File: file, Line: 4, Column: 10}}, + ), + "hex esc": dyn.NewValue( + "\x0d\x0a is \r\n", + []dyn.Location{{File: file, Line: 5, Column: 10}}, + ), + "single": dyn.NewValue( + `"Howdy!" 
he cried.`, + []dyn.Location{{File: file, Line: 7, Column: 9}}, + ), + "quoted": dyn.NewValue( + ` # Not a 'comment'.`, + []dyn.Location{{File: file, Line: 8, Column: 9}}, + ), + "tie-fighter": dyn.NewValue( + `|\-*-/|`, + []dyn.Location{{File: file, Line: 9, Column: 14}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_18(t *testing.T) { + file := "testdata/spec_example_2.18.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "plain": dyn.NewValue( + `This unquoted scalar spans many lines.`, + []dyn.Location{{File: file, Line: 4, Column: 3}}, + ), + "quoted": dyn.NewValue( + `So does this quoted scalar.`+NL, + []dyn.Location{{File: file, Line: 7, Column: 9}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_19(t *testing.T) { + file := "testdata/spec_example_2.19.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "canonical": dyn.NewValue( + 12345, + []dyn.Location{{File: file, Line: 3, Column: 12}}, + ), + "decimal": dyn.NewValue( + 12345, + []dyn.Location{{File: file, Line: 4, Column: 10}}, + ), + "octal": dyn.NewValue( + 12, + []dyn.Location{{File: file, Line: 5, Column: 8}}, + ), + "hexadecimal": dyn.NewValue( + 12, + []dyn.Location{{File: file, Line: 6, Column: 14}}, + ), + "octal11": dyn.NewValue( + 12345, + []dyn.Location{{File: file, Line: 15, Column: 10}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_20(t *testing.T) { + file := "testdata/spec_example_2.20.yml" + self := loadExample(t, file) + + // Equality assertion doesn't work with NaNs. + // See https://github.com/stretchr/testify/issues/624. + // + // Remove the NaN entry. 
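+ // IEEE 754 NaN never compares equal to itself, so the "not a number" entry is
+ // dropped with dyn.Walk before the equality assertion.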
+ self, _ = dyn.Walk(self, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + if f, ok := v.AsFloat(); ok && math.IsNaN(f) { + return dyn.InvalidValue, dyn.ErrDrop + } + return v, nil + }) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "canonical": dyn.NewValue( + 1230.15, + []dyn.Location{{File: file, Line: 3, Column: 12}}, + ), + "exponential": dyn.NewValue( + 1230.15, + []dyn.Location{{File: file, Line: 4, Column: 14}}, + ), + "fixed": dyn.NewValue( + 1230.15, + []dyn.Location{{File: file, Line: 5, Column: 8}}, + ), + "negative infinity": dyn.NewValue( + math.Inf(-1), + []dyn.Location{{File: file, Line: 6, Column: 20}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_21(t *testing.T) { + file := "testdata/spec_example_2.21.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "null": dyn.NewValue( + nil, + []dyn.Location{{File: file, Line: 3, Column: 6}}, + ), + "booleans": dyn.NewValue( + []dyn.Value{ + dyn.NewValue(true, []dyn.Location{{File: file, Line: 4, Column: 13}}), + dyn.NewValue(false, []dyn.Location{{File: file, Line: 4, Column: 19}}), + }, + []dyn.Location{{File: file, Line: 4, Column: 11}}, + ), + "string": dyn.NewValue( + "012345", + []dyn.Location{{File: file, Line: 5, Column: 9}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_22(t *testing.T) { + file := "testdata/spec_example_2.22.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "canonical": dyn.NewValue( + dyn.MustTime("2001-12-15T02:59:43.1Z"), + []dyn.Location{{File: file, Line: 3, Column: 12}}, + ), + "iso8601": dyn.NewValue( + dyn.MustTime("2001-12-14t21:59:43.10-05:00"), + []dyn.Location{{File: file, Line: 4, Column: 10}}, + ), + "spaced": dyn.NewValue( + // This is parsed as a string, not a timestamp, + // both by "gopkg.in/yaml.v3" and by our implementation. + "2001-12-14 21:59:43.10 -5", + []dyn.Location{{File: file, Line: 5, Column: 9}}, + ), + "date": dyn.NewValue( + dyn.MustTime("2002-12-14"), + []dyn.Location{{File: file, Line: 6, Column: 7}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 1}}, + ), self) +} + +func TestYAMLSpecExample_2_23(t *testing.T) { + file := "testdata/spec_example_2.23.yml" + input, err := os.ReadFile(file) + require.NoError(t, err) + + // Note: the !!binary tag is not supported by us. 
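+ // Loading example 2.23 therefore fails on the !!binary "picture" scalar,
+ // as the "unknown tag" assertion below verifies.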
+ + _, err = yamlloader.LoadYAML(file, bytes.NewBuffer(input)) + assert.ErrorContains(t, err, `: unknown tag: !!binary`) +} + +func TestYAMLSpecExample_2_24(t *testing.T) { + file := "testdata/spec_example_2.24.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + []dyn.Value{ + dyn.NewValue( + map[string]dyn.Value{ + "center": dyn.NewValue( + map[string]dyn.Value{ + "x": dyn.NewValue(73, []dyn.Location{{File: file, Line: 8, Column: 23}}), + "y": dyn.NewValue(129, []dyn.Location{{File: file, Line: 8, Column: 30}}), + }, + []dyn.Location{{File: file, Line: 8, Column: 11}}, + ), + "radius": dyn.NewValue(7, []dyn.Location{{File: file, Line: 9, Column: 11}}), + }, + []dyn.Location{{File: file, Line: 7, Column: 3}}, + ), + dyn.NewValue( + map[string]dyn.Value{ + "start": dyn.NewValue( + map[string]dyn.Value{ + "x": dyn.NewValue(73, []dyn.Location{{File: file, Line: 8, Column: 23}}), + "y": dyn.NewValue(129, []dyn.Location{{File: file, Line: 8, Column: 30}}), + }, + []dyn.Location{{File: file, Line: 8, Column: 11}}, + ), + "finish": dyn.NewValue( + map[string]dyn.Value{ + "x": dyn.NewValue(89, []dyn.Location{{File: file, Line: 12, Column: 16}}), + "y": dyn.NewValue(102, []dyn.Location{{File: file, Line: 12, Column: 23}}), + }, + []dyn.Location{{File: file, Line: 12, Column: 11}}, + ), + }, + []dyn.Location{{File: file, Line: 10, Column: 3}}, + ), + dyn.NewValue( + map[string]dyn.Value{ + "start": dyn.NewValue( + map[string]dyn.Value{ + "x": dyn.NewValue(73, []dyn.Location{{File: file, Line: 8, Column: 23}}), + "y": dyn.NewValue(129, []dyn.Location{{File: file, Line: 8, Column: 30}}), + }, + []dyn.Location{{File: file, Line: 8, Column: 11}}, + ), + "color": dyn.NewValue(16772795, []dyn.Location{{File: file, Line: 15, Column: 10}}), + "text": dyn.NewValue("Pretty vector drawing.", []dyn.Location{{File: file, Line: 16, Column: 9}}), + }, + []dyn.Location{{File: file, Line: 13, Column: 3}}, + ), + }, + []dyn.Location{{File: file, Line: 4, Column: 5}}, + ), self) +} + +func TestYAMLSpecExample_2_25(t *testing.T) { + file := "testdata/spec_example_2.25.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "Mark McGwire": dyn.NewValue(nil, []dyn.Location{{File: file, Line: 8, Column: 1}}), + "Sammy Sosa": dyn.NewValue(nil, []dyn.Location{{File: file, Line: 9, Column: 1}}), + "Ken Griffey": dyn.NewValue(nil, []dyn.Location{{File: file, Line: 10, Column: 1}}), + }, + []dyn.Location{{File: file, Line: 6, Column: 5}}, + ), self) +} + +func TestYAMLSpecExample_2_26(t *testing.T) { + file := "testdata/spec_example_2.26.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + []dyn.Value{ + dyn.NewValue( + map[string]dyn.Value{ + "Mark McGwire": dyn.NewValue(65, []dyn.Location{{File: file, Line: 7, Column: 17}}), + }, + []dyn.Location{{File: file, Line: 7, Column: 3}}, + ), + dyn.NewValue( + map[string]dyn.Value{ + "Sammy Sosa": dyn.NewValue(63, []dyn.Location{{File: file, Line: 8, Column: 15}}), + }, + []dyn.Location{{File: file, Line: 8, Column: 3}}, + ), + dyn.NewValue( + map[string]dyn.Value{ + "Ken Griffey": dyn.NewValue(58, []dyn.Location{{File: file, Line: 9, Column: 16}}), + }, + []dyn.Location{{File: file, Line: 9, Column: 3}}, + ), + }, + []dyn.Location{{File: file, Line: 6, Column: 5}}, + ), self) +} + +func TestYAMLSpecExample_2_27(t *testing.T) { + file := "testdata/spec_example_2.27.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "invoice": dyn.NewValue( + 34843, + 
[]dyn.Location{{File: file, Line: 4, Column: 10}}, + ), + "date": dyn.NewValue( + dyn.MustTime("2001-01-23"), + []dyn.Location{{File: file, Line: 5, Column: 10}}, + ), + "bill-to": dyn.NewValue( + map[string]dyn.Value{ + "given": dyn.NewValue( + "Chris", + []dyn.Location{{File: file, Line: 7, Column: 12}}, + ), + "family": dyn.NewValue( + "Dumars", + []dyn.Location{{File: file, Line: 8, Column: 12}}, + ), + "address": dyn.NewValue( + map[string]dyn.Value{ + "lines": dyn.NewValue( + "458 Walkman Dr.\nSuite #292\n", + []dyn.Location{{File: file, Line: 10, Column: 12}}, + ), + "city": dyn.NewValue( + "Royal Oak", + []dyn.Location{{File: file, Line: 13, Column: 15}}, + ), + "state": dyn.NewValue( + "MI", + []dyn.Location{{File: file, Line: 14, Column: 15}}, + ), + "postal": dyn.NewValue( + 48046, + []dyn.Location{{File: file, Line: 15, Column: 15}}, + ), + }, + []dyn.Location{{File: file, Line: 10, Column: 5}}, + ), + }, + []dyn.Location{{File: file, Line: 6, Column: 10}}, + ), + "ship-to": dyn.NewValue( + map[string]dyn.Value{ + "given": dyn.NewValue( + "Chris", + []dyn.Location{{File: file, Line: 7, Column: 12}}, + ), + "family": dyn.NewValue( + "Dumars", + []dyn.Location{{File: file, Line: 8, Column: 12}}, + ), + "address": dyn.NewValue( + map[string]dyn.Value{ + "lines": dyn.NewValue( + "458 Walkman Dr.\nSuite #292\n", + []dyn.Location{{File: file, Line: 10, Column: 12}}, + ), + "city": dyn.NewValue( + "Royal Oak", + []dyn.Location{{File: file, Line: 13, Column: 15}}, + ), + "state": dyn.NewValue( + "MI", + []dyn.Location{{File: file, Line: 14, Column: 15}}, + ), + "postal": dyn.NewValue( + 48046, + []dyn.Location{{File: file, Line: 15, Column: 15}}, + ), + }, + []dyn.Location{{File: file, Line: 10, Column: 5}}, + ), + }, + []dyn.Location{{File: file, Line: 6, Column: 10}}, + ), + "product": dyn.NewValue( + []dyn.Value{ + dyn.NewValue( + map[string]dyn.Value{ + "sku": dyn.NewValue( + "BL394D", + []dyn.Location{{File: file, Line: 18, Column: 17}}, + ), + "quantity": dyn.NewValue( + 4, + []dyn.Location{{File: file, Line: 19, Column: 17}}, + ), + "description": dyn.NewValue( + "Basketball", + []dyn.Location{{File: file, Line: 20, Column: 17}}, + ), + "price": dyn.NewValue( + 450.0, + []dyn.Location{{File: file, Line: 21, Column: 17}}, + ), + }, + []dyn.Location{{File: file, Line: 18, Column: 3}}, + ), dyn.NewValue( + map[string]dyn.Value{ + "sku": dyn.NewValue( + "BL4438H", + []dyn.Location{{File: file, Line: 22, Column: 17}}, + ), + "quantity": dyn.NewValue( + 1, + []dyn.Location{{File: file, Line: 23, Column: 17}}, + ), + "description": dyn.NewValue( + "Super Hoop", + []dyn.Location{{File: file, Line: 24, Column: 17}}, + ), + "price": dyn.NewValue( + 2392.0, + []dyn.Location{{File: file, Line: 25, Column: 17}}, + ), + }, + []dyn.Location{{File: file, Line: 22, Column: 3}}, + )}, + []dyn.Location{{File: file, Line: 18, Column: 1}}, + ), + "tax": dyn.NewValue( + 251.42, + []dyn.Location{{File: file, Line: 26, Column: 8}}, + ), + "total": dyn.NewValue( + 4443.52, + []dyn.Location{{File: file, Line: 27, Column: 8}}, + ), + "comments": dyn.NewValue( + "Late afternoon is best. 
Backup contact is Nancy Billsmer @ 338-4338.", + []dyn.Location{{File: file, Line: 29, Column: 3}}, + ), + }, + []dyn.Location{{File: file, Line: 3, Column: 5}}, + ), self) +} + +func TestYAMLSpecExample_2_28(t *testing.T) { + file := "testdata/spec_example_2.28.yml" + self := loadExample(t, file) + + assert.Equal(t, dyn.NewValue( + map[string]dyn.Value{ + "Time": dyn.NewValue( + "2001-11-23 15:01:42 -5", + []dyn.Location{{File: file, Line: 4, Column: 7}}, + ), + "User": dyn.NewValue( + "ed", + []dyn.Location{{File: file, Line: 5, Column: 7}}, + ), + "Warning": dyn.NewValue( + "This is an error message for the log file", + []dyn.Location{{File: file, Line: 7, Column: 3}}, + ), + }, + []dyn.Location{{File: file, Line: 4, Column: 1}}, + ), self) +} From c5043c3d9df8c6422f325c8ce30fa0456115d707 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Fri, 18 Oct 2024 08:45:47 +0200 Subject: [PATCH 11/25] Add `bundle summary` to display URLs for deployed resources (#1731) ## Changes Adds a textual output to the `databricks bundle summary` command, which includes URLs of deployed resources. Example usage: ``` $ databricks bundle summary Name: my_pipeline Target: dev Workspace: Host: https://domain.databricks.com User: user@databricks.com Path: /Users/user@databricks.com/.bundle/my_pipeline/dev Resources: Jobs: my_project_job: Name: [dev lennart] my_project_job URL: https://domain.databricks.com/jobs/206899209187287?o=6051921418418893 Pipelines: my_project_pipeline: Name: [dev lennart] my_project_pipeline URL: https://domain.databricks.com/pipelines/3f849fd5-ba7d-47fa-a34c-c6bf034b4f58?o=6051921418418893 ``` Notes: * The top headers of the output are the same as those from the existing `bundle validate` command * URLs are colored light blue in the output * For resources that haven't been deployed yet, we show `(not deployed)` in place of the URL --------- Co-authored-by: Pieter Noordhuis Co-authored-by: Pieter Noordhuis --- bundle/config/mutator/initialize_urls.go | 65 +++++++++ bundle/config/mutator/initialize_urls_test.go | 130 ++++++++++++++++++ bundle/config/resources.go | 117 ++++++++++++++-- bundle/config/resources/clusters.go | 19 +++ bundle/config/resources/job.go | 19 +++ bundle/config/resources/mlflow_experiment.go | 19 +++ bundle/config/resources/mlflow_model.go | 19 +++ .../resources/model_serving_endpoint.go | 19 +++ bundle/config/resources/pipeline.go | 19 +++ bundle/config/resources/quality_monitor.go | 20 +++ bundle/config/resources/registered_model.go | 20 +++ bundle/config/resources/schema.go | 31 +++++ bundle/config/resources_test.go | 34 ++++- bundle/render/render_text_output.go | 121 ++++++++++++---- bundle/render/render_text_output_test.go | 112 ++++++++++++++- cmd/bundle/deploy.go | 2 +- cmd/bundle/summary.go | 12 +- cmd/bundle/validate.go | 2 +- 18 files changed, 728 insertions(+), 52 deletions(-) create mode 100644 bundle/config/mutator/initialize_urls.go create mode 100644 bundle/config/mutator/initialize_urls_test.go diff --git a/bundle/config/mutator/initialize_urls.go b/bundle/config/mutator/initialize_urls.go new file mode 100644 index 000000000..319305912 --- /dev/null +++ b/bundle/config/mutator/initialize_urls.go @@ -0,0 +1,65 @@ +package mutator + +import ( + "context" + "net/url" + "strconv" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" +) + +type initializeURLs struct { +} + +// InitializeURLs makes sure the URL field of each resource is configured. 
+// NOTE: since this depends on an extra API call, this mutator adds some extra +// latency. As such, it should only be used when needed. +// This URL field is used for the output of the 'bundle summary' CLI command. +func InitializeURLs() bundle.Mutator { + return &initializeURLs{} +} + +func (m *initializeURLs) Name() string { + return "InitializeURLs" +} + +func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + workspaceId, err := b.WorkspaceClient().CurrentWorkspaceID(ctx) + if err != nil { + return diag.FromErr(err) + } + orgId := strconv.FormatInt(workspaceId, 10) + host := b.WorkspaceClient().Config.CanonicalHostName() + initializeForWorkspace(b, orgId, host) + return nil +} + +func initializeForWorkspace(b *bundle.Bundle, orgId string, host string) error { + baseURL, err := url.Parse(host) + if err != nil { + return err + } + + // Add ?o= only if wasn't in the subdomain already. + // The ?o= is needed when vanity URLs / legacy workspace URLs are used. + // If it's not needed we prefer to leave it out since these URLs are rather + // long for most terminals. + // + // See https://docs.databricks.com/en/workspace/workspace-details.html for + // further reading about the '?o=' suffix. + if !strings.Contains(baseURL.Hostname(), orgId) { + values := baseURL.Query() + values.Add("o", orgId) + baseURL.RawQuery = values.Encode() + } + + for _, group := range b.Config.Resources.AllResources() { + for _, r := range group.Resources { + r.InitializeURL(*baseURL) + } + } + + return nil +} diff --git a/bundle/config/mutator/initialize_urls_test.go b/bundle/config/mutator/initialize_urls_test.go new file mode 100644 index 000000000..71cc153ab --- /dev/null +++ b/bundle/config/mutator/initialize_urls_test.go @@ -0,0 +1,130 @@ +package mutator + +import ( + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/stretchr/testify/require" +) + +func TestInitializeURLs(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + Host: "https://mycompany.databricks.com/", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + ID: "1", + JobSettings: &jobs.JobSettings{Name: "job1"}, + }, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline1": { + ID: "3", + PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}, + }, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment1": { + ID: "4", + Experiment: &ml.Experiment{Name: "experiment1"}, + }, + }, + Models: map[string]*resources.MlflowModel{ + "model1": { + ID: "a model uses its name for identifier", + Model: &ml.Model{Name: "a model uses its name for identifier"}, + }, + }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "servingendpoint1": { + ID: "my_serving_endpoint", + CreateServingEndpoint: &serving.CreateServingEndpoint{ + Name: "my_serving_endpoint", + }, + }, + }, + RegisteredModels: map[string]*resources.RegisteredModel{ + "registeredmodel1": { + ID: "8", + CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ + Name: 
"my_registered_model", + }, + }, + }, + QualityMonitors: map[string]*resources.QualityMonitor{ + "qualityMonitor1": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "catalog.schema.qualityMonitor1", + }, + }, + }, + Schemas: map[string]*resources.Schema{ + "schema1": { + ID: "catalog.schema", + CreateSchema: &catalog.CreateSchema{ + Name: "schema", + }, + }, + }, + Clusters: map[string]*resources.Cluster{ + "cluster1": { + ID: "1017-103929-vlr7jzcf", + ClusterSpec: &compute.ClusterSpec{ + ClusterName: "cluster1", + }, + }, + }, + }, + }, + } + + expectedURLs := map[string]string{ + "job1": "https://mycompany.databricks.com/jobs/1?o=123456", + "pipeline1": "https://mycompany.databricks.com/pipelines/3?o=123456", + "experiment1": "https://mycompany.databricks.com/ml/experiments/4?o=123456", + "model1": "https://mycompany.databricks.com/ml/models/a%20model%20uses%20its%20name%20for%20identifier?o=123456", + "servingendpoint1": "https://mycompany.databricks.com/ml/endpoints/my_serving_endpoint?o=123456", + "registeredmodel1": "https://mycompany.databricks.com/explore/data/models/8?o=123456", + "qualityMonitor1": "https://mycompany.databricks.com/explore/data/catalog/schema/qualityMonitor1?o=123456", + "schema1": "https://mycompany.databricks.com/explore/data/catalog/schema?o=123456", + "cluster1": "https://mycompany.databricks.com/compute/clusters/1017-103929-vlr7jzcf?o=123456", + } + + initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/") + + for _, group := range b.Config.Resources.AllResources() { + for key, r := range group.Resources { + require.Equal(t, expectedURLs[key], r.GetURL(), "Unexpected URL for "+key) + } + } +} + +func TestInitializeURLsWithoutOrgId(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + ID: "1", + JobSettings: &jobs.JobSettings{Name: "job1"}, + }, + }, + }, + }, + } + + initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/") + + require.Equal(t, "https://adb-123456.azuredatabricks.net/jobs/1", b.Config.Resources.Jobs["job1"].URL) +} diff --git a/bundle/config/resources.go b/bundle/config/resources.go index dc51a7caf..9513369e4 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -3,6 +3,7 @@ package config import ( "context" "fmt" + "net/url" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go" @@ -30,6 +31,53 @@ type ConfigResource interface { // Terraform equivalent name of the resource. For example "databricks_job" // for jobs and "databricks_pipeline" for pipelines. TerraformResourceName() string + + // GetName returns the in-product name of the resource. + GetName() string + + // GetURL returns the URL of the resource. + GetURL() string + + // InitializeURL initializes the URL field of the resource. + InitializeURL(baseURL url.URL) +} + +// ResourceGroup represents a group of resources of the same type. +// It includes a description of the resource type and a map of resources. +type ResourceGroup struct { + Description ResourceDescription + Resources map[string]ConfigResource +} + +// collectResourceMap collects resources of a specific type into a ResourceGroup. 
+func collectResourceMap[T ConfigResource]( + description ResourceDescription, + input map[string]T, +) ResourceGroup { + resources := make(map[string]ConfigResource) + for key, resource := range input { + resources[key] = resource + } + return ResourceGroup{ + Description: description, + Resources: resources, + } +} + +// AllResources returns all resources in the bundle grouped by their resource type. +func (r *Resources) AllResources() []ResourceGroup { + descriptions := SupportedResources() + return []ResourceGroup{ + collectResourceMap(descriptions["jobs"], r.Jobs), + collectResourceMap(descriptions["pipelines"], r.Pipelines), + collectResourceMap(descriptions["models"], r.Models), + collectResourceMap(descriptions["experiments"], r.Experiments), + collectResourceMap(descriptions["model_serving_endpoints"], r.ModelServingEndpoints), + collectResourceMap(descriptions["registered_models"], r.RegisteredModels), + collectResourceMap(descriptions["quality_monitors"], r.QualityMonitors), + collectResourceMap(descriptions["schemas"], r.Schemas), + collectResourceMap(descriptions["clusters"], r.Clusters), + } } func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) { @@ -61,20 +109,71 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) } type ResourceDescription struct { + // Singular and plural name when used to refer to the configuration. SingularName string + PluralName string + + // Singular and plural title when used in summaries / terminal UI. + SingularTitle string + PluralTitle string } // The keys of the map corresponds to the resource key in the bundle configuration. func SupportedResources() map[string]ResourceDescription { return map[string]ResourceDescription{ - "jobs": {SingularName: "job"}, - "pipelines": {SingularName: "pipeline"}, - "models": {SingularName: "model"}, - "experiments": {SingularName: "experiment"}, - "model_serving_endpoints": {SingularName: "model_serving_endpoint"}, - "registered_models": {SingularName: "registered_model"}, - "quality_monitors": {SingularName: "quality_monitor"}, - "schemas": {SingularName: "schema"}, - "clusters": {SingularName: "cluster"}, + "jobs": { + SingularName: "job", + PluralName: "jobs", + SingularTitle: "Job", + PluralTitle: "Jobs", + }, + "pipelines": { + SingularName: "pipeline", + PluralName: "pipelines", + SingularTitle: "Pipeline", + PluralTitle: "Pipelines", + }, + "models": { + SingularName: "model", + PluralName: "models", + SingularTitle: "Model", + PluralTitle: "Models", + }, + "experiments": { + SingularName: "experiment", + PluralName: "experiments", + SingularTitle: "Experiment", + PluralTitle: "Experiments", + }, + "model_serving_endpoints": { + SingularName: "model_serving_endpoint", + PluralName: "model_serving_endpoints", + SingularTitle: "Model Serving Endpoint", + PluralTitle: "Model Serving Endpoints", + }, + "registered_models": { + SingularName: "registered_model", + PluralName: "registered_models", + SingularTitle: "Registered Model", + PluralTitle: "Registered Models", + }, + "quality_monitors": { + SingularName: "quality_monitor", + PluralName: "quality_monitors", + SingularTitle: "Quality Monitor", + PluralTitle: "Quality Monitors", + }, + "schemas": { + SingularName: "schema", + PluralName: "schemas", + SingularTitle: "Schema", + PluralTitle: "Schemas", + }, + "clusters": { + SingularName: "cluster", + PluralName: "clusters", + SingularTitle: "Cluster", + PluralTitle: "Clusters", + }, } } diff --git a/bundle/config/resources/clusters.go 
b/bundle/config/resources/clusters.go index 632345666..eb0247c6e 100644 --- a/bundle/config/resources/clusters.go +++ b/bundle/config/resources/clusters.go @@ -2,6 +2,8 @@ package resources import ( "context" + "fmt" + "net/url" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" @@ -13,6 +15,7 @@ type Cluster struct { ID string `json:"id,omitempty" bundle:"readonly"` Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` *compute.ClusterSpec } @@ -37,3 +40,19 @@ func (s *Cluster) Exists(ctx context.Context, w *databricks.WorkspaceClient, id func (s *Cluster) TerraformResourceName() string { return "databricks_cluster" } + +func (s *Cluster) InitializeURL(baseURL url.URL) { + if s.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("compute/clusters/%s", s.ID) + s.URL = baseURL.String() +} + +func (s *Cluster) GetName() string { + return s.ClusterName +} + +func (s *Cluster) GetURL() string { + return s.URL +} diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index d8f97a2db..98db1ec5d 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -2,6 +2,8 @@ package resources import ( "context" + "fmt" + "net/url" "strconv" "github.com/databricks/cli/libs/log" @@ -14,6 +16,7 @@ type Job struct { ID string `json:"id,omitempty" bundle:"readonly"` Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` *jobs.JobSettings } @@ -44,3 +47,19 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri func (j *Job) TerraformResourceName() string { return "databricks_job" } + +func (j *Job) InitializeURL(baseURL url.URL) { + if j.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("jobs/%s", j.ID) + j.URL = baseURL.String() +} + +func (j *Job) GetName() string { + return j.Name +} + +func (j *Job) GetURL() string { + return j.URL +} diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index 0ab486436..a5871468f 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -2,6 +2,8 @@ package resources import ( "context" + "fmt" + "net/url" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" @@ -13,6 +15,7 @@ type MlflowExperiment struct { ID string `json:"id,omitempty" bundle:"readonly"` Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` *ml.Experiment } @@ -39,3 +42,19 @@ func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceCl func (s *MlflowExperiment) TerraformResourceName() string { return "databricks_mlflow_experiment" } + +func (s *MlflowExperiment) InitializeURL(baseURL url.URL) { + if s.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("ml/experiments/%s", s.ID) + s.URL = baseURL.String() +} + +func (s *MlflowExperiment) GetName() string { + return s.Name +} + +func (s *MlflowExperiment) GetURL() string { + return s.URL +} diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 300474e35..9ead254d8 100644 --- a/bundle/config/resources/mlflow_model.go +++ 
b/bundle/config/resources/mlflow_model.go @@ -2,6 +2,8 @@ package resources import ( "context" + "fmt" + "net/url" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" @@ -13,6 +15,7 @@ type MlflowModel struct { ID string `json:"id,omitempty" bundle:"readonly"` Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` *ml.Model } @@ -39,3 +42,19 @@ func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient, func (s *MlflowModel) TerraformResourceName() string { return "databricks_mlflow_model" } + +func (s *MlflowModel) InitializeURL(baseURL url.URL) { + if s.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("ml/models/%s", s.ID) + s.URL = baseURL.String() +} + +func (s *MlflowModel) GetName() string { + return s.Name +} + +func (s *MlflowModel) GetURL() string { + return s.URL +} diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index 5efb7ea26..7f3ae00c8 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -2,6 +2,8 @@ package resources import ( "context" + "fmt" + "net/url" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" @@ -23,6 +25,7 @@ type ModelServingEndpoint struct { Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` } func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error { @@ -47,3 +50,19 @@ func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.Workspa func (s *ModelServingEndpoint) TerraformResourceName() string { return "databricks_model_serving" } + +func (s *ModelServingEndpoint) InitializeURL(baseURL url.URL) { + if s.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("ml/endpoints/%s", s.ID) + s.URL = baseURL.String() +} + +func (s *ModelServingEndpoint) GetName() string { + return s.Name +} + +func (s *ModelServingEndpoint) GetURL() string { + return s.URL +} diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index 55270be65..b3311d8e2 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -2,6 +2,8 @@ package resources import ( "context" + "fmt" + "net/url" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" @@ -13,6 +15,7 @@ type Pipeline struct { ID string `json:"id,omitempty" bundle:"readonly"` Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` *pipelines.PipelineSpec } @@ -39,3 +42,19 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id func (p *Pipeline) TerraformResourceName() string { return "databricks_pipeline" } + +func (p *Pipeline) InitializeURL(baseURL url.URL) { + if p.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("pipelines/%s", p.ID) + p.URL = baseURL.String() +} + +func (p *Pipeline) GetName() string { + return p.Name +} + +func (s *Pipeline) GetURL() string { + return s.URL +} diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index 9160782cd..3c823e625 100644 --- a/bundle/config/resources/quality_monitor.go +++ 
b/bundle/config/resources/quality_monitor.go @@ -2,6 +2,9 @@ package resources import ( "context" + "fmt" + "net/url" + "strings" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" @@ -20,6 +23,7 @@ type QualityMonitor struct { ID string `json:"id,omitempty" bundle:"readonly"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` } func (s *QualityMonitor) UnmarshalJSON(b []byte) error { @@ -44,3 +48,19 @@ func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClie func (s *QualityMonitor) TerraformResourceName() string { return "databricks_quality_monitor" } + +func (s *QualityMonitor) InitializeURL(baseURL url.URL) { + if s.TableName == "" { + return + } + baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.TableName, ".", "/")) + s.URL = baseURL.String() +} + +func (s *QualityMonitor) GetName() string { + return s.TableName +} + +func (s *QualityMonitor) GetURL() string { + return s.URL +} diff --git a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go index 6033ffdf2..c44526d09 100644 --- a/bundle/config/resources/registered_model.go +++ b/bundle/config/resources/registered_model.go @@ -2,6 +2,9 @@ package resources import ( "context" + "fmt" + "net/url" + "strings" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" @@ -24,6 +27,7 @@ type RegisteredModel struct { *catalog.CreateRegisteredModelRequest ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` } func (s *RegisteredModel) UnmarshalJSON(b []byte) error { @@ -48,3 +52,19 @@ func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceCli func (s *RegisteredModel) TerraformResourceName() string { return "databricks_registered_model" } + +func (s *RegisteredModel) InitializeURL(baseURL url.URL) { + if s.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("explore/data/models/%s", strings.ReplaceAll(s.ID, ".", "/")) + s.URL = baseURL.String() +} + +func (s *RegisteredModel) GetName() string { + return s.Name +} + +func (s *RegisteredModel) GetURL() string { + return s.URL +} diff --git a/bundle/config/resources/schema.go b/bundle/config/resources/schema.go index 7ab00495a..a9f905cf1 100644 --- a/bundle/config/resources/schema.go +++ b/bundle/config/resources/schema.go @@ -1,6 +1,12 @@ package resources import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/catalog" ) @@ -16,6 +22,31 @@ type Schema struct { *catalog.CreateSchema ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` +} + +func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + return false, fmt.Errorf("schema.Exists() is not supported") +} + +func (s *Schema) TerraformResourceName() string { + return "databricks_schema" +} + +func (s *Schema) InitializeURL(baseURL url.URL) { + if s.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.ID, ".", "/")) + s.URL = baseURL.String() +} + +func (s *Schema) GetURL() string { + return s.URL +} + +func (s *Schema) GetName() string { + return s.Name } func (s *Schema) UnmarshalJSON(b []byte) error { diff --git 
a/bundle/config/resources_test.go b/bundle/config/resources_test.go index c1b76118c..9ae73b22a 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -63,17 +63,37 @@ func TestCustomMarshallerIsImplemented(t *testing.T) { } } +func TestResourcesAllResourcesCompleteness(t *testing.T) { + r := Resources{} + rt := reflect.TypeOf(r) + + // Collect set of includes resource types + var types []string + for _, group := range r.AllResources() { + types = append(types, group.Description.PluralName) + } + + for i := 0; i < rt.NumField(); i++ { + field := rt.Field(i) + jsonTag := field.Tag.Get("json") + + if idx := strings.Index(jsonTag, ","); idx != -1 { + jsonTag = jsonTag[:idx] + } + + assert.Contains(t, types, jsonTag, "Field %s is missing in AllResources", field.Name) + } +} + func TestSupportedResources(t *testing.T) { - expected := map[string]ResourceDescription{} + // Please add your resource to the SupportedResources() function in resources.go if you add a new resource. + actual := SupportedResources() + typ := reflect.TypeOf(Resources{}) for i := 0; i < typ.NumField(); i++ { field := typ.Field(i) jsonTags := strings.Split(field.Tag.Get("json"), ",") - singularName := strings.TrimSuffix(jsonTags[0], "s") - expected[jsonTags[0]] = ResourceDescription{SingularName: singularName} + pluralName := jsonTags[0] + assert.Equal(t, actual[pluralName].PluralName, pluralName) } - - // Please add your resource to the SupportedResources() function in resources.go - // if you are adding a new resource. - assert.Equal(t, expected, SupportedResources()) } diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go index 3e52d5f16..2f7affbf3 100644 --- a/bundle/render/render_text_output.go +++ b/bundle/render/render_text_output.go @@ -1,9 +1,11 @@ package render import ( + "context" "fmt" "io" "path/filepath" + "sort" "strings" "text/template" @@ -29,7 +31,7 @@ var renderFuncMap = template.FuncMap{ }, } -const summaryTemplate = `{{- if .Name -}} +const summaryHeaderTemplate = `{{- if .Name -}} Name: {{ .Name | bold }} {{- if .Target }} Target: {{ .Target | bold }} @@ -46,12 +48,30 @@ Workspace: Path: {{ .Path | bold }} {{- end }} {{- end }} +{{ end -}}` -{{ end -}} - -{{ .Trailer }} +const resourcesTemplate = `Resources: +{{- range . }} + {{ .GroupName }}: + {{- range .Resources }} + {{ .Key | bold }}: + Name: {{ .Name }} + URL: {{ if .URL }}{{ .URL | cyan }}{{ else }}{{ "(not deployed)" | cyan }}{{ end }} + {{- end }} +{{- end }} ` +type ResourceGroup struct { + GroupName string + Resources []ResourceInfo +} + +type ResourceInfo struct { + Key string + Name string + URL string +} + func pluralize(n int, singular, plural string) string { if n == 1 { return fmt.Sprintf("%d %s", n, singular) @@ -74,20 +94,20 @@ func buildTrailer(diags diag.Diagnostics) string { case len(parts) >= 3: first := strings.Join(parts[:len(parts)-1], ", ") last := parts[len(parts)-1] - return fmt.Sprintf("Found %s, and %s", first, last) + return fmt.Sprintf("Found %s, and %s\n", first, last) case len(parts) == 2: - return fmt.Sprintf("Found %s and %s", parts[0], parts[1]) + return fmt.Sprintf("Found %s and %s\n", parts[0], parts[1]) case len(parts) == 1: - return fmt.Sprintf("Found %s", parts[0]) + return fmt.Sprintf("Found %s\n", parts[0]) default: // No diagnostics to print. 
- return color.GreenString("Validation OK!") + return color.GreenString("Validation OK!\n") } } -func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { +func renderSummaryHeaderTemplate(out io.Writer, b *bundle.Bundle) error { if b == nil { - return renderSummaryTemplate(out, &bundle.Bundle{}, diags) + return renderSummaryHeaderTemplate(out, &bundle.Bundle{}) } var currentUser = &iam.User{} @@ -98,20 +118,19 @@ func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnosti } } - t := template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryTemplate)) + t := template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryHeaderTemplate)) err := t.Execute(out, map[string]any{ - "Name": b.Config.Bundle.Name, - "Target": b.Config.Bundle.Target, - "User": currentUser.UserName, - "Path": b.Config.Workspace.RootPath, - "Host": b.Config.Workspace.Host, - "Trailer": buildTrailer(diags), + "Name": b.Config.Bundle.Name, + "Target": b.Config.Bundle.Target, + "User": currentUser.UserName, + "Path": b.Config.Workspace.RootPath, + "Host": b.Config.Workspace.Host, }) return err } -func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { +func renderDiagnosticsOnly(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { for _, d := range diags { for i := range d.Locations { if b == nil { @@ -139,19 +158,73 @@ type RenderOptions struct { RenderSummaryTable bool } -// RenderTextOutput renders the diagnostics in a human-readable format. -func RenderTextOutput(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, opts RenderOptions) error { - err := renderDiagnostics(out, b, diags) +// RenderDiagnostics renders the diagnostics in a human-readable format. 
+func RenderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, opts RenderOptions) error { + err := renderDiagnosticsOnly(out, b, diags) if err != nil { return fmt.Errorf("failed to render diagnostics: %w", err) } if opts.RenderSummaryTable { - err = renderSummaryTemplate(out, b, diags) - if err != nil { - return fmt.Errorf("failed to render summary: %w", err) + if b != nil { + err = renderSummaryHeaderTemplate(out, b) + if err != nil { + return fmt.Errorf("failed to render summary: %w", err) + } + io.WriteString(out, "\n") } + trailer := buildTrailer(diags) + io.WriteString(out, trailer) } return nil } + +func RenderSummary(ctx context.Context, out io.Writer, b *bundle.Bundle) error { + if err := renderSummaryHeaderTemplate(out, b); err != nil { + return err + } + + var resourceGroups []ResourceGroup + + for _, group := range b.Config.Resources.AllResources() { + resources := make([]ResourceInfo, 0, len(group.Resources)) + for key, resource := range group.Resources { + resources = append(resources, ResourceInfo{ + Key: key, + Name: resource.GetName(), + URL: resource.GetURL(), + }) + } + + if len(resources) > 0 { + resourceGroups = append(resourceGroups, ResourceGroup{ + GroupName: group.Description.PluralTitle, + Resources: resources, + }) + } + } + + if err := renderResourcesTemplate(out, resourceGroups); err != nil { + return fmt.Errorf("failed to render resources template: %w", err) + } + + return nil +} + +// Helper function to sort and render resource groups using the template +func renderResourcesTemplate(out io.Writer, resourceGroups []ResourceGroup) error { + // Sort everything to ensure consistent output + sort.Slice(resourceGroups, func(i, j int) bool { + return resourceGroups[i].GroupName < resourceGroups[j].GroupName + }) + for _, group := range resourceGroups { + sort.Slice(group.Resources, func(i, j int) bool { + return group.Resources[i].Key < group.Resources[j].Key + }) + } + + t := template.Must(template.New("resources").Funcs(renderFuncMap).Parse(resourcesTemplate)) + + return t.Execute(out, resourceGroups) +} diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go index 1a41fa01c..cd9e7723b 100644 --- a/bundle/render/render_text_output_test.go +++ b/bundle/render/render_text_output_test.go @@ -2,14 +2,21 @@ package render import ( "bytes" + "context" + "io" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" - assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -326,7 +333,7 @@ func TestRenderTextOutput(t *testing.T) { t.Run(tc.name, func(t *testing.T) { writer := &bytes.Buffer{} - err := RenderTextOutput(writer, tc.bundle, tc.diags, tc.opts) + err := RenderDiagnostics(writer, tc.bundle, tc.diags, tc.opts) require.NoError(t, err) assert.Equal(t, tc.expected, writer.String()) @@ -468,7 +475,7 @@ func TestRenderDiagnostics(t *testing.T) { t.Run(tc.name, func(t *testing.T) { writer := &bytes.Buffer{} - err := renderDiagnostics(writer, bundle, tc.diags) + err := 
renderDiagnosticsOnly(writer, bundle, tc.diags) require.NoError(t, err) assert.Equal(t, tc.expected, writer.String()) @@ -479,8 +486,105 @@ func TestRenderDiagnostics(t *testing.T) { func TestRenderSummaryTemplate_nilBundle(t *testing.T) { writer := &bytes.Buffer{} - err := renderSummaryTemplate(writer, nil, nil) + err := renderSummaryHeaderTemplate(writer, nil) require.NoError(t, err) + io.WriteString(writer, buildTrailer(nil)) + assert.Equal(t, "Validation OK!\n", writer.String()) } + +func TestRenderSummary(t *testing.T) { + ctx := context.Background() + + // Create a mock bundle with various resources + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "test-bundle", + Target: "test-target", + }, + Workspace: config.Workspace{ + Host: "https://mycompany.databricks.com/", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + ID: "1", + URL: "https://url1", + JobSettings: &jobs.JobSettings{Name: "job1-name"}, + }, + "job2": { + ID: "2", + URL: "https://url2", + JobSettings: &jobs.JobSettings{Name: "job2-name"}, + }, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline2": { + ID: "4", + // no URL + PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline2-name"}, + }, + "pipeline1": { + ID: "3", + URL: "https://url3", + PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1-name"}, + }, + }, + Schemas: map[string]*resources.Schema{ + "schema1": { + ID: "catalog.schema", + CreateSchema: &catalog.CreateSchema{ + Name: "schema", + }, + // no URL + }, + }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "endpoint1": { + ID: "7", + CreateServingEndpoint: &serving.CreateServingEndpoint{ + Name: "my_serving_endpoint", + }, + URL: "https://url4", + }, + }, + }, + }, + } + + writer := &bytes.Buffer{} + err := RenderSummary(ctx, writer, b) + require.NoError(t, err) + + expectedSummary := `Name: test-bundle +Target: test-target +Workspace: + Host: https://mycompany.databricks.com/ +Resources: + Jobs: + job1: + Name: job1-name + URL: https://url1 + job2: + Name: job2-name + URL: https://url2 + Model Serving Endpoints: + endpoint1: + Name: my_serving_endpoint + URL: https://url4 + Pipelines: + pipeline1: + Name: pipeline1-name + URL: https://url3 + pipeline2: + Name: pipeline2-name + URL: (not deployed) + Schemas: + schema1: + Name: schema + URL: (not deployed) +` + assert.Equal(t, expectedSummary, writer.String()) +} diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index f1c85cb3d..a25e02f6c 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -78,7 +78,7 @@ func newDeployCommand() *cobra.Command { } renderOpts := render.RenderOptions{RenderSummaryTable: false} - err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags, renderOpts) + err := render.RenderDiagnostics(cmd.OutOrStdout(), b, diags, renderOpts) if err != nil { return fmt.Errorf("failed to render output: %w", err) } diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index 5a64b46c0..8c34dd612 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -8,8 +8,10 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/render" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/flags" @@ -19,11 +21,8 @@ import ( func newSummaryCommand() *cobra.Command { cmd := &cobra.Command{ 
Use: "summary", - Short: "Describe the bundle resources and their deployment states", + Short: "Summarize resources deployed by this bundle", Args: root.NoArgs, - - // This command is currently intended for the Databricks VSCode extension only - Hidden: true, } var forcePull bool @@ -60,14 +59,15 @@ func newSummaryCommand() *cobra.Command { } } - diags = bundle.Apply(ctx, b, terraform.Load()) + diags = bundle.Apply(ctx, b, + bundle.Seq(terraform.Load(), mutator.InitializeURLs())) if err := diags.Error(); err != nil { return err } switch root.OutputType(cmd) { case flags.OutputText: - return fmt.Errorf("%w, only json output is supported", errors.ErrUnsupported) + return render.RenderSummary(ctx, cmd.OutOrStdout(), b) case flags.OutputJSON: buf, err := json.MarshalIndent(b.Config, "", " ") if err != nil { diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 496d5d2b5..5331e7e7b 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -54,7 +54,7 @@ func newValidateCommand() *cobra.Command { switch root.OutputType(cmd) { case flags.OutputText: renderOpts := render.RenderOptions{RenderSummaryTable: true} - err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags, renderOpts) + err := render.RenderDiagnostics(cmd.OutOrStdout(), b, diags, renderOpts) if err != nil { return fmt.Errorf("failed to render output: %w", err) } From 0c9c90208f00482d7bf725a0bb865e1f2a5f706f Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 18 Oct 2024 17:37:16 +0200 Subject: [PATCH 12/25] Added a warning when incorrect permissions used for `/Workspace/Shared` bundle root (#1821) ## Changes Added a warning when incorrect permissions used for `/Workspace/Shared` bundle root ## Tests Added unit test --------- Co-authored-by: Pieter Noordhuis --- bundle/permissions/validate.go | 56 +++++++++++++++++++ bundle/permissions/validate_test.go | 66 +++++++++++++++++++++++ bundle/permissions/workspace_root.go | 8 +-- bundle/permissions/workspace_root_test.go | 4 +- bundle/phases/initialize.go | 3 ++ 5 files changed, 131 insertions(+), 6 deletions(-) create mode 100644 bundle/permissions/validate.go create mode 100644 bundle/permissions/validate_test.go diff --git a/bundle/permissions/validate.go b/bundle/permissions/validate.go new file mode 100644 index 000000000..acd2e6062 --- /dev/null +++ b/bundle/permissions/validate.go @@ -0,0 +1,56 @@ +package permissions + +import ( + "context" + "fmt" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" +) + +type validateSharedRootPermissions struct { +} + +func ValidateSharedRootPermissions() bundle.Mutator { + return &validateSharedRootPermissions{} +} + +func (*validateSharedRootPermissions) Name() string { + return "ValidateSharedRootPermissions" +} + +func (*validateSharedRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + if isWorkspaceSharedRoot(b.Config.Workspace.RootPath) { + return isUsersGroupPermissionSet(b) + } + + return nil +} + +func isWorkspaceSharedRoot(path string) bool { + return strings.HasPrefix(path, "/Workspace/Shared/") +} + +// isUsersGroupPermissionSet checks that top-level permissions set for bundle contain group_name: users with CAN_MANAGE permission. 
+func isUsersGroupPermissionSet(b *bundle.Bundle) diag.Diagnostics { + var diags diag.Diagnostics + + allUsers := false + for _, p := range b.Config.Permissions { + if p.GroupName == "users" && p.Level == CAN_MANAGE { + allUsers = true + break + } + } + + if !allUsers { + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("the bundle root path %s is writable by all workspace users", b.Config.Workspace.RootPath), + Detail: "The bundle is configured to use /Workspace/Shared, which will give read/write access to all users. If this is intentional, add CAN_MANAGE for 'group_name: users' permission to your bundle configuration. If the deployment should be restricted, move it to a restricted folder such as /Workspace/Users/.", + }) + } + + return diags +} diff --git a/bundle/permissions/validate_test.go b/bundle/permissions/validate_test.go new file mode 100644 index 000000000..ff132b4e1 --- /dev/null +++ b/bundle/permissions/validate_test.go @@ -0,0 +1,66 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestValidateSharedRootPermissionsForShared(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Workspace/Shared/foo/bar", + }, + Permissions: []resources.Permission{ + {Level: CAN_MANAGE, GroupName: "users"}, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}}, + "job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}}, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions())) + require.Empty(t, diags) +} + +func TestValidateSharedRootPermissionsForSharedError(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Workspace/Shared/foo/bar", + }, + Permissions: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "foo@bar.com"}, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}}, + "job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}}, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions())) + require.Len(t, diags, 1) + require.Equal(t, "the bundle root path /Workspace/Shared/foo/bar is writable by all workspace users", diags[0].Summary) + require.Equal(t, diag.Warning, diags[0].Severity) +} diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go index a59a039f6..e7867521e 100644 --- a/bundle/permissions/workspace_root.go +++ b/bundle/permissions/workspace_root.go @@ -16,6 +16,10 @@ func ApplyWorkspaceRootPermissions() bundle.Mutator { return &workspaceRootPermissions{} } +func (*workspaceRootPermissions) Name() string { + return "ApplyWorkspaceRootPermissions" +} + // Apply implements bundle.Mutator. 
func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := giveAccessForWorkspaceRoot(ctx, b) @@ -26,10 +30,6 @@ func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) di return nil } -func (*workspaceRootPermissions) Name() string { - return "ApplyWorkspaceRootPermissions" -} - func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error { permissions := make([]workspace.WorkspaceObjectAccessControlRequest, 0) diff --git a/bundle/permissions/workspace_root_test.go b/bundle/permissions/workspace_root_test.go index 5e23a1da8..6b37b2c41 100644 --- a/bundle/permissions/workspace_root_test.go +++ b/bundle/permissions/workspace_root_test.go @@ -69,6 +69,6 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) { WorkspaceObjectType: "directories", }).Return(nil, nil) - diags := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions()) - require.NoError(t, diags.Error()) + diags := bundle.Apply(context.Background(), b, bundle.Seq(ValidateSharedRootPermissions(), ApplyWorkspaceRootPermissions())) + require.Empty(t, diags) } diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index da5b2eff6..5582016fd 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -76,8 +76,11 @@ func Initialize() bundle.Mutator { mutator.TranslatePaths(), trampoline.WrapperWarning(), + + permissions.ValidateSharedRootPermissions(), permissions.ApplyBundlePermissions(), permissions.FilterCurrentUser(), + metadata.AnnotateJobs(), metadata.AnnotatePipelines(), terraform.Initialize(), From 3b1fb6d2dfd10b3388f8fdc38c9ea611bb5f88ee Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 18 Oct 2024 17:38:10 +0200 Subject: [PATCH 13/25] Remove Terraform conversion function that's no longer used (#1840) ## Changes In #1218, the `BundleToTerraform` function was replaced by a version based on the dynamic configuration tree. Since then, it has only been used in tests to confirm that the output of the old function was equal to the output of the new function. We no longer need this and can exclusively rely on the version based on the dynamic configuration tree. ## Tests Tests pass. 
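For reference, a minimal sketch of the only remaining conversion path, mirroring the
`produceTerraformConfiguration` helper added to the tests below (import paths are
assumed from the surrounding code):

```
package terraform

import (
	"context"

	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/internal/tf/schema"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
)

// toTerraform converts a typed bundle configuration to its Terraform JSON
// representation by going through the dynamic configuration tree.
func toTerraform(ctx context.Context, cfg config.Root) (*schema.Root, error) {
	// Convert the typed configuration into a dynamic value.
	vin, err := convert.FromTyped(cfg, dyn.NilValue)
	if err != nil {
		return nil, err
	}

	// Convert the dynamic value into the Terraform root schema.
	return BundleToTerraformWithDynValue(ctx, vin)
}
```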
--- bundle/deploy/terraform/convert.go | 240 ------------------------ bundle/deploy/terraform/convert_test.go | 142 +++++++------- 2 files changed, 63 insertions(+), 319 deletions(-) diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index b8993c031..0ba8bb1f4 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -2,9 +2,7 @@ package terraform import ( "context" - "encoding/json" "fmt" - "sort" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" @@ -14,244 +12,6 @@ import ( tfjson "github.com/hashicorp/terraform-json" ) -func conv(from any, to any) { - buf, _ := json.Marshal(from) - json.Unmarshal(buf, &to) -} - -func convPermissions(acl []resources.Permission) *schema.ResourcePermissions { - if len(acl) == 0 { - return nil - } - - resource := schema.ResourcePermissions{} - for _, ac := range acl { - resource.AccessControl = append(resource.AccessControl, convPermission(ac)) - } - - return &resource -} - -func convPermission(ac resources.Permission) schema.ResourcePermissionsAccessControl { - dst := schema.ResourcePermissionsAccessControl{ - PermissionLevel: ac.Level, - } - if ac.UserName != "" { - dst.UserName = ac.UserName - } - if ac.GroupName != "" { - dst.GroupName = ac.GroupName - } - if ac.ServicePrincipalName != "" { - dst.ServicePrincipalName = ac.ServicePrincipalName - } - return dst -} - -func convGrants(acl []resources.Grant) *schema.ResourceGrants { - if len(acl) == 0 { - return nil - } - - resource := schema.ResourceGrants{} - for _, ac := range acl { - resource.Grant = append(resource.Grant, schema.ResourceGrantsGrant{ - Privileges: ac.Privileges, - Principal: ac.Principal, - }) - } - - return &resource -} - -// BundleToTerraform converts resources in a bundle configuration -// to the equivalent Terraform JSON representation. -// -// Note: This function is an older implementation of the conversion logic. It is -// no longer used in any code paths. It is kept around to be used in tests. -// New resources do not need to modify this function and can instead can define -// the conversion login in the tfdyn package. -func BundleToTerraform(config *config.Root) *schema.Root { - tfroot := schema.NewRoot() - tfroot.Provider = schema.NewProviders() - tfroot.Resource = schema.NewResources() - noResources := true - - for k, src := range config.Resources.Jobs { - noResources = false - var dst schema.ResourceJob - conv(src, &dst) - - if src.JobSettings != nil { - sort.Slice(src.JobSettings.Tasks, func(i, j int) bool { - return src.JobSettings.Tasks[i].TaskKey < src.JobSettings.Tasks[j].TaskKey - }) - - for _, v := range src.Tasks { - var t schema.ResourceJobTask - conv(v, &t) - - for _, v_ := range v.Libraries { - var l schema.ResourceJobTaskLibrary - conv(v_, &l) - t.Library = append(t.Library, l) - } - - // Convert for_each_task libraries - if v.ForEachTask != nil { - for _, v_ := range v.ForEachTask.Task.Libraries { - var l schema.ResourceJobTaskForEachTaskTaskLibrary - conv(v_, &l) - t.ForEachTask.Task.Library = append(t.ForEachTask.Task.Library, l) - } - - } - - dst.Task = append(dst.Task, t) - } - - for _, v := range src.JobClusters { - var t schema.ResourceJobJobCluster - conv(v, &t) - dst.JobCluster = append(dst.JobCluster, t) - } - - // Unblock downstream work. To be addressed more generally later. 
- if git := src.GitSource; git != nil { - dst.GitSource = &schema.ResourceJobGitSource{ - Url: git.GitUrl, - Branch: git.GitBranch, - Commit: git.GitCommit, - Provider: string(git.GitProvider), - Tag: git.GitTag, - } - } - - for _, v := range src.Parameters { - var t schema.ResourceJobParameter - conv(v, &t) - dst.Parameter = append(dst.Parameter, t) - } - } - - tfroot.Resource.Job[k] = &dst - - // Configure permissions for this resource. - if rp := convPermissions(src.Permissions); rp != nil { - rp.JobId = fmt.Sprintf("${databricks_job.%s.id}", k) - tfroot.Resource.Permissions["job_"+k] = rp - } - } - - for k, src := range config.Resources.Pipelines { - noResources = false - var dst schema.ResourcePipeline - conv(src, &dst) - - if src.PipelineSpec != nil { - for _, v := range src.Libraries { - var l schema.ResourcePipelineLibrary - conv(v, &l) - dst.Library = append(dst.Library, l) - } - - for _, v := range src.Clusters { - var l schema.ResourcePipelineCluster - conv(v, &l) - dst.Cluster = append(dst.Cluster, l) - } - - for _, v := range src.Notifications { - var l schema.ResourcePipelineNotification - conv(v, &l) - dst.Notification = append(dst.Notification, l) - } - } - - tfroot.Resource.Pipeline[k] = &dst - - // Configure permissions for this resource. - if rp := convPermissions(src.Permissions); rp != nil { - rp.PipelineId = fmt.Sprintf("${databricks_pipeline.%s.id}", k) - tfroot.Resource.Permissions["pipeline_"+k] = rp - } - } - - for k, src := range config.Resources.Models { - noResources = false - var dst schema.ResourceMlflowModel - conv(src, &dst) - tfroot.Resource.MlflowModel[k] = &dst - - // Configure permissions for this resource. - if rp := convPermissions(src.Permissions); rp != nil { - rp.RegisteredModelId = fmt.Sprintf("${databricks_mlflow_model.%s.registered_model_id}", k) - tfroot.Resource.Permissions["mlflow_model_"+k] = rp - } - } - - for k, src := range config.Resources.Experiments { - noResources = false - var dst schema.ResourceMlflowExperiment - conv(src, &dst) - tfroot.Resource.MlflowExperiment[k] = &dst - - // Configure permissions for this resource. - if rp := convPermissions(src.Permissions); rp != nil { - rp.ExperimentId = fmt.Sprintf("${databricks_mlflow_experiment.%s.id}", k) - tfroot.Resource.Permissions["mlflow_experiment_"+k] = rp - } - } - - for k, src := range config.Resources.ModelServingEndpoints { - noResources = false - var dst schema.ResourceModelServing - conv(src, &dst) - tfroot.Resource.ModelServing[k] = &dst - - // Configure permissions for this resource. - if rp := convPermissions(src.Permissions); rp != nil { - rp.ServingEndpointId = fmt.Sprintf("${databricks_model_serving.%s.serving_endpoint_id}", k) - tfroot.Resource.Permissions["model_serving_"+k] = rp - } - } - - for k, src := range config.Resources.RegisteredModels { - noResources = false - var dst schema.ResourceRegisteredModel - conv(src, &dst) - tfroot.Resource.RegisteredModel[k] = &dst - - // Configure permissions for this resource. 
- if rp := convGrants(src.Grants); rp != nil { - rp.Function = fmt.Sprintf("${databricks_registered_model.%s.id}", k) - tfroot.Resource.Grants["registered_model_"+k] = rp - } - } - - for k, src := range config.Resources.QualityMonitors { - noResources = false - var dst schema.ResourceQualityMonitor - conv(src, &dst) - tfroot.Resource.QualityMonitor[k] = &dst - } - - for k, src := range config.Resources.Clusters { - noResources = false - var dst schema.ResourceCluster - conv(src, &dst) - tfroot.Resource.Cluster[k] = &dst - } - - // We explicitly set "resource" to nil to omit it from a JSON encoding. - // This is required because the terraform CLI requires >= 1 resources defined - // if the "resource" property is used in a .tf.json file. - if noResources { - tfroot.Resource = nil - } - return tfroot -} - // BundleToTerraformWithDynValue converts resources in a bundle configuration // to the equivalent Terraform JSON representation. func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema.Root, error) { diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 4c6866d9d..575ff00bc 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -2,7 +2,6 @@ package terraform import ( "context" - "encoding/json" "reflect" "testing" @@ -21,6 +20,27 @@ import ( "github.com/stretchr/testify/require" ) +func produceTerraformConfiguration(t *testing.T, config config.Root) *schema.Root { + vin, err := convert.FromTyped(config, dyn.NilValue) + require.NoError(t, err) + out, err := BundleToTerraformWithDynValue(context.Background(), vin) + require.NoError(t, err) + return out +} + +func convertToResourceStruct[T any](t *testing.T, resource *T, data any) { + require.NotNil(t, resource) + require.NotNil(t, data) + + // Convert data to a dyn.Value. + vin, err := convert.FromTyped(data, dyn.NilValue) + require.NoError(t, err) + + // Convert the dyn.Value to a struct. 
+ err = convert.ToTyped(resource, vin) + require.NoError(t, err) +} + func TestBundleToTerraformJob(t *testing.T) { var src = resources.Job{ JobSettings: &jobs.JobSettings{ @@ -58,8 +78,9 @@ func TestBundleToTerraformJob(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Job["my_job"].(*schema.ResourceJob) + var resource schema.ResourceJob + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Job["my_job"]) assert.Equal(t, "my job", resource.Name) assert.Len(t, resource.JobCluster, 1) @@ -68,8 +89,6 @@ func TestBundleToTerraformJob(t *testing.T) { assert.Equal(t, "param1", resource.Parameter[0].Name) assert.Equal(t, "param2", resource.Parameter[1].Name) assert.Nil(t, out.Data) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformJobPermissions(t *testing.T) { @@ -90,15 +109,14 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Permissions["job_my_job"].(*schema.ResourcePermissions) + var resource schema.ResourcePermissions + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Permissions["job_my_job"]) assert.NotEmpty(t, resource.JobId) assert.Len(t, resource.AccessControl, 1) assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformJobTaskLibraries(t *testing.T) { @@ -128,15 +146,14 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Job["my_job"].(*schema.ResourceJob) + var resource schema.ResourceJob + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Job["my_job"]) assert.Equal(t, "my job", resource.Name) require.Len(t, resource.Task, 1) require.Len(t, resource.Task[0].Library, 1) assert.Equal(t, "mlflow", resource.Task[0].Library[0].Pypi.Package) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { @@ -172,15 +189,14 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Job["my_job"].(*schema.ResourceJob) + var resource schema.ResourceJob + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Job["my_job"]) assert.Equal(t, "my job", resource.Name) require.Len(t, resource.Task, 1) require.Len(t, resource.Task[0].ForEachTask.Task.Library, 1) assert.Equal(t, "mlflow", resource.Task[0].ForEachTask.Task.Library[0].Pypi.Package) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformPipeline(t *testing.T) { @@ -230,8 +246,9 @@ func TestBundleToTerraformPipeline(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Pipeline["my_pipeline"].(*schema.ResourcePipeline) + var resource schema.ResourcePipeline + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Pipeline["my_pipeline"]) assert.Equal(t, "my pipeline", resource.Name) assert.Len(t, resource.Library, 2) @@ -241,8 +258,6 @@ func TestBundleToTerraformPipeline(t *testing.T) { assert.Equal(t, resource.Notification[1].Alerts, []string{"on-update-failure", "on-flow-failure"}) assert.Equal(t, resource.Notification[1].EmailRecipients, 
[]string{"jane@doe.com", "john@doe.com"}) assert.Nil(t, out.Data) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformPipelinePermissions(t *testing.T) { @@ -263,15 +278,14 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Permissions["pipeline_my_pipeline"].(*schema.ResourcePermissions) + var resource schema.ResourcePermissions + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Permissions["pipeline_my_pipeline"]) assert.NotEmpty(t, resource.PipelineId) assert.Len(t, resource.AccessControl, 1) assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModel(t *testing.T) { @@ -300,8 +314,9 @@ func TestBundleToTerraformModel(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.MlflowModel["my_model"].(*schema.ResourceMlflowModel) + var resource schema.ResourceMlflowModel + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.MlflowModel["my_model"]) assert.Equal(t, "name", resource.Name) assert.Equal(t, "description", resource.Description) @@ -311,8 +326,6 @@ func TestBundleToTerraformModel(t *testing.T) { assert.Equal(t, "k2", resource.Tags[1].Key) assert.Equal(t, "v2", resource.Tags[1].Value) assert.Nil(t, out.Data) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelPermissions(t *testing.T) { @@ -336,15 +349,14 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Permissions["mlflow_model_my_model"].(*schema.ResourcePermissions) + var resource schema.ResourcePermissions + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Permissions["mlflow_model_my_model"]) assert.NotEmpty(t, resource.RegisteredModelId) assert.Len(t, resource.AccessControl, 1) assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformExperiment(t *testing.T) { @@ -362,13 +374,12 @@ func TestBundleToTerraformExperiment(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.MlflowExperiment["my_experiment"].(*schema.ResourceMlflowExperiment) + var resource schema.ResourceMlflowExperiment + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.MlflowExperiment["my_experiment"]) assert.Equal(t, "name", resource.Name) assert.Nil(t, out.Data) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformExperimentPermissions(t *testing.T) { @@ -392,15 +403,14 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Permissions["mlflow_experiment_my_experiment"].(*schema.ResourcePermissions) + var resource schema.ResourcePermissions + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Permissions["mlflow_experiment_my_experiment"]) assert.NotEmpty(t, resource.ExperimentId) assert.Len(t, resource.AccessControl, 1) assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_READ", 
resource.AccessControl[0].PermissionLevel) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelServing(t *testing.T) { @@ -436,8 +446,9 @@ func TestBundleToTerraformModelServing(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.ModelServing["my_model_serving_endpoint"].(*schema.ResourceModelServing) + var resource schema.ResourceModelServing + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.ModelServing["my_model_serving_endpoint"]) assert.Equal(t, "name", resource.Name) assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName) @@ -447,8 +458,6 @@ func TestBundleToTerraformModelServing(t *testing.T) { assert.Equal(t, "model_name-1", resource.Config.TrafficConfig.Routes[0].ServedModelName) assert.Equal(t, 100, resource.Config.TrafficConfig.Routes[0].TrafficPercentage) assert.Nil(t, out.Data) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelServingPermissions(t *testing.T) { @@ -490,15 +499,14 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Permissions["model_serving_my_model_serving_endpoint"].(*schema.ResourcePermissions) + var resource schema.ResourcePermissions + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Permissions["model_serving_my_model_serving_endpoint"]) assert.NotEmpty(t, resource.ServingEndpointId) assert.Len(t, resource.AccessControl, 1) assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformRegisteredModel(t *testing.T) { @@ -519,16 +527,15 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.RegisteredModel["my_registered_model"].(*schema.ResourceRegisteredModel) + var resource schema.ResourceRegisteredModel + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.RegisteredModel["my_registered_model"]) assert.Equal(t, "name", resource.Name) assert.Equal(t, "catalog", resource.CatalogName) assert.Equal(t, "schema", resource.SchemaName) assert.Equal(t, "comment", resource.Comment) assert.Nil(t, out.Data) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { @@ -554,15 +561,14 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { }, } - out := BundleToTerraform(&config) - resource := out.Resource.Grants["registered_model_my_registered_model"].(*schema.ResourceGrants) + var resource schema.ResourceGrants + out := produceTerraformConfiguration(t, config) + convertToResourceStruct(t, &resource, out.Resource.Grants["registered_model_my_registered_model"]) assert.NotEmpty(t, resource.Function) assert.Len(t, resource.Grant, 1) assert.Equal(t, "jane@doe.com", resource.Grant[0].Principal) assert.Equal(t, "EXECUTE", resource.Grant[0].Privileges[0]) - - bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformDeletedResources(t *testing.T) { @@ -1154,25 +1160,3 @@ func AssertFullResourceCoverage(t *testing.T, config *config.Root) { } } } - -func assertEqualTerraformRoot(t *testing.T, a, b *schema.Root) { - ba, err := json.Marshal(a) - require.NoError(t, err) - bb, err := json.Marshal(b) - require.NoError(t, err) - 
assert.JSONEq(t, string(ba), string(bb)) -} - -func bundleToTerraformEquivalenceTest(t *testing.T, config *config.Root) { - t.Run("dyn equivalence", func(t *testing.T) { - tf1 := BundleToTerraform(config) - - vin, err := convert.FromTyped(config, dyn.NilValue) - require.NoError(t, err) - tf2, err := BundleToTerraformWithDynValue(context.Background(), vin) - require.NoError(t, err) - - // Compare roots - assertEqualTerraformRoot(t, tf1, tf2) - }) -} From c9770503f8598ec9d6aeaafddca8c50003f96eb2 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 18 Oct 2024 17:42:05 +0200 Subject: [PATCH 14/25] Encode assumptions about the dashboards API in a test (#1839) ## Changes Dashboards can be imported either via its own CRUD API, or via the workspace import API. This test encodes the assumptions we can make about the API behavior. More specifically, the identity of the dashboard is retained on workspace import, as are the properties that aren't surfaced in the workspace import API. ## Tests The integration test passes. --- internal/acc/workspace.go | 30 +++++++ internal/dashboard_assumptions_test.go | 110 +++++++++++++++++++++++++ 2 files changed, 140 insertions(+) create mode 100644 internal/dashboard_assumptions_test.go diff --git a/internal/acc/workspace.go b/internal/acc/workspace.go index 39374f229..69ab0e715 100644 --- a/internal/acc/workspace.go +++ b/internal/acc/workspace.go @@ -2,11 +2,14 @@ package acc import ( "context" + "fmt" "os" "testing" "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/require" ) @@ -94,3 +97,30 @@ func (t *WorkspaceT) RunPython(code string) (string, error) { require.True(t, ok, "unexpected type %T", results.Data) return output, nil } + +func (t *WorkspaceT) TemporaryWorkspaceDir(name ...string) string { + ctx := context.Background() + me, err := t.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName(name...)) + + t.Logf("Creating %s", basePath) + err = t.W.Workspace.MkdirsByPath(ctx, basePath) + require.NoError(t, err) + + // Remove test directory on test completion. + t.Cleanup(func() { + t.Logf("Removing %s", basePath) + err := t.W.Workspace.Delete(ctx, workspace.Delete{ + Path: basePath, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) + }) + + return basePath +} diff --git a/internal/dashboard_assumptions_test.go b/internal/dashboard_assumptions_test.go new file mode 100644 index 000000000..912e046b5 --- /dev/null +++ b/internal/dashboard_assumptions_test.go @@ -0,0 +1,110 @@ +package internal + +import ( + "encoding/base64" + "testing" + + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/dashboards" + "github.com/databricks/databricks-sdk-go/service/workspace" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Verify that importing a dashboard through the Workspace API retains the identity of the underying resource, +// as well as properties exclusively accessible through the dashboards API. 
+func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + t.Parallel() + + dashboardName := "New Dashboard" + dashboardPayload := []byte(`{"pages":[{"name":"2506f97a","displayName":"New Page"}]}`) + warehouseId := acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") + + dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-") + + dashboard, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{ + DisplayName: dashboardName, + ParentPath: dir, + SerializedDashboard: string(dashboardPayload), + WarehouseId: warehouseId, + }) + require.NoError(t, err) + t.Logf("Dashboard ID (per Lakeview API): %s", dashboard.DashboardId) + + // Overwrite the dashboard via the workspace API. + { + err := wt.W.Workspace.Import(ctx, workspace.Import{ + Format: workspace.ImportFormatAuto, + Path: dashboard.Path, + Content: base64.StdEncoding.EncodeToString(dashboardPayload), + Overwrite: true, + }) + require.NoError(t, err) + } + + // Cross-check consistency with the workspace object. + { + obj, err := wt.W.Workspace.GetStatusByPath(ctx, dashboard.Path) + require.NoError(t, err) + + // Confirm that the resource ID included in the response is equal to the dashboard ID. + require.Equal(t, dashboard.DashboardId, obj.ResourceId) + t.Logf("Dashboard ID (per workspace object status): %s", obj.ResourceId) + } + + // Try to overwrite the dashboard via the Lakeview API (and expect failure). + { + _, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{ + DisplayName: dashboardName, + ParentPath: dir, + SerializedDashboard: string(dashboardPayload), + }) + require.ErrorIs(t, err, apierr.ErrResourceAlreadyExists) + } + + // Retrieve the dashboard object and confirm that only select fields were updated by the import. + { + previousDashboard := dashboard + currentDashboard, err := wt.W.Lakeview.Get(ctx, dashboards.GetDashboardRequest{ + DashboardId: dashboard.DashboardId, + }) + require.NoError(t, err) + + // Convert the dashboard object to a [dyn.Value] to make comparison easier. + previous, err := convert.FromTyped(previousDashboard, dyn.NilValue) + require.NoError(t, err) + current, err := convert.FromTyped(currentDashboard, dyn.NilValue) + require.NoError(t, err) + + // Collect updated paths. + var updatedFieldPaths []string + _, err = merge.Override(previous, current, merge.OverrideVisitor{ + VisitDelete: func(basePath dyn.Path, left dyn.Value) error { + assert.Fail(t, "unexpected delete operation") + return nil + }, + VisitInsert: func(basePath dyn.Path, right dyn.Value) (dyn.Value, error) { + assert.Fail(t, "unexpected insert operation") + return right, nil + }, + VisitUpdate: func(basePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + updatedFieldPaths = append(updatedFieldPaths, basePath.String()) + return right, nil + }, + }) + require.NoError(t, err) + + // Confirm that only the expected fields have been updated. + assert.ElementsMatch(t, []string{ + "etag", + "update_time", + }, updatedFieldPaths) + } +} From eefda8c198c939098f90fa4abfa9c2f7f7b67590 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 18 Oct 2024 17:46:39 +0200 Subject: [PATCH 15/25] Fix path to repository-wide exclude file (#1837) ## Changes This file is at `info/exclude`, and not `info/excludes`. Also see https://git-scm.com/docs/gitignore. ## Tests Manually confirmed that these ignore patterns are now picked up. I created a repository with a pattern in this file and ran `sync` to confirm it ignores files matching the pattern. 
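For reference, a minimal sketch of how the fix can be exercised through the
`libs/git` package (the `vfs.MustNew` helper, the checkout path, and the
"scratch/" pattern are illustrative assumptions):

```
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/git"
	"github.com/databricks/cli/libs/vfs"
)

func main() {
	// Open a repository checkout; ignore rules from .git/info/exclude are
	// loaded alongside the .gitignore files.
	repo, err := git.NewRepository(vfs.MustNew("/path/to/checkout"))
	if err != nil {
		panic(err)
	}

	// With a pattern such as "scratch/" in .git/info/exclude, this path is
	// expected to be reported as ignored.
	ignored, err := repo.Ignore("scratch/notes.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println("ignored:", ignored)
}
```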
--- libs/git/repository.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/git/repository.go b/libs/git/repository.go index 6940ddac8..82ee7987b 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -252,8 +252,8 @@ func NewRepository(path vfs.Path) (*Repository, error) { newStringIgnoreRules([]string{ ".git", }), - // Load repository-wide excludes file. - repo.newIgnoreFile(".git/info/excludes"), + // Load repository-wide exclude file. + repo.newIgnoreFile(".git/info/exclude"), // Load root gitignore file. repo.newIgnoreFile(".gitignore"), } From ffdbec87cc5976587e73fd836a7fa8d1eaf6c6c3 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 21 Oct 2024 13:45:39 +0200 Subject: [PATCH 16/25] Added support for pip options in environment dependencies (#1842) ## Changes Added support for specifying pip options such as `--extra-index-url` and etc. in environments dependencies ``` environments: - environment_key: Default spec: client: "1" dependencies: - --extra-index-url https://foo@bar.com/packages/smth somepackage - json==1.0.0 ``` ## Tests Added regression test --------- Co-authored-by: Pieter Noordhuis --- bundle/config/mutator/translate_paths_test.go | 6 ++++++ bundle/libraries/local_path.go | 11 +++++++++++ 2 files changed, 17 insertions(+) diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index c03cee73e..9d655b27b 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -699,6 +699,9 @@ func TestTranslatePathJobEnvironments(t *testing.T) { "../dist/env2.whl", "simplejson", "/Workspace/Users/foo@bar.com/test.whl", + "--extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple foobar", + "foobar --extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple", + "https://foo@bar.com/packages/pypi/simple", }, }, }, @@ -719,6 +722,9 @@ func TestTranslatePathJobEnvironments(t *testing.T) { assert.Equal(t, strings.Join([]string{".", "dist", "env2.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) assert.Equal(t, "simplejson", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[2]) assert.Equal(t, "/Workspace/Users/foo@bar.com/test.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[3]) + assert.Equal(t, "--extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple foobar", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[4]) + assert.Equal(t, "foobar --extra-index-url https://name:token@gitlab.com/api/v4/projects/9876/packages/pypi/simple", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[5]) + assert.Equal(t, "https://foo@bar.com/packages/pypi/simple", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[6]) } func TestTranslatePathWithComplexVariables(t *testing.T) { diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go index 6d60d56bc..53b714104 100644 --- a/bundle/libraries/local_path.go +++ b/bundle/libraries/local_path.go @@ -57,6 +57,12 @@ func IsLibraryLocal(dep string) bool { } } + // If the dependency starts with --, it's a pip flag option which is a valid + // entry for environment dependencies but not a local path + if containsPipFlag(dep) { + return false + } + // If the dependency is a requirements file, it's not a valid 
local path if strings.HasPrefix(dep, "-r") { return false @@ -70,6 +76,11 @@ func IsLibraryLocal(dep string) bool { return IsLocalPath(dep) } +func containsPipFlag(input string) bool { + re := regexp.MustCompile(`--[a-zA-Z0-9-]+`) + return re.MatchString(input) +} + // ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_). // \[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security]. // ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?): Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1). From ca45e53f42c5c4b26f2833554ab7118802c017cb Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 21 Oct 2024 19:56:17 +0200 Subject: [PATCH 17/25] Add script to make testing of code on branches easier (#1844) ## Changes Convenience script to exec into a shell where a CLI build for a specific branch is made available. ## Tests Manually from `/tmp` with `bash <([path]) demo-dashboards`. --- internal/bugbash/README.md | 13 ++++ internal/bugbash/exec.sh | 139 +++++++++++++++++++++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 internal/bugbash/README.md create mode 100755 internal/bugbash/exec.sh diff --git a/internal/bugbash/README.md b/internal/bugbash/README.md new file mode 100644 index 000000000..941ab6227 --- /dev/null +++ b/internal/bugbash/README.md @@ -0,0 +1,13 @@ +# Bugbash + +The script in this directory can be used to conveniently exec into a shell +where a CLI build for a specific branch is made available. + +## Usage + +This script prompts if you do NOT have at least Bash 5 installed, +but works without command completion with earlier versions. + +```shell +bash <(curl -fsSL https://raw.githubusercontent.com/databricks/cli/main/internal/bugbash/exec.sh) my-branch +``` diff --git a/internal/bugbash/exec.sh b/internal/bugbash/exec.sh new file mode 100755 index 000000000..ac25b16ed --- /dev/null +++ b/internal/bugbash/exec.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Set the GitHub repository for the Databricks CLI. +export GH_REPO="databricks/cli" + +# Synthesize the directory name for the snapshot build. +function cli_snapshot_directory() { + dir="cli" + + # Append OS + case "$(uname -s)" in + Linux) + dir="${dir}_linux" + ;; + Darwin) + dir="${dir}_darwin" + ;; + *) + echo "Unknown operating system: $os" + ;; + esac + + # Append architecture + case "$(uname -m)" in + x86_64) + dir="${dir}_amd64_v1" + ;; + i386|i686) + dir="${dir}_386" + ;; + arm64|aarch64) + dir="${dir}_arm64" + ;; + armv7l|armv8l) + dir="${dir}_arm_6" + ;; + *) + echo "Unknown architecture: $arch" + ;; + esac + + echo $dir +} + +BRANCH=$1 +shift + +# Default to main branch if branch is not specified. +if [ -z "$BRANCH" ]; then + BRANCH=main +fi + +if [ -z "$BRANCH" ]; then + echo "Please specify which branch to bugbash..." + exit 1 +fi + +# Check if the "gh" command is available. +if ! command -v gh &> /dev/null; then + echo "The GitHub CLI (gh) is required to download the snapshot build." + echo "Install and configure it with:" + echo "" + echo " brew install gh" + echo " gh auth login" + echo "" + exit 1 +fi + +echo "Looking for a snapshot build of the Databricks CLI on branch $BRANCH..." + +# Find last successful build on $BRANCH. 
+last_successful_run_id=$( + gh run list -b "$BRANCH" -w release-snapshot --json 'databaseId,conclusion' | + jq 'limit(1; .[] | select(.conclusion == "success")) | .databaseId' +) +if [ -z "$last_successful_run_id" ]; then + echo "Unable to find last successful build of the release-snapshot workflow for branch $BRANCH." + exit 1 +fi + +# Determine artifact name with the right binaries for this runner. +case "$(uname -s)" in +Linux) + artifact="cli_linux_snapshot" + ;; +Darwin) + artifact="cli_darwin_snapshot" + ;; +esac + +# Create a temporary directory to download the artifact. +dir=$(mktemp -d) + +# Download the artifact. +echo "Downloading the snapshot build..." +gh run download "$last_successful_run_id" -n "$artifact" -D "$dir/.bin" +dir="$dir/.bin/$(cli_snapshot_directory)" +if [ ! -d "$dir" ]; then + echo "Directory does not exist: $dir" + exit 1 +fi + +# Make CLI available on $PATH. +chmod +x "$dir/databricks" +export PATH="$dir:$PATH" + +# Set the prompt to indicate the bugbash environment and exec. +export PS1="(bugbash $BRANCH) \[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ " + +# Display completion instructions. +echo "" +echo "==================================================================" + +if [[ ${BASH_VERSINFO[0]} -lt 5 ]]; then + echo -en "\033[31m" + echo "You have Bash version < 5 installed... completion won't work." + echo -en "\033[0m" + echo "" + echo "Install it with:" + echo "" + echo " brew install bash bash-completion" + echo "" + echo "==================================================================" +fi + +echo "" +echo "To load completions in your current shell session:" +echo "" +echo " source /opt/homebrew/etc/profile.d/bash_completion.sh" +echo " source <(databricks completion bash)" +echo "" +echo "==================================================================" +echo "" + +# Exec into a new shell. +# Note: don't use zsh because on macOS it _always_ overwrites PS1. +exec /usr/bin/env bash From 571076d5e1a637e4a6d4f2873665f1e4a0cb4c7f Mon Sep 17 00:00:00 2001 From: Stephen Macke Date: Mon, 21 Oct 2024 11:27:07 -0700 Subject: [PATCH 18/25] Support Git worktrees for `sync` (#1831) ## Changes This change allows the `sync` command to work from [git worktrees](https://git-scm.com/docs/git-worktree). ## Tests * Added unit tests for traversal of worktree related files. * Manually confirmed that synchronization of files from a main checkout, as well as a worktree, observed the same ignore rules (both locally defined as well as from `$GIT_DIR/info/exclude`). --------- Co-authored-by: Pieter Noordhuis --- libs/git/repository.go | 64 ++++++++++++-------- libs/git/view.go | 2 +- libs/git/worktree.go | 123 ++++++++++++++++++++++++++++++++++++++ libs/git/worktree_test.go | 108 +++++++++++++++++++++++++++++++++ 4 files changed, 271 insertions(+), 26 deletions(-) create mode 100644 libs/git/worktree.go create mode 100644 libs/git/worktree_test.go diff --git a/libs/git/repository.go b/libs/git/repository.go index 82ee7987b..0bbd57865 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -23,8 +23,21 @@ type Repository struct { // directory where we process .gitignore files. real bool - // root is the absolute path to the repository root. - root vfs.Path + // rootDir is the path to the root of the repository checkout. + // This can be either the main repository checkout or a worktree checkout. + // For more information about worktrees, see: https://git-scm.com/docs/git-worktree#_description. 
+ rootDir vfs.Path + + // gitDir is the equivalent of $GIT_DIR and points to the + // `.git` directory of a repository or a worktree directory. + // See https://git-scm.com/docs/git-worktree#_details for more information. + gitDir vfs.Path + + // gitCommonDir is the equivalent of $GIT_COMMON_DIR and points to the + // `.git` directory of the main working tree (common between worktrees). + // This is equivalent to [gitDir] if this is the main working tree. + // See https://git-scm.com/docs/git-worktree#_details for more information. + gitCommonDir vfs.Path // ignore contains a list of ignore patterns indexed by the // path prefix relative to the repository root. @@ -44,12 +57,11 @@ type Repository struct { // Root returns the absolute path to the repository root. func (r *Repository) Root() string { - return r.root.Native() + return r.rootDir.Native() } func (r *Repository) CurrentBranch() (string, error) { - // load .git/HEAD - ref, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, "HEAD")) + ref, err := LoadReferenceFile(r.gitDir, "HEAD") if err != nil { return "", err } @@ -65,8 +77,7 @@ func (r *Repository) CurrentBranch() (string, error) { } func (r *Repository) LatestCommit() (string, error) { - // load .git/HEAD - ref, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, "HEAD")) + ref, err := LoadReferenceFile(r.gitDir, "HEAD") if err != nil { return "", err } @@ -80,12 +91,12 @@ func (r *Repository) LatestCommit() (string, error) { return ref.Content, nil } - // read reference from .git/HEAD + // Read reference from $GIT_DIR/HEAD branchHeadPath, err := ref.ResolvePath() if err != nil { return "", err } - branchHeadRef, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, branchHeadPath)) + branchHeadRef, err := LoadReferenceFile(r.gitCommonDir, branchHeadPath) if err != nil { return "", err } @@ -125,7 +136,7 @@ func (r *Repository) loadConfig() error { if err != nil { return fmt.Errorf("unable to load user specific gitconfig: %w", err) } - err = config.loadFile(r.root, ".git/config") + err = config.loadFile(r.gitCommonDir, "config") if err != nil { return fmt.Errorf("unable to load repository specific gitconfig: %w", err) } @@ -133,12 +144,6 @@ func (r *Repository) loadConfig() error { return nil } -// newIgnoreFile constructs a new [ignoreRules] implementation backed by -// a file using the specified path relative to the repository root. -func (r *Repository) newIgnoreFile(relativeIgnoreFilePath string) ignoreRules { - return newIgnoreFile(r.root, relativeIgnoreFilePath) -} - // getIgnoreRules returns a slice of [ignoreRules] that apply // for the specified prefix. The prefix must be cleaned by the caller. // It lazily initializes an entry for the specified prefix if it @@ -149,7 +154,7 @@ func (r *Repository) getIgnoreRules(prefix string) []ignoreRules { return fs } - r.ignore[prefix] = append(r.ignore[prefix], r.newIgnoreFile(path.Join(prefix, gitIgnoreFileName))) + r.ignore[prefix] = append(r.ignore[prefix], newIgnoreFile(r.rootDir, path.Join(prefix, gitIgnoreFileName))) return r.ignore[prefix] } @@ -205,21 +210,30 @@ func (r *Repository) Ignore(relPath string) (bool, error) { func NewRepository(path vfs.Path) (*Repository, error) { real := true - rootPath, err := vfs.FindLeafInTree(path, GitDirectoryName) + rootDir, err := vfs.FindLeafInTree(path, GitDirectoryName) if err != nil { if !errors.Is(err, fs.ErrNotExist) { return nil, err } // Cannot find `.git` directory. - // Treat the specified path as a potential repository root. 
+ // Treat the specified path as a potential repository root checkout. real = false - rootPath = path + rootDir = path + } + + // Derive $GIT_DIR and $GIT_COMMON_DIR paths if this is a real repository. + // If it isn't a real repository, they'll point to the (non-existent) `.git` directory. + gitDir, gitCommonDir, err := resolveGitDirs(rootDir) + if err != nil { + return nil, err } repo := &Repository{ - real: real, - root: rootPath, - ignore: make(map[string][]ignoreRules), + real: real, + rootDir: rootDir, + gitDir: gitDir, + gitCommonDir: gitCommonDir, + ignore: make(map[string][]ignoreRules), } err = repo.loadConfig() @@ -253,9 +267,9 @@ func NewRepository(path vfs.Path) (*Repository, error) { ".git", }), // Load repository-wide exclude file. - repo.newIgnoreFile(".git/info/exclude"), + newIgnoreFile(repo.gitCommonDir, "info/exclude"), // Load root gitignore file. - repo.newIgnoreFile(".gitignore"), + newIgnoreFile(repo.rootDir, ".gitignore"), } return repo, nil diff --git a/libs/git/view.go b/libs/git/view.go index 90eed0bb8..2d2e39a60 100644 --- a/libs/git/view.go +++ b/libs/git/view.go @@ -80,7 +80,7 @@ func NewView(root vfs.Path) (*View, error) { // Target path must be relative to the repository root path. target := root.Native() - prefix := repo.root.Native() + prefix := repo.rootDir.Native() if !strings.HasPrefix(target, prefix) { return nil, fmt.Errorf("path %q is not within repository root %q", root.Native(), prefix) } diff --git a/libs/git/worktree.go b/libs/git/worktree.go new file mode 100644 index 000000000..964c1c95b --- /dev/null +++ b/libs/git/worktree.go @@ -0,0 +1,123 @@ +package git + +import ( + "bufio" + "errors" + "fmt" + "io/fs" + "path/filepath" + "strings" + + "github.com/databricks/cli/libs/vfs" +) + +func readLines(root vfs.Path, name string) ([]string, error) { + file, err := root.Open(name) + if err != nil { + return nil, err + } + + defer file.Close() + + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + return lines, scanner.Err() +} + +// readGitDir reads the value of the `.git` file in a worktree. +func readGitDir(root vfs.Path) (string, error) { + lines, err := readLines(root, GitDirectoryName) + if err != nil { + return "", err + } + + var gitDir string + for _, line := range lines { + parts := strings.SplitN(line, ": ", 2) + if len(parts) != 2 { + continue + } + + if parts[0] == "gitdir" { + gitDir = strings.TrimSpace(parts[1]) + } + } + + if gitDir == "" { + return "", fmt.Errorf(`expected %q to contain a line with "gitdir: [...]"`, filepath.Join(root.Native(), GitDirectoryName)) + } + + return gitDir, nil +} + +// readGitCommonDir reads the value of the `commondir` file in the `.git` directory of a worktree. +// This file typically contains "../.." to point to $GIT_COMMON_DIR. +func readGitCommonDir(gitDir vfs.Path) (string, error) { + lines, err := readLines(gitDir, "commondir") + if err != nil { + return "", err + } + + if len(lines) == 0 { + return "", errors.New("file is empty") + } + + return strings.TrimSpace(lines[0]), nil +} + +// resolveGitDirs resolves the paths for $GIT_DIR and $GIT_COMMON_DIR. +// The path argument is the root of the checkout where (supposedly) a `.git` file or directory exists. +func resolveGitDirs(root vfs.Path) (vfs.Path, vfs.Path, error) { + fileInfo, err := root.Stat(GitDirectoryName) + if err != nil { + // If the `.git` file or directory does not exist, then this is not a git repository. 
+ // Return paths that we know don't exist, so we do not need to perform nil checks in the caller. + if errors.Is(err, fs.ErrNotExist) { + gitDir := vfs.MustNew(filepath.Join(root.Native(), GitDirectoryName)) + return gitDir, gitDir, nil + } + return nil, nil, err + } + + // If the path is a directory, then it is the main working tree. + // Both $GIT_DIR and $GIT_COMMON_DIR point to the same directory. + if fileInfo.IsDir() { + gitDir := vfs.MustNew(filepath.Join(root.Native(), GitDirectoryName)) + return gitDir, gitDir, nil + } + + // If the path is not a directory, then it is a worktree. + // Read value for $GIT_DIR. + gitDirValue, err := readGitDir(root) + if err != nil { + return nil, nil, err + } + + // Resolve $GIT_DIR. + var gitDir vfs.Path + if filepath.IsAbs(gitDirValue) { + gitDir = vfs.MustNew(gitDirValue) + } else { + gitDir = vfs.MustNew(filepath.Join(root.Native(), gitDirValue)) + } + + // Read value for $GIT_COMMON_DIR. + gitCommonDirValue, err := readGitCommonDir(gitDir) + if err != nil { + return nil, nil, fmt.Errorf(`expected "commondir" file in worktree git folder at %q: %w`, gitDir.Native(), err) + } + + // Resolve $GIT_COMMON_DIR. + var gitCommonDir vfs.Path + if filepath.IsAbs(gitCommonDirValue) { + gitCommonDir = vfs.MustNew(gitCommonDirValue) + } else { + gitCommonDir = vfs.MustNew(filepath.Join(gitDir.Native(), gitCommonDirValue)) + } + + return gitDir, gitCommonDir, nil +} diff --git a/libs/git/worktree_test.go b/libs/git/worktree_test.go new file mode 100644 index 000000000..3d620c483 --- /dev/null +++ b/libs/git/worktree_test.go @@ -0,0 +1,108 @@ +package git + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/libs/vfs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func setupWorktree(t *testing.T) string { + var err error + + tmpDir := t.TempDir() + + // Checkout path + err = os.MkdirAll(filepath.Join(tmpDir, "my_worktree"), os.ModePerm) + require.NoError(t, err) + + // Main $GIT_COMMON_DIR + err = os.MkdirAll(filepath.Join(tmpDir, ".git"), os.ModePerm) + require.NoError(t, err) + + // Worktree $GIT_DIR + err = os.MkdirAll(filepath.Join(tmpDir, ".git/worktrees/my_worktree"), os.ModePerm) + require.NoError(t, err) + + return tmpDir +} + +func writeGitDir(t *testing.T, dir, content string) { + err := os.WriteFile(filepath.Join(dir, "my_worktree/.git"), []byte(content), os.ModePerm) + require.NoError(t, err) +} + +func writeGitCommonDir(t *testing.T, dir, content string) { + err := os.WriteFile(filepath.Join(dir, ".git/worktrees/my_worktree/commondir"), []byte(content), os.ModePerm) + require.NoError(t, err) +} + +func verifyCorrectDirs(t *testing.T, dir string) { + gitDir, gitCommonDir, err := resolveGitDirs(vfs.MustNew(filepath.Join(dir, "my_worktree"))) + require.NoError(t, err) + assert.Equal(t, filepath.Join(dir, ".git/worktrees/my_worktree"), gitDir.Native()) + assert.Equal(t, filepath.Join(dir, ".git"), gitCommonDir.Native()) +} + +func TestWorktreeResolveGitDir(t *testing.T) { + dir := setupWorktree(t) + writeGitCommonDir(t, dir, "../..") + + t.Run("relative", func(t *testing.T) { + writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", "../.git/worktrees/my_worktree")) + verifyCorrectDirs(t, dir) + }) + + t.Run("absolute", func(t *testing.T) { + writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", filepath.Join(dir, ".git/worktrees/my_worktree"))) + verifyCorrectDirs(t, dir) + }) + + t.Run("additional spaces", func(t *testing.T) { + writeGitDir(t, dir, fmt.Sprintf("gitdir: %s \n\n\n", 
"../.git/worktrees/my_worktree")) + verifyCorrectDirs(t, dir) + }) + + t.Run("empty", func(t *testing.T) { + writeGitDir(t, dir, "") + + _, _, err := resolveGitDirs(vfs.MustNew(filepath.Join(dir, "my_worktree"))) + assert.ErrorContains(t, err, ` to contain a line with "gitdir: [...]"`) + }) +} + +func TestWorktreeResolveCommonDir(t *testing.T) { + dir := setupWorktree(t) + writeGitDir(t, dir, fmt.Sprintf("gitdir: %s", "../.git/worktrees/my_worktree")) + + t.Run("relative", func(t *testing.T) { + writeGitCommonDir(t, dir, "../..") + verifyCorrectDirs(t, dir) + }) + + t.Run("absolute", func(t *testing.T) { + writeGitCommonDir(t, dir, filepath.Join(dir, ".git")) + verifyCorrectDirs(t, dir) + }) + + t.Run("additional spaces", func(t *testing.T) { + writeGitCommonDir(t, dir, " ../.. \n\n\n") + verifyCorrectDirs(t, dir) + }) + + t.Run("empty", func(t *testing.T) { + writeGitCommonDir(t, dir, "") + + _, _, err := resolveGitDirs(vfs.MustNew(filepath.Join(dir, "my_worktree"))) + assert.ErrorContains(t, err, `expected "commondir" file in worktree git folder at `) + }) + + t.Run("missing", func(t *testing.T) { + _, _, err := resolveGitDirs(vfs.MustNew(filepath.Join(dir, "my_worktree"))) + assert.ErrorContains(t, err, `expected "commondir" file in worktree git folder at `) + }) +} From f8bb3a8d729b4cbe6313c1e313379dbe70f6b680 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:37:01 +0200 Subject: [PATCH 19/25] Bump github.com/databricks/databricks-sdk-go from 0.48.0 to 0.49.0 (#1843) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.48.0 to 0.49.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.49.0

API Changes:

OpenAPI SHA: cf9c61453990df0f9453670f2fe68e1b128647a2, Date: 2024-10-14

Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog.

[Release] Release v0.49.0

API Changes:

OpenAPI SHA: cf9c61453990df0f9453670f2fe68e1b128647a2, Date: 2024-10-14

Commits

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.48.0&new-version=0.49.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + bundle/schema/jsonschema.json | 48 +++- cmd/workspace/apps/apps.go | 3 - .../disable-legacy-dbfs.go | 220 ++++++++++++++++++ cmd/workspace/jobs/jobs.go | 1 + cmd/workspace/settings/settings.go | 2 + go.mod | 2 +- go.sum | 4 +- 9 files changed, 275 insertions(+), 8 deletions(-) create mode 100755 cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 303c78553..2d9cb6d86 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -0c86ea6dbd9a730c24ff0d4e509603e476955ac5 \ No newline at end of file +cf9c61453990df0f9453670f2fe68e1b128647a2 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 2470eb33d..ae10198bb 100755 --- a/.gitattributes +++ b/.gitattributes @@ -54,6 +54,7 @@ cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true cmd/workspace/default-namespace/default-namespace.go linguist-generated=true cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true +cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true cmd/workspace/experiments/experiments.go linguist-generated=true cmd/workspace/external-locations/external-locations.go linguist-generated=true diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 06b9cc15a..178656fe0 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -209,6 +209,10 @@ { "type": "object", "properties": { + "budget_policy_id": { + "description": "The id of the user specified budget policy to use for this job.\nIf not specified, a default budget policy may be applied when creating or modifying the job.\nSee `effective_budget_policy_id` for the budget policy used by this workload.", + "$ref": "#/$defs/string" + }, "continuous": { "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Continuous" @@ -3901,6 +3905,10 @@ { "type": "object", "properties": { + "report": { + "description": "Select tables from a specific source report.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec" + }, "schema": { "description": "Select tables from a specific source schema.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec" @@ -4233,6 +4241,40 @@ } ] }, + "pipelines.ReportSpec": { + "anyOf": [ + { + "type": "object", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store table.", + "$ref": "#/$defs/string" + }, + "destination_schema": { + "description": "Required. Destination schema to store table.", + "$ref": "#/$defs/string" + }, + "destination_table": { + "description": "Required. Destination table name. The pipeline fails if a table with that name already exists.", + "$ref": "#/$defs/string" + }, + "source_url": { + "description": "Required. 
Report URL in the source system.", + "$ref": "#/$defs/string" + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "pipelines.SchemaSpec": { "anyOf": [ { @@ -4281,7 +4323,7 @@ "$ref": "#/$defs/string" }, "destination_table": { - "description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used.", + "description": "Optional. Destination table name. The pipeline fails if a table with that name already exists. If not set, the source table name is used.", "$ref": "#/$defs/string" }, "source_catalog": { @@ -4329,6 +4371,10 @@ "SCD_TYPE_1", "SCD_TYPE_2" ] + }, + "sequence_by": { + "description": "The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order.", + "$ref": "#/$defs/slice/string" } }, "additionalProperties": false diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 4cee2f82a..9331ddc2e 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -28,9 +28,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "apps", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go b/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go new file mode 100755 index 000000000..d09755370 --- /dev/null +++ b/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go @@ -0,0 +1,220 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package disable_legacy_dbfs + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "disable-legacy-dbfs", + Short: `When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new mounts).`, + Long: `When this setting is on, access to DBFS root and DBFS mounts is disallowed (as + well as creation of new mounts). When the setting is off, all DBFS + functionality is enabled`, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteOverrides []func( + *cobra.Command, + *settings.DeleteDisableLegacyDbfsRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteDisableLegacyDbfsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the disable legacy DBFS setting.` + cmd.Long = `Delete the disable legacy DBFS setting. + + Deletes the disable legacy DBFS setting for a workspace, reverting back to the + default.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DisableLegacyDbfs().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetDisableLegacyDbfsRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetDisableLegacyDbfsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the disable legacy DBFS setting.` + cmd.Long = `Get the disable legacy DBFS setting. + + Gets the disable legacy DBFS setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DisableLegacyDbfs().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateDisableLegacyDbfsRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateDisableLegacyDbfsRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the disable legacy DBFS setting.` + cmd.Long = `Update the disable legacy DBFS setting. 
+ + Updates the disable legacy DBFS setting for the workspace.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.DisableLegacyDbfs().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service DisableLegacyDbfs diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index d4ceb0c28..9e8db43d0 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -1557,6 +1557,7 @@ func newSubmit() *cobra.Command { cmd.Flags().Var(&submitJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: access_control_list + cmd.Flags().StringVar(&submitReq.BudgetPolicyId, "budget-policy-id", submitReq.BudgetPolicyId, `The user specified id of the budget policy to use for this one-time run.`) // TODO: complex arg: email_notifications // TODO: array: environments // TODO: complex arg: git_source diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index aaeecf41b..31e6ceee4 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -9,6 +9,7 @@ import ( compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile" default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace" disable_legacy_access "github.com/databricks/cli/cmd/workspace/disable-legacy-access" + disable_legacy_dbfs "github.com/databricks/cli/cmd/workspace/disable-legacy-dbfs" enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring" restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins" ) @@ -33,6 +34,7 @@ func New() *cobra.Command { cmd.AddCommand(compliance_security_profile.New()) cmd.AddCommand(default_namespace.New()) cmd.AddCommand(disable_legacy_access.New()) + cmd.AddCommand(disable_legacy_dbfs.New()) cmd.AddCommand(enhanced_security_monitoring.New()) cmd.AddCommand(restrict_workspace_admins.New()) diff --git a/go.mod b/go.mod index 697205f33..9059b9637 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.22.7 require ( github.com/Masterminds/semver/v3 v3.3.0 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.48.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.49.0 // Apache 2.0 github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 03698b20a..f365fcbf6 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX 
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.48.0 h1:46KtsnRo+FGhC3izUXbpL0PXBNomvsdignYDhJZlm9s= -github.com/databricks/databricks-sdk-go v0.48.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.49.0 h1:VBTeZZMLIuBSM4kxOCfUcW9z4FUQZY2QeNRD5qm9FUQ= +github.com/databricks/databricks-sdk-go v0.49.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 68d69d6e0bb420cdfbdceceb686717912187980e Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 22 Oct 2024 12:43:43 +0200 Subject: [PATCH 20/25] Upgrade TF provider to 1.54.0 (#1852) ## Changes Upgrade TF provider to 1.54.0 --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../data_source_notification_destinations.go | 15 +++++++++ .../tf/schema/data_source_registered_model.go | 32 +++++++++++++++++++ bundle/internal/tf/schema/data_sources.go | 4 +++ bundle/internal/tf/schema/resource_job.go | 1 + .../tf/schema/resource_online_table.go | 11 ++++--- .../internal/tf/schema/resource_pipeline.go | 19 +++++++++++ bundle/internal/tf/schema/root.go | 2 +- 8 files changed, 79 insertions(+), 7 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_notification_destinations.go create mode 100644 bundle/internal/tf/schema/data_source_registered_model.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 49e48a6e3..0c4244089 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.53.0" +const ProviderVersion = "1.54.0" diff --git a/bundle/internal/tf/schema/data_source_notification_destinations.go b/bundle/internal/tf/schema/data_source_notification_destinations.go new file mode 100644 index 000000000..c95ad6db9 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_notification_destinations.go @@ -0,0 +1,15 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceNotificationDestinationsNotificationDestinations struct { + DestinationType string `json:"destination_type,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Id string `json:"id,omitempty"` +} + +type DataSourceNotificationDestinations struct { + DisplayNameContains string `json:"display_name_contains,omitempty"` + Type string `json:"type,omitempty"` + NotificationDestinations []DataSourceNotificationDestinationsNotificationDestinations `json:"notification_destinations,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_registered_model.go b/bundle/internal/tf/schema/data_source_registered_model.go new file mode 100644 index 000000000..e19e0849a --- /dev/null +++ b/bundle/internal/tf/schema/data_source_registered_model.go @@ -0,0 +1,32 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceRegisteredModelModelInfoAliases struct { + AliasName string `json:"alias_name,omitempty"` + VersionNum int `json:"version_num,omitempty"` +} + +type DataSourceRegisteredModelModelInfo struct { + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogName string `json:"catalog_name,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + FullName string `json:"full_name,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + StorageLocation string `json:"storage_location,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"` +} + +type DataSourceRegisteredModel struct { + FullName string `json:"full_name"` + IncludeAliases bool `json:"include_aliases,omitempty"` + IncludeBrowse bool `json:"include_browse,omitempty"` + ModelInfo []DataSourceRegisteredModelModelInfo `json:"model_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 10829b994..050e0bc1d 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -36,7 +36,9 @@ type DataSources struct { NodeType map[string]any `json:"databricks_node_type,omitempty"` Notebook map[string]any `json:"databricks_notebook,omitempty"` NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"` + NotificationDestinations map[string]any `json:"databricks_notification_destinations,omitempty"` Pipelines map[string]any `json:"databricks_pipelines,omitempty"` + RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` Schema map[string]any `json:"databricks_schema,omitempty"` Schemas map[string]any `json:"databricks_schemas,omitempty"` ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` @@ -92,7 +94,9 @@ func NewDataSources() *DataSources { NodeType: make(map[string]any), Notebook: make(map[string]any), NotebookPaths: make(map[string]any), + NotificationDestinations: make(map[string]any), Pipelines: make(map[string]any), + RegisteredModel: make(map[string]any), Schema: make(map[string]any), Schemas: make(map[string]any), ServicePrincipal: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 42b648b0f..c89eafab9 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -1448,6 +1448,7 @@ type ResourceJobWebhookNotifications struct { type ResourceJob struct { AlwaysRunning bool `json:"always_running,omitempty"` + BudgetPolicyId string `json:"budget_policy_id,omitempty"` ControlRunState bool `json:"control_run_state,omitempty"` Description string `json:"description,omitempty"` EditMode string `json:"edit_mode,omitempty"` diff --git a/bundle/internal/tf/schema/resource_online_table.go b/bundle/internal/tf/schema/resource_online_table.go index de671eade..58d6f4ba5 100644 --- a/bundle/internal/tf/schema/resource_online_table.go +++ b/bundle/internal/tf/schema/resource_online_table.go @@ -19,9 +19,10 @@ type ResourceOnlineTableSpec struct { } type ResourceOnlineTable struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - Status []any 
`json:"status,omitempty"` - TableServingUrl string `json:"table_serving_url,omitempty"` - Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + Status []any `json:"status,omitempty"` + TableServingUrl string `json:"table_serving_url,omitempty"` + UnityCatalogProvisioningState string `json:"unity_catalog_provisioning_state,omitempty"` + Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 1bed91fcb..2cb459aba 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -142,10 +142,26 @@ type ResourcePipelineGatewayDefinition struct { GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"` } +type ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration struct { + PrimaryKeys []string `json:"primary_keys,omitempty"` + SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` + ScdType string `json:"scd_type,omitempty"` + SequenceBy []string `json:"sequence_by,omitempty"` +} + +type ResourcePipelineIngestionDefinitionObjectsReport struct { + DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationSchema string `json:"destination_schema,omitempty"` + DestinationTable string `json:"destination_table,omitempty"` + SourceUrl string `json:"source_url,omitempty"` + TableConfiguration *ResourcePipelineIngestionDefinitionObjectsReportTableConfiguration `json:"table_configuration,omitempty"` +} + type ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration struct { PrimaryKeys []string `json:"primary_keys,omitempty"` SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` ScdType string `json:"scd_type,omitempty"` + SequenceBy []string `json:"sequence_by,omitempty"` } type ResourcePipelineIngestionDefinitionObjectsSchema struct { @@ -160,6 +176,7 @@ type ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration struct { PrimaryKeys []string `json:"primary_keys,omitempty"` SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` ScdType string `json:"scd_type,omitempty"` + SequenceBy []string `json:"sequence_by,omitempty"` } type ResourcePipelineIngestionDefinitionObjectsTable struct { @@ -173,6 +190,7 @@ type ResourcePipelineIngestionDefinitionObjectsTable struct { } type ResourcePipelineIngestionDefinitionObjects struct { + Report *ResourcePipelineIngestionDefinitionObjectsReport `json:"report,omitempty"` Schema *ResourcePipelineIngestionDefinitionObjectsSchema `json:"schema,omitempty"` Table *ResourcePipelineIngestionDefinitionObjectsTable `json:"table,omitempty"` } @@ -181,6 +199,7 @@ type ResourcePipelineIngestionDefinitionTableConfiguration struct { PrimaryKeys []string `json:"primary_keys,omitempty"` SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` ScdType string `json:"scd_type,omitempty"` + SequenceBy []string `json:"sequence_by,omitempty"` } type ResourcePipelineIngestionDefinition struct { diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 7a0cc01f9..bf4283c9b 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.53.0" +const 
ProviderVersion = "1.54.0" func NewRoot() *Root { return &Root{ From 3bab21e72ee78f6711519762317e8218f9884afd Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 22 Oct 2024 20:29:17 +0530 Subject: [PATCH 21/25] Fix race condition when restarting continuous jobs (#1849) ## Changes We don't need to cancel existing runs when the job is continuous and unpaused. The `/jobs/run-now` command will cancel the existing run and trigger a new one automatically. Cancelling the job manually can cause a race condition where both the manual trigger from the CLI and the continuous trigger from the job configuration happens at the same time. This PR prevents that from happening. ## Tests Unit tests and manually --- bundle/run/job.go | 23 +++++++ bundle/run/job_test.go | 132 ++++++++++++++++++++++++++++++++++++ bundle/run/pipeline.go | 12 ++++ bundle/run/pipeline_test.go | 70 +++++++++++++++++++ bundle/run/runner.go | 4 ++ cmd/bundle/run.go | 14 ++-- 6 files changed, 247 insertions(+), 8 deletions(-) diff --git a/bundle/run/job.go b/bundle/run/job.go index 8003c7d29..340af961c 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -317,6 +317,29 @@ func (r *jobRunner) Cancel(ctx context.Context) error { return errGroup.Wait() } +func (r *jobRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) { + // We don't need to cancel existing runs if the job is continuous and unpaused. + // the /jobs/run-now API will automatically cancel any existing runs before starting a new one. + // + // /jobs/run-now will not cancel existing runs if the job is continuous and paused. + // New job runs will be queued instead and will wait for existing runs to finish. + // In this case, we need to cancel the existing runs before starting a new one. 
+ continuous := r.job.JobSettings.Continuous + if continuous != nil && continuous.PauseStatus == jobs.PauseStatusUnpaused { + return r.Run(ctx, opts) + } + + s := cmdio.Spinner(ctx) + s <- "Cancelling all active job runs" + err := r.Cancel(ctx) + close(s) + if err != nil { + return nil, err + } + + return r.Run(ctx, opts) +} + func (r *jobRunner) ParseArgs(args []string, opts *Options) error { return r.posArgsHandler().ParseArgs(args, opts) } diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go index be189306b..369c546aa 100644 --- a/bundle/run/job_test.go +++ b/bundle/run/job_test.go @@ -1,6 +1,7 @@ package run import ( + "bytes" "context" "testing" "time" @@ -8,6 +9,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/mock" @@ -126,3 +129,132 @@ func TestJobRunnerCancelWithNoActiveRuns(t *testing.T) { err := runner.Cancel(context.Background()) require.NoError(t, err) } + +func TestJobRunnerRestart(t *testing.T) { + for _, jobSettings := range []*jobs.JobSettings{ + {}, + { + Continuous: &jobs.Continuous{ + PauseStatus: jobs.PauseStatusPaused, + }, + }, + } { + job := &resources.Job{ + ID: "123", + JobSettings: jobSettings, + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + + runner := jobRunner{key: "test", bundle: b, job: job} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + ctx := context.Background() + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "")) + ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) + + jobApi := m.GetMockJobsAPI() + jobApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + ActiveOnly: true, + JobId: 123, + }).Return([]jobs.BaseRun{ + {RunId: 1}, + {RunId: 2}, + }, nil) + + // Mock the runner cancelling existing job runs. 
+ mockWait := &jobs.WaitGetRunJobTerminatedOrSkipped[struct{}]{ + Poll: func(time time.Duration, f func(j *jobs.Run)) (*jobs.Run, error) { + return nil, nil + }, + } + jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{ + RunId: 1, + }).Return(mockWait, nil) + jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{ + RunId: 2, + }).Return(mockWait, nil) + + // Mock the runner triggering a job run + mockWaitForRun := &jobs.WaitGetRunJobTerminatedOrSkipped[jobs.RunNowResponse]{ + Poll: func(d time.Duration, f func(*jobs.Run)) (*jobs.Run, error) { + return &jobs.Run{ + State: &jobs.RunState{ + ResultState: jobs.RunResultStateSuccess, + }, + }, nil + }, + } + jobApi.EXPECT().RunNow(mock.Anything, jobs.RunNow{ + JobId: 123, + }).Return(mockWaitForRun, nil) + + // Mock the runner getting the job output + jobApi.EXPECT().GetRun(mock.Anything, jobs.GetRunRequest{}).Return(&jobs.Run{}, nil) + + _, err := runner.Restart(ctx, &Options{}) + require.NoError(t, err) + } +} + +func TestJobRunnerRestartForContinuousUnpausedJobs(t *testing.T) { + job := &resources.Job{ + ID: "123", + JobSettings: &jobs.JobSettings{ + Continuous: &jobs.Continuous{ + PauseStatus: jobs.PauseStatusUnpaused, + }, + }, + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + + runner := jobRunner{key: "test", bundle: b, job: job} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + ctx := context.Background() + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) + + jobApi := m.GetMockJobsAPI() + + // The runner should not try and cancel existing job runs for unpaused continuous jobs. 
+ jobApi.AssertNotCalled(t, "ListRunsAll") + jobApi.AssertNotCalled(t, "CancelRun") + + // Mock the runner triggering a job run + mockWaitForRun := &jobs.WaitGetRunJobTerminatedOrSkipped[jobs.RunNowResponse]{ + Poll: func(d time.Duration, f func(*jobs.Run)) (*jobs.Run, error) { + return &jobs.Run{ + State: &jobs.RunState{ + ResultState: jobs.RunResultStateSuccess, + }, + }, nil + }, + } + jobApi.EXPECT().RunNow(mock.Anything, jobs.RunNow{ + JobId: 123, + }).Return(mockWaitForRun, nil) + + // Mock the runner getting the job output + jobApi.EXPECT().GetRun(mock.Anything, jobs.GetRunRequest{}).Return(&jobs.Run{}, nil) + + _, err := runner.Restart(ctx, &Options{}) + require.NoError(t, err) +} diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index d684f8388..ffe012843 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -183,6 +183,18 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error { return err } +func (r *pipelineRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) { + s := cmdio.Spinner(ctx) + s <- "Cancelling the active pipeline update" + err := r.Cancel(ctx) + close(s) + if err != nil { + return nil, err + } + + return r.Run(ctx, opts) +} + func (r *pipelineRunner) ParseArgs(args []string, opts *Options) error { if len(args) == 0 { return nil diff --git a/bundle/run/pipeline_test.go b/bundle/run/pipeline_test.go index 29b57ffdb..e4608061c 100644 --- a/bundle/run/pipeline_test.go +++ b/bundle/run/pipeline_test.go @@ -1,6 +1,7 @@ package run import ( + "bytes" "context" "testing" "time" @@ -8,8 +9,12 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + sdk_config "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -47,3 +52,68 @@ func TestPipelineRunnerCancel(t *testing.T) { err := runner.Cancel(context.Background()) require.NoError(t, err) } + +func TestPipelineRunnerRestart(t *testing.T) { + pipeline := &resources.Pipeline{ + ID: "123", + } + + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "test_pipeline": pipeline, + }, + }, + }, + } + + runner := pipelineRunner{key: "test", bundle: b, pipeline: pipeline} + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdk_config.Config{ + Host: "https://test.com", + } + b.SetWorkpaceClient(m.WorkspaceClient) + ctx := context.Background() + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) + + mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{ + Poll: func(time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error) { + return nil, nil + }, + } + + pipelineApi := m.GetMockPipelinesAPI() + pipelineApi.EXPECT().Stop(mock.Anything, pipelines.StopRequest{ + PipelineId: "123", + }).Return(mockWait, nil) + + pipelineApi.EXPECT().GetByPipelineId(mock.Anything, "123").Return(&pipelines.GetPipelineResponse{}, nil) + + // Mock runner starting a new update + pipelineApi.EXPECT().StartUpdate(mock.Anything, pipelines.StartUpdate{ + PipelineId: "123", + 
}).Return(&pipelines.StartUpdateResponse{ + UpdateId: "456", + }, nil) + + // Mock runner polling for events + pipelineApi.EXPECT().ListPipelineEventsAll(mock.Anything, pipelines.ListPipelineEventsRequest{ + Filter: `update_id = '456'`, + MaxResults: 100, + PipelineId: "123", + }).Return([]pipelines.PipelineEvent{}, nil) + + // Mock runner polling for update status + pipelineApi.EXPECT().GetUpdateByPipelineIdAndUpdateId(mock.Anything, "123", "456"). + Return(&pipelines.GetUpdateResponse{ + Update: &pipelines.UpdateInfo{ + State: pipelines.UpdateInfoStateCompleted, + }, + }, nil) + + _, err := runner.Restart(ctx, &Options{}) + require.NoError(t, err) +} diff --git a/bundle/run/runner.go b/bundle/run/runner.go index 0f202ce7d..1cdcc9d8b 100644 --- a/bundle/run/runner.go +++ b/bundle/run/runner.go @@ -27,6 +27,10 @@ type Runner interface { // Run the underlying worklow. Run(ctx context.Context, opts *Options) (output.RunOutput, error) + // Restart the underlying workflow by cancelling any existing runs before + // starting a new one. + Restart(ctx context.Context, opts *Options) (output.RunOutput, error) + // Cancel the underlying workflow. Cancel(ctx context.Context) error diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 9ef5eb8ff..ed5bd2ef1 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/bundle/run" + "github.com/databricks/cli/bundle/run/output" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" @@ -100,19 +101,16 @@ task or a Python wheel task, the second example applies. } runOptions.NoWait = noWait + var output output.RunOutput if restart { - s := cmdio.Spinner(ctx) - s <- "Cancelling all runs" - err := runner.Cancel(ctx) - close(s) - if err != nil { - return err - } + output, err = runner.Restart(ctx, &runOptions) + } else { + output, err = runner.Run(ctx, &runOptions) } - output, err := runner.Run(ctx, &runOptions) if err != nil { return err } + if output != nil { switch root.OutputType(cmd) { case flags.OutputText: From 60c153c0e765e6a7fd53e20f2ce431b7f3a70812 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Tue, 22 Oct 2024 17:52:46 +0200 Subject: [PATCH 22/25] Fix pipeline in default-python template not working for certain workspaces (#1854) Change the default-python template to not set the `catalog` field for the pipeline for workspaces that set `hive_metastore` as the default catalog. The Pipelines service currently returns an error when that value is used for the `catalog` field. This is the most simple fix for this issue, which was reported by a customer. As a followup, we should look at whether we want to prompt for a catalog instead, possibly just for this specific scenario. 
--- .../resources/{{.project_name}}.pipeline.yml.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl index 50e5ad97c..1c6b8607e 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl @@ -3,7 +3,7 @@ resources: pipelines: {{.project_name}}_pipeline: name: {{.project_name}}_pipeline - {{- if eq default_catalog ""}} + {{- if or (eq default_catalog "") (eq default_catalog "hive_metastore")}} ## Specify the 'catalog' field to configure this pipeline to make use of Unity Catalog: # catalog: catalog_name {{- else}} From 55a055d0f532c8ab60bdbccc51c106fb051520d8 Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Wed, 23 Oct 2024 13:08:12 +0200 Subject: [PATCH 23/25] Add "output" flag to the bundle sync command (#1853) ## Changes We want to use 'bundle sync' in the vscode extension before running a file as an ad-hoc job (or through the context API). Right now we use bundle deploy in these cases, but deploying bundle resources is not always expected when you just want to quickly run a file. Sync makes more sense in these cases, but we still want to have verbose output to see what's happening. In the 'deploy' command we have a hidden 'verbose' flag. For sync, I've added an 'output' flag that handles both JSON and text output, similar to how it's done in the non-bundle `sync` command. The flag is not hidden (although we still don't show any output by default if the flag is not set).
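For example, expected usage might look like the following (a sketch; the `text` and `json` values follow the output formats the CLI uses elsewhere):

```
# One-shot sync with human-readable event output:
databricks bundle sync --output text

# Watch for changes and emit machine-readable events, e.g. for the VS Code extension to consume:
databricks bundle sync --watch --output json
```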
VSCode Extension PR: https://github.com/databricks/databricks-vscode/pull/1401 ## Tests Manually --- cmd/bundle/sync.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index df3e087c2..274bba0e0 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -1,7 +1,9 @@ package bundle import ( + "context" "fmt" + "io" "time" "github.com/databricks/cli/bundle" @@ -9,6 +11,7 @@ import ( "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/sync" "github.com/spf13/cobra" @@ -18,6 +21,7 @@ type syncFlags struct { interval time.Duration full bool watch bool + output flags.Output } func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) { @@ -26,6 +30,21 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) return nil, fmt.Errorf("cannot get sync options: %w", err) } + if f.output != "" { + var outputFunc func(context.Context, <-chan sync.Event, io.Writer) + switch f.output { + case flags.OutputText: + outputFunc = sync.TextOutput + case flags.OutputJSON: + outputFunc = sync.JsonOutput + } + if outputFunc != nil { + opts.OutputHandler = func(ctx context.Context, c <-chan sync.Event) { + outputFunc(ctx, c, cmd.OutOrStdout()) + } + } + } + opts.Full = f.full opts.PollInterval = f.interval return opts, nil @@ -42,6 +61,7 @@ func newSyncCommand() *cobra.Command { cmd.Flags().DurationVar(&f.interval, "interval", 1*time.Second, "file system polling interval (for --watch)") cmd.Flags().BoolVar(&f.full, "full", false, "perform full synchronization (default is incremental)") cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes") + cmd.Flags().Var(&f.output, "output", "type of the output format") cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -65,6 +85,7 @@ func newSyncCommand() *cobra.Command { if err != nil { return err } + defer s.Close() log.Infof(ctx, "Remote file sync location: %v", opts.RemotePath) From ab622e65bbf8d90dcc8f363a978f871bb04554af Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 23 Oct 2024 16:08:27 +0200 Subject: [PATCH 24/25] [Release] Release v0.231.0 (#1856) CLI: * Added JSON input validation for CLI commands ([#1771](https://github.com/databricks/cli/pull/1771)). * Support Git worktrees for `sync` ([#1831](https://github.com/databricks/cli/pull/1831)). Bundles: * Add `bundle summary` to display URLs for deployed resources ([#1731](https://github.com/databricks/cli/pull/1731)). * Added a warning when incorrect permissions used for `/Workspace/Shared` bundle root ([#1821](https://github.com/databricks/cli/pull/1821)). * Show actionable errors for collaborative deployment scenarios ([#1386](https://github.com/databricks/cli/pull/1386)). * Fix path to repository-wide exclude file ([#1837](https://github.com/databricks/cli/pull/1837)). * Fixed typo in converting cluster permissions ([#1826](https://github.com/databricks/cli/pull/1826)). * Ignore metastore permission error during template generation ([#1819](https://github.com/databricks/cli/pull/1819)). * Handle normalization of `dyn.KindTime` into an any type ([#1836](https://github.com/databricks/cli/pull/1836)). * Added support for pip options in environment dependencies ([#1842](https://github.com/databricks/cli/pull/1842)). 
 * Fix race condition when restarting continuous jobs ([#1849](https://github.com/databricks/cli/pull/1849)).
 * Fix pipeline in default-python template not working for certain workspaces ([#1854](https://github.com/databricks/cli/pull/1854)).
 * Add "output" flag to the bundle sync command ([#1853](https://github.com/databricks/cli/pull/1853)).

Internal:
 * Move utility functions dealing with IAM to libs/iamutil ([#1820](https://github.com/databricks/cli/pull/1820)).
 * Remove unused `IS_OWNER` constant ([#1823](https://github.com/databricks/cli/pull/1823)).
 * Assert SDK version is consistent in the CLI generation process ([#1814](https://github.com/databricks/cli/pull/1814)).
 * Fixed unmarshalling json input into `interface{}` type ([#1832](https://github.com/databricks/cli/pull/1832)).
 * Fix `TestAccFsMkdirWhenFileExistsAtPath` in isolated Azure environments ([#1833](https://github.com/databricks/cli/pull/1833)).
 * Add behavioral tests for examples from the YAML spec ([#1835](https://github.com/databricks/cli/pull/1835)).
 * Remove Terraform conversion function that's no longer used ([#1840](https://github.com/databricks/cli/pull/1840)).
 * Encode assumptions about the dashboards API in a test ([#1839](https://github.com/databricks/cli/pull/1839)).
 * Add script to make testing of code on branches easier ([#1844](https://github.com/databricks/cli/pull/1844)).

API Changes:
 * Added `databricks disable-legacy-dbfs` command group.

OpenAPI commit cf9c61453990df0f9453670f2fe68e1b128647a2 (2024-10-14)
Dependency updates:
 * Upgrade TF provider to 1.54.0 ([#1852](https://github.com/databricks/cli/pull/1852)).
 * Bump github.com/databricks/databricks-sdk-go from 0.48.0 to 0.49.0 ([#1843](https://github.com/databricks/cli/pull/1843)).
---
 CHANGELOG.md | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f31bb10b2..863474934 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,43 @@
 # Version changelog
 
+## [Release] Release v0.231.0
+
+CLI:
+ * Added JSON input validation for CLI commands ([#1771](https://github.com/databricks/cli/pull/1771)).
+ * Support Git worktrees for `sync` ([#1831](https://github.com/databricks/cli/pull/1831)).
+
+Bundles:
+ * Add `bundle summary` to display URLs for deployed resources ([#1731](https://github.com/databricks/cli/pull/1731)).
+ * Added a warning when incorrect permissions used for `/Workspace/Shared` bundle root ([#1821](https://github.com/databricks/cli/pull/1821)).
+ * Show actionable errors for collaborative deployment scenarios ([#1386](https://github.com/databricks/cli/pull/1386)).
+ * Fix path to repository-wide exclude file ([#1837](https://github.com/databricks/cli/pull/1837)).
+ * Fixed typo in converting cluster permissions ([#1826](https://github.com/databricks/cli/pull/1826)).
+ * Ignore metastore permission error during template generation ([#1819](https://github.com/databricks/cli/pull/1819)).
+ * Handle normalization of `dyn.KindTime` into an any type ([#1836](https://github.com/databricks/cli/pull/1836)).
+ * Added support for pip options in environment dependencies ([#1842](https://github.com/databricks/cli/pull/1842)).
+ * Fix race condition when restarting continuous jobs ([#1849](https://github.com/databricks/cli/pull/1849)).
+ * Fix pipeline in default-python template not working for certain workspaces ([#1854](https://github.com/databricks/cli/pull/1854)).
+ * Add "output" flag to the bundle sync command ([#1853](https://github.com/databricks/cli/pull/1853)).
+
+Internal:
+ * Move utility functions dealing with IAM to libs/iamutil ([#1820](https://github.com/databricks/cli/pull/1820)).
+ * Remove unused `IS_OWNER` constant ([#1823](https://github.com/databricks/cli/pull/1823)).
+ * Assert SDK version is consistent in the CLI generation process ([#1814](https://github.com/databricks/cli/pull/1814)).
+ * Fixed unmarshalling json input into `interface{}` type ([#1832](https://github.com/databricks/cli/pull/1832)).
+ * Fix `TestAccFsMkdirWhenFileExistsAtPath` in isolated Azure environments ([#1833](https://github.com/databricks/cli/pull/1833)).
+ * Add behavioral tests for examples from the YAML spec ([#1835](https://github.com/databricks/cli/pull/1835)).
+ * Remove Terraform conversion function that's no longer used ([#1840](https://github.com/databricks/cli/pull/1840)).
+ * Encode assumptions about the dashboards API in a test ([#1839](https://github.com/databricks/cli/pull/1839)).
+ * Add script to make testing of code on branches easier ([#1844](https://github.com/databricks/cli/pull/1844)).
+
+API Changes:
+ * Added `databricks disable-legacy-dbfs` command group.
+
+OpenAPI commit cf9c61453990df0f9453670f2fe68e1b128647a2 (2024-10-14)
+Dependency updates:
+ * Upgrade TF provider to 1.54.0 ([#1852](https://github.com/databricks/cli/pull/1852)).
+ * Bump github.com/databricks/databricks-sdk-go from 0.48.0 to 0.49.0 ([#1843](https://github.com/databricks/cli/pull/1843)).
+
 ## [Release] Release v0.230.0
 
 Notable changes for Databricks Asset Bundles:

From eddaddaf8b6773b6afcd2ab86ad483bdaca810fe Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Thu, 24 Oct 2024 14:03:12 +0200
Subject: [PATCH 25/25] Attempt to reduce test flakiness on Windows (#1845)

## Changes

Test failures indicate that both stdout and stderr are consumed, yet the content of stdout doesn't end up in the intended output. This can happen if the goroutines responsible for writing to the combined output buffer attempt to write to the same underlying buffer concurrently.

Example failure:

```
=== RUN   TestBackgroundCombinedOutput
    background_test.go:65:
        Error Trace:    D:/a/cli/cli/libs/process/background_test.go:65
        Error:          elements differ

                        extra elements in list A:
                        ([]interface {}) (len=1) {
                         (string) (len=1) "2"
                        }

                        listA:
                        ([]string) (len=2) {
                         (string) (len=1) "1",
                         (string) (len=1) "2"
                        }

                        listB:
                        ([]string) (len=1) {
                         (string) (len=1) "1"
                        }
        Test:           TestBackgroundCombinedOutput
```

With the test body:
https://github.com/databricks/cli/blob/ca45e53f42c5c4b26f2833554ab7118802c017cb/libs/process/background_test.go#L48-L66

With the implementation of `WithCombinedOutput`:
https://github.com/databricks/cli/blob/ca45e53f42c5c4b26f2833554ab7118802c017cb/libs/process/opts.go#L72-L78

Notice that `c.Stdout` does get the "2"; otherwise the test failure would have included the relevant assertion error. This leads me to believe that there is a race on writing to `buf` from the two goroutines writing to `c.Stdout` and `c.Stderr`.

## Tests

The test passes. Whether this PR has the intended effect remains to be seen...
---
 libs/process/opts.go | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/libs/process/opts.go b/libs/process/opts.go
index 9516e49ba..dd0667516 100644
--- a/libs/process/opts.go
+++ b/libs/process/opts.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"os/exec"
+	"sync"
 )
 
 type execOption func(context.Context, *exec.Cmd) error
@@ -69,10 +70,24 @@ func WithStdoutWriter(dst io.Writer) execOption {
 	}
 }
 
+// safeWriter is a writer that is safe to use concurrently.
+// It serializes writes to the underlying writer.
+type safeWriter struct {
+	w io.Writer
+	m sync.Mutex
+}
+
+func (s *safeWriter) Write(p []byte) (n int, err error) {
+	s.m.Lock()
+	defer s.m.Unlock()
+	return s.w.Write(p)
+}
+
 func WithCombinedOutput(buf *bytes.Buffer) execOption {
+	sw := &safeWriter{w: buf}
 	return func(_ context.Context, c *exec.Cmd) error {
-		c.Stdout = io.MultiWriter(buf, c.Stdout)
-		c.Stderr = io.MultiWriter(buf, c.Stderr)
+		c.Stdout = io.MultiWriter(sw, c.Stdout)
+		c.Stderr = io.MultiWriter(sw, c.Stderr)
 		return nil
 	}
 }
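For reference, a minimal standalone sketch (not part of the patch) of the idea behind `safeWriter`: two goroutines write through the mutex-guarded writer into one `bytes.Buffer`, which is not safe for concurrent use on its own.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// safeWriter mirrors the type added in the patch: it serializes
// writes to the underlying writer with a mutex.
type safeWriter struct {
	w io.Writer
	m sync.Mutex
}

func (s *safeWriter) Write(p []byte) (int, error) {
	s.m.Lock()
	defer s.m.Unlock()
	return s.w.Write(p)
}

func main() {
	var buf bytes.Buffer
	sw := &safeWriter{w: &buf}

	// Two goroutines write concurrently, like c.Stdout and c.Stderr in
	// WithCombinedOutput. Without the mutex this would be a data race
	// on the shared bytes.Buffer.
	var wg sync.WaitGroup
	for _, line := range []string{"1\n", "2\n"} {
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			_, _ = io.WriteString(sw, s)
		}(line)
	}
	wg.Wait()

	// Both lines end up in the buffer, in some order.
	fmt.Print(buf.String())
}
```

Running the unsynchronized variant under `go run -race` (or `go test -race`) reports a data race on the buffer; with the mutex, writes are serialized and both lines reach the buffer.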