mirror of https://github.com/databricks/cli.git

commit b312691c05: Merge branch 'main' into feature/all-purpose-clusters

@@ -1 +1 @@
-f98c07f9c71f579de65d2587bb0292f83d10e55d
+3eae49b444cac5a0118a3503e5b7ecef7f96527a
@@ -116,6 +116,10 @@ func allResolvers() *resolvers {
 {{range .Services -}}
 {{- if in $allowlist .KebabName -}}
     r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["{{.Singular.PascalName}}"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
         if err != nil {
             return "", err
@@ -154,6 +154,7 @@ func new{{.PascalName}}() *cobra.Command {
     "provider-exchanges delete-listing-from-exchange"
     "provider-exchanges list-exchanges-for-listing"
     "provider-exchanges list-listings-for-exchange"
+    "storage-credentials get"
     -}}
     {{- $fullCommandName := (print $serviceName " " .KebabName) -}}
     {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
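The generated resolver above consults a lookupOverrides map before falling back to the generated GetBy... call, so individual services can substitute custom lookup logic. A minimal standalone sketch of that override-map pattern, with hypothetical names and no SDK dependency:

package main

import (
    "context"
    "fmt"
)

// resolver maps a resource name to its ID.
type resolver func(ctx context.Context, name string) (string, error)

// lookupOverrides plays the role of the template's override map: entries here
// take precedence over the generated lookup for the same service.
var lookupOverrides = map[string]resolver{
    "Cluster": func(ctx context.Context, name string) (string, error) {
        return "1234-5678-abcd", nil // a custom (e.g. list-based) lookup would go here
    },
}

func resolve(ctx context.Context, service, name string) (string, error) {
    if fn, ok := lookupOverrides[service]; ok {
        return fn(ctx, name)
    }
    return "", fmt.Errorf("no override for %q; would fall back to the generated lookup", service)
}

func main() {
    id, _ := resolve(context.Background(), "Cluster", "my-cluster")
    fmt.Println(id) // 1234-5678-abcd
}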
@@ -75,6 +75,8 @@ cmd/workspace/online-tables/online-tables.go linguist-generated=true
 cmd/workspace/permission-migration/permission-migration.go linguist-generated=true
 cmd/workspace/permissions/permissions.go linguist-generated=true
 cmd/workspace/pipelines/pipelines.go linguist-generated=true
+cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true
+cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true
 cmd/workspace/policy-families/policy-families.go linguist-generated=true
 cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true
 cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true
@@ -94,6 +96,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true
 cmd/workspace/recipients/recipients.go linguist-generated=true
 cmd/workspace/registered-models/registered-models.go linguist-generated=true
 cmd/workspace/repos/repos.go linguist-generated=true
+cmd/workspace/resource-quotas/resource-quotas.go linguist-generated=true
 cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true
 cmd/workspace/schemas/schemas.go linguist-generated=true
 cmd/workspace/secrets/secrets.go linguist-generated=true
CHANGELOG.md (78 additions)

@@ -1,5 +1,83 @@
 # Version changelog
 
+## [Release] Release v0.228.0
+
+CLI:
+* Do not error if we cannot prompt for a profile in `auth login` ([#1745](https://github.com/databricks/cli/pull/1745)).
+
+Bundles:
+
+As of this release, the CLI will show a prompt if there are configuration changes that lead to DLT pipeline recreation.
+Users can skip the prompt by specifying the `--auto-approve` flag.
+
+* Pass along to Terraform process ([#1734](https://github.com/databricks/cli/pull/1734)).
+* Add prompt when a pipeline recreation happens ([#1672](https://github.com/databricks/cli/pull/1672)).
+* Use materialized views in the default-sql template ([#1709](https://github.com/databricks/cli/pull/1709)).
+* Update templates to latest LTS DBR ([#1715](https://github.com/databricks/cli/pull/1715)).
+* Make lock optional in the JSON schema ([#1738](https://github.com/databricks/cli/pull/1738)).
+* Do not suppress normalisation diagnostics for resolving variables ([#1740](https://github.com/databricks/cli/pull/1740)).
+* Include a permissions section in all templates ([#1713](https://github.com/databricks/cli/pull/1713)).
+* Fixed complex variables are not being correctly merged from include files ([#1746](https://github.com/databricks/cli/pull/1746)).
+* Fixed variable override in target with full variable syntax ([#1749](https://github.com/databricks/cli/pull/1749)).
+
+Internal:
+* Consider serverless clusters as compatible for Python wheel tasks ([#1733](https://github.com/databricks/cli/pull/1733)).
+* PythonMutator: explain missing package error ([#1736](https://github.com/databricks/cli/pull/1736)).
+* Add `dyn.Time` to box a timestamp with its original string value ([#1732](https://github.com/databricks/cli/pull/1732)).
+* Fix streaming of stdout, stdin, stderr in cobra test runner ([#1742](https://github.com/databricks/cli/pull/1742)).
+
+Dependency updates:
+* Bump github.com/Masterminds/semver/v3 from 3.2.1 to 3.3.0 ([#1741](https://github.com/databricks/cli/pull/1741)).
+
+## [Release] Release v0.227.1
+
+CLI:
+* Disable prompt for storage-credentials get command ([#1723](https://github.com/databricks/cli/pull/1723)).
+
+Bundles:
+* Do not treat empty path as a local path ([#1717](https://github.com/databricks/cli/pull/1717)).
+* Correctly mark PyPI package name specs with multiple specifiers as remote libraries ([#1725](https://github.com/databricks/cli/pull/1725)).
+* Improve error handling for /Volumes paths in mode: development ([#1716](https://github.com/databricks/cli/pull/1716)).
+
+Internal:
+* Ignore CLI version check on development builds of the CLI ([#1714](https://github.com/databricks/cli/pull/1714)).
+
+API Changes:
+* Added `databricks resource-quotas` command group.
+* Added `databricks policy-compliance-for-clusters` command group.
+* Added `databricks policy-compliance-for-jobs` command group.
+
+OpenAPI commit 3eae49b444cac5a0118a3503e5b7ecef7f96527a (2024-08-21)
+
+Dependency updates:
+* Bump github.com/databricks/databricks-sdk-go from 0.44.0 to 0.45.0 ([#1719](https://github.com/databricks/cli/pull/1719)).
+* Revert hc-install version to 0.7.0 ([#1711](https://github.com/databricks/cli/pull/1711)).
+
+## [Release] Release v0.227.0
+
+CLI:
+* Added filtering flags for cluster list commands ([#1703](https://github.com/databricks/cli/pull/1703)).
+
+Bundles:
+* Allow users to configure paths (including outside of the bundle root) to synchronize to the workspace. ([#1694](https://github.com/databricks/cli/pull/1694)).
+* Add configurable presets for name prefixes, tags, etc. ([#1490](https://github.com/databricks/cli/pull/1490)).
+* Add support for requirements libraries in Job Tasks ([#1543](https://github.com/databricks/cli/pull/1543)).
+* Remove reference to "dbt" in the default-sql template ([#1696](https://github.com/databricks/cli/pull/1696)).
+* Pause continuous pipelines when 'mode: development' is used ([#1590](https://github.com/databricks/cli/pull/1590)).
+* Report all empty resources present in error diagnostic ([#1685](https://github.com/databricks/cli/pull/1685)).
+* Improves detection of PyPI package names in environment dependencies ([#1699](https://github.com/databricks/cli/pull/1699)).
+
+Internal:
+* Add `import` option for PyDABs ([#1693](https://github.com/databricks/cli/pull/1693)).
+* Make fileset take optional list of paths to list ([#1684](https://github.com/databricks/cli/pull/1684)).
+* Pass through paths argument to libs/sync ([#1689](https://github.com/databricks/cli/pull/1689)).
+* Correctly mark package names with versions as remote libraries ([#1697](https://github.com/databricks/cli/pull/1697)).
+* Share test initializer in common helper function ([#1695](https://github.com/databricks/cli/pull/1695)).
+* Make `pydabs/venv_path` optional ([#1687](https://github.com/databricks/cli/pull/1687)).
+* Use API mocks for duplicate path errors in workspace files extensions client ([#1690](https://github.com/databricks/cli/pull/1690)).
+* Fix prefix preset used for UC schemas ([#1704](https://github.com/databricks/cli/pull/1704)).
+
+
 ## [Release] Release v0.226.0
 
 CLI:
@@ -33,12 +33,7 @@ func createGlobError(v dyn.Value, p dyn.Path, message string) diag.Diagnostic {
         Severity:  diag.Error,
         Summary:   fmt.Sprintf("%s: %s", source, message),
         Locations: []dyn.Location{v.Location()},
-        Paths: []dyn.Path{
-            // Hack to clone the path. This path copy is mutable.
-            // To be addressed in a later PR.
-            p.Append(),
-        },
+        Paths:     []dyn.Path{p},
     }
 }
@@ -15,6 +15,8 @@ type infer struct {
 
 func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     artifact := b.Config.Artifacts[m.name]
+
+    // TODO use python.DetectVEnvExecutable once bundle has a way to specify venv path
     py, err := python.DetectExecutable(ctx)
     if err != nil {
         return diag.FromErr(err)
@@ -39,6 +39,14 @@ type Bundle struct {
     // Exclusively use this field for filesystem operations.
     BundleRoot vfs.Path
 
+    // SyncRoot is a virtual filesystem path to the root directory of the files that are synchronized to the workspace.
+    // It can be an ancestor to [BundleRoot], but not a descendant; that is, [SyncRoot] must contain [BundleRoot].
+    SyncRoot vfs.Path
+
+    // SyncRootPath is the local path to the root directory of files that are synchronized to the workspace.
+    // It is equal to `SyncRoot.Native()` and included as dedicated field for convenient access.
+    SyncRootPath string
+
     Config config.Root
 
     // Metadata about the bundle deployment. This is the interface Databricks services
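The new doc comments state an invariant rather than enforce one: [SyncRoot] may equal or be an ancestor of [BundleRoot], never a descendant. A stdlib-only sketch of an ancestor-or-equal check that captures the stated relationship (an illustration only, not code from this branch; filepath.IsLocal requires Go 1.20+):

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// contains reports whether root is equal to, or an ancestor of, path.
func contains(root, path string) bool {
    rel, err := filepath.Rel(root, path)
    if err != nil {
        return false
    }
    // "." means equal; otherwise the relative path must not escape root.
    return rel == "." || (!strings.HasPrefix(rel, "..") && filepath.IsLocal(rel))
}

func main() {
    fmt.Println(contains("/repo", "/repo/bundle")) // true: the sync root may contain the bundle root
    fmt.Println(contains("/repo/bundle", "/repo")) // false: the sync root may not be a descendant
}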
@@ -28,6 +28,10 @@ func (r ReadOnlyBundle) BundleRoot() vfs.Path {
     return r.b.BundleRoot
 }
 
+func (r ReadOnlyBundle) SyncRoot() vfs.Path {
+    return r.b.SyncRoot
+}
+
 func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient {
     return r.b.WorkspaceClient()
 }
@@ -6,5 +6,5 @@ type Deployment struct {
     FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"`
 
     // Lock configures locking behavior on deployment.
-    Lock Lock `json:"lock"`
+    Lock Lock `json:"lock,omitempty"`
 }
@@ -36,8 +36,8 @@ type PyDABs struct {
 
     // VEnvPath is path to the virtual environment.
     //
-    // Required if PyDABs is enabled. PyDABs will load the code in the specified
-    // environment.
+    // If enabled, PyDABs will execute code within this environment. If disabled,
+    // it defaults to using the Python interpreter available in the current shell.
     VEnvPath string `json:"venv_path,omitempty"`
 
     // Import contains a list Python packages with PyDABs code.
@@ -155,8 +155,7 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 
     // Schemas: Prefix
     for i := range r.Schemas {
-        schemaPrefix := "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
-        r.Schemas[i].Name = schemaPrefix + r.Schemas[i].Name
+        r.Schemas[i].Name = normalizePrefix(prefix) + r.Schemas[i].Name
         // HTTP API for schemas doesn't yet support tags. It's only supported in
         // the Databricks UI and via the SQL API.
     }
@@ -8,6 +8,7 @@ import (
     "github.com/databricks/cli/bundle/config"
     "github.com/databricks/cli/bundle/config/mutator"
     "github.com/databricks/cli/bundle/config/resources"
+    "github.com/databricks/databricks-sdk-go/service/catalog"
     "github.com/databricks/databricks-sdk-go/service/jobs"
     "github.com/stretchr/testify/require"
 )
@@ -68,6 +69,62 @@ func TestApplyPresetsPrefix(t *testing.T) {
     }
 }
 
+func TestApplyPresetsPrefixForUcSchema(t *testing.T) {
+    tests := []struct {
+        name   string
+        prefix string
+        schema *resources.Schema
+        want   string
+    }{
+        {
+            name:   "add prefix to schema",
+            prefix: "[prefix]",
+            schema: &resources.Schema{
+                CreateSchema: &catalog.CreateSchema{
+                    Name: "schema1",
+                },
+            },
+            want: "prefix_schema1",
+        },
+        {
+            name:   "add empty prefix to schema",
+            prefix: "",
+            schema: &resources.Schema{
+                CreateSchema: &catalog.CreateSchema{
+                    Name: "schema1",
+                },
+            },
+            want: "schema1",
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            b := &bundle.Bundle{
+                Config: config.Root{
+                    Resources: config.Resources{
+                        Schemas: map[string]*resources.Schema{
+                            "schema1": tt.schema,
+                        },
+                    },
+                    Presets: config.Presets{
+                        NamePrefix: tt.prefix,
+                    },
+                },
+            }
+
+            ctx := context.Background()
+            diag := bundle.Apply(ctx, b, mutator.ApplyPresets())
+
+            if diag.HasError() {
+                t.Fatalf("unexpected error: %v", diag)
+            }
+
+            require.Equal(t, tt.want, b.Config.Resources.Schemas["schema1"].Name)
+        })
+    }
+}
+
 func TestApplyPresetsTags(t *testing.T) {
     tests := []struct {
         name string
@@ -24,7 +24,7 @@ func (m *configureWSFS) Name() string {
 }
 
 func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-    root := b.BundleRoot.Native()
+    root := b.SyncRoot.Native()
 
     // The bundle root must be located in /Workspace/
     if !strings.HasPrefix(root, "/Workspace/") {
@@ -45,6 +45,6 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
         return diag.FromErr(err)
     }
 
-    b.BundleRoot = p
+    b.SyncRoot = p
     return nil
 }
@@ -64,6 +64,7 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) {
 }
 
 func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
+    var diags diag.Diagnostics
     p := b.Config.Presets
     u := b.Config.Workspace.CurrentUser
 
@@ -74,44 +75,56 @@ func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
     // status to UNPAUSED at the level of an individual object, whic hwas
     // historically allowed.)
     if p.TriggerPauseStatus == config.Unpaused {
-        return diag.Diagnostics{{
+        diags = diags.Append(diag.Diagnostic{
             Severity:  diag.Error,
             Summary:   "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default",
             Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
-        }}
+        })
     }
 
     // Make sure this development copy has unique names and paths to avoid conflicts
     if path := findNonUserPath(b); path != "" {
-        return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
+        if path == "artifact_path" && strings.HasPrefix(b.Config.Workspace.ArtifactPath, "/Volumes") {
+            // For Volumes paths we recommend including the current username as a substring
+            diags = diags.Extend(diag.Errorf("%s should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'", path))
+        } else {
+            // For non-Volumes paths recommend simply putting things in the home folder
+            diags = diags.Extend(diag.Errorf("%s must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'", path))
+        }
     }
     if p.NamePrefix != "" && !strings.Contains(p.NamePrefix, u.ShortName) && !strings.Contains(p.NamePrefix, u.UserName) {
         // Resources such as pipelines require a unique name, e.g. '[dev steve] my_pipeline'.
         // For this reason we require the name prefix to contain the current username;
         // it's a pitfall for users if they don't include it and later find out that
        // only a single user can do development deployments.
-        return diag.Diagnostics{{
+        diags = diags.Append(diag.Diagnostic{
             Severity:  diag.Error,
             Summary:   "prefix should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'",
             Locations: []dyn.Location{b.Config.GetLocation("presets.name_prefix")},
-        }}
+        })
     }
-    return nil
+    return diags
 }
 
+// findNonUserPath finds the first workspace path such as root_path that doesn't
+// contain the current username or current user's shortname.
 func findNonUserPath(b *bundle.Bundle) string {
-    username := b.Config.Workspace.CurrentUser.UserName
+    containsName := func(path string) bool {
+        username := b.Config.Workspace.CurrentUser.UserName
+        shortname := b.Config.Workspace.CurrentUser.ShortName
+        return strings.Contains(path, username) || strings.Contains(path, shortname)
+    }
 
-    if b.Config.Workspace.RootPath != "" && !strings.Contains(b.Config.Workspace.RootPath, username) {
+    if b.Config.Workspace.RootPath != "" && !containsName(b.Config.Workspace.RootPath) {
         return "root_path"
     }
-    if b.Config.Workspace.StatePath != "" && !strings.Contains(b.Config.Workspace.StatePath, username) {
+    if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
         return "state_path"
     }
-    if b.Config.Workspace.FilePath != "" && !strings.Contains(b.Config.Workspace.FilePath, username) {
+    if b.Config.Workspace.FilePath != "" && !containsName(b.Config.Workspace.FilePath) {
         return "file_path"
     }
-    if b.Config.Workspace.ArtifactPath != "" && !strings.Contains(b.Config.Workspace.ArtifactPath, username) {
+    if b.Config.Workspace.ArtifactPath != "" && !containsName(b.Config.Workspace.ArtifactPath) {
         return "artifact_path"
     }
     return ""
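Beyond the new /Volumes-specific message, the hunk above changes control flow: early `return diag.Diagnostics{{...}}` statements become `diags = diags.Append(...)` / `diags.Extend(...)`, so one validation pass now reports every violation instead of stopping at the first. A stdlib-only sketch of that accumulate-then-return pattern (simplified stand-in types, not the CLI's diag package):

package main

import "fmt"

type diagnostic struct{ summary string }

// validate appends one diagnostic per failed check and returns them all,
// rather than returning as soon as the first check fails.
func validate(unpaused bool, nonUserPath string) []diagnostic {
    var diags []diagnostic
    if unpaused {
        diags = append(diags, diagnostic{"trigger pause status is UNPAUSED"})
    }
    if nonUserPath != "" {
        diags = append(diags, diagnostic{nonUserPath + " does not contain the current username"})
    }
    return diags
}

func main() {
    for _, d := range validate(true, "root_path") {
        fmt.Println(d.summary) // both violations are reported in one run
    }
}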
@@ -237,10 +237,20 @@ func TestValidateDevelopmentMode(t *testing.T) {
     diags := validateDevelopmentMode(b)
     require.NoError(t, diags.Error())
 
+    // Test with /Volumes path
+    b = mockBundle(config.Development)
+    b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/lennart/libs"
+    diags = validateDevelopmentMode(b)
+    require.NoError(t, diags.Error())
+    b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/libs"
+    diags = validateDevelopmentMode(b)
+    require.ErrorContains(t, diags.Error(), "artifact_path should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'")
+
     // Test with a bundle that has a non-user path
+    b = mockBundle(config.Development)
     b.Config.Workspace.RootPath = "/Shared/.bundle/x/y/state"
     diags = validateDevelopmentMode(b)
-    require.ErrorContains(t, diags.Error(), "root_path")
+    require.ErrorContains(t, diags.Error(), "root_path must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'")
 
     // Test with a bundle that has an unpaused trigger pause status
     b = mockBundle(config.Development)
@@ -1,15 +1,21 @@
 package python
 
 import (
+    "bytes"
     "context"
     "encoding/json"
     "errors"
     "fmt"
+    "io"
     "os"
     "path/filepath"
-    "runtime"
 
     "github.com/databricks/databricks-sdk-go/logger"
+    "github.com/fatih/color"
 
+    "strings"
+
+    "github.com/databricks/cli/libs/python"
+
     "github.com/databricks/cli/bundle/env"
@@ -86,23 +92,15 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
         return nil
     }
 
-    if experimental.PyDABs.VEnvPath == "" {
-        return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set")
-    }
-
     // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics'
     var mutateDiags diag.Diagnostics
     var mutateDiagsHasError = errors.New("unexpected error")
 
     err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) {
-        pythonPath := interpreterPath(experimental.PyDABs.VEnvPath)
+        pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath)
 
-        if _, err := os.Stat(pythonPath); err != nil {
-            if os.IsNotExist(err) {
-                return dyn.InvalidValue, fmt.Errorf("can't find %q, check if venv is created", pythonPath)
-            } else {
-                return dyn.InvalidValue, fmt.Errorf("can't find %q: %w", pythonPath, err)
-            }
+        if err != nil {
+            return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err)
         }
 
         cacheDir, err := createCacheDir(ctx)
@@ -177,7 +175,11 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
         return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err)
     }
 
-    stderrWriter := newLogWriter(ctx, "stderr: ")
+    stderrBuf := bytes.Buffer{}
+    stderrWriter := io.MultiWriter(
+        newLogWriter(ctx, "stderr: "),
+        &stderrBuf,
+    )
     stdoutWriter := newLogWriter(ctx, "stdout: ")
 
     _, processErr := process.Background(
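The buffer introduced above lets the mutator keep streaming the Python process's stderr to the debug log while also retaining a copy for the error diagnostic constructed in the next hunk. The tee is plain `io.MultiWriter`; a self-contained sketch:

package main

import (
    "bytes"
    "fmt"
    "io"
    "os"
    "strings"
)

func main() {
    // Write every byte to both sinks: a live stream (standing in for the
    // mutator's log writer) and a buffer kept for later error reporting.
    var stderrBuf bytes.Buffer
    w := io.MultiWriter(os.Stderr, &stderrBuf)

    _, _ = io.Copy(w, strings.NewReader("No module named 'databricks'\n"))

    // The buffered copy is still available after the stream is consumed.
    fmt.Print(stderrBuf.String())
}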
@@ -205,7 +207,13 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
     // process can fail without reporting errors in diagnostics file or creating it, for instance,
     // venv doesn't have PyDABs library installed
     if processErr != nil {
-        return dyn.InvalidValue, diag.Errorf("python mutator process failed: %sw, use --debug to enable logging", processErr)
+        diagnostic := diag.Diagnostic{
+            Severity: diag.Error,
+            Summary:  fmt.Sprintf("python mutator process failed: %q, use --debug to enable logging", processErr),
+            Detail:   explainProcessErr(stderrBuf.String()),
+        }
+
+        return dyn.InvalidValue, diag.Diagnostics{diagnostic}
     }
 
     // or we can fail to read diagnostics file, that should always be created
@@ -213,15 +221,40 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
         return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr)
     }
 
-    output, err := loadOutputFile(rootPath, outputPath)
-    if err != nil {
-        return dyn.InvalidValue, diag.Errorf("failed to load Python mutator output: %s", err)
-    }
+    output, outputDiags := loadOutputFile(rootPath, outputPath)
+    pythonDiagnostics = pythonDiagnostics.Extend(outputDiags)
 
     // we pass through pythonDiagnostic because it contains warnings
     return output, pythonDiagnostics
 }
 
+const installExplanation = `If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
+and that the wheel is installed in the Python environment:
+
+  $ .venv/bin/pip install -e .
+
+If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
+or activate the environment before running CLI commands:
+
+  experimental:
+    pydabs:
+      venv_path: .venv
+`
+
+// explainProcessErr provides additional explanation for common errors.
+// It's meant to be the best effort, and not all errors are covered.
+// Output should be used only used for error reporting.
+func explainProcessErr(stderr string) string {
+    // implemented in cpython/Lib/runpy.py and portable across Python 3.x, including pypy
+    if strings.Contains(stderr, "Error while finding module specification for 'databricks.bundles.build'") {
+        summary := color.CyanString("Explanation: ") + "'databricks-pydabs' library is not installed in the Python environment.\n"
+
+        return stderr + "\n" + summary + "\n" + installExplanation
+    }
+
+    return stderr
+}
+
 func writeInputFile(inputPath string, input dyn.Value) error {
     // we need to marshal dyn.Value instead of bundle.Config to JSON to support
     // non-string fields assigned with bundle variables
@@ -233,10 +266,10 @@ func writeInputFile(inputPath string, input dyn.Value) error {
     return os.WriteFile(inputPath, rootConfigJson, 0600)
 }
 
-func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) {
+func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) {
     outputFile, err := os.Open(outputPath)
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to open output file: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err))
     }
 
     defer outputFile.Close()
@@ -251,27 +284,34 @@ func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) {
     // for that, we pass virtualPath instead of outputPath as file location
     virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml"))
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to get absolute path: %w", err))
     }
 
     generated, err := yamlloader.LoadYAML(virtualPath, outputFile)
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to parse output file: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to parse output file: %w", err))
     }
 
-    normalized, diagnostic := convert.Normalize(config.Root{}, generated)
-    if diagnostic.Error() != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %w", diagnostic.Error())
-    }
+    return strictNormalize(config.Root{}, generated)
+}
+
+func strictNormalize(dst any, generated dyn.Value) (dyn.Value, diag.Diagnostics) {
+    normalized, diags := convert.Normalize(dst, generated)
 
     // warnings shouldn't happen because output should be already normalized
     // when it happens, it's a bug in the mutator, and should be treated as an error
-    for _, d := range diagnostic.Filter(diag.Warning) {
-        return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %s", d.Summary)
+
+    strictDiags := diag.Diagnostics{}
+
+    for _, d := range diags {
+        if d.Severity == diag.Warning {
+            d.Severity = diag.Error
+        }
+
+        strictDiags = strictDiags.Append(d)
     }
 
-    return normalized, nil
+    return normalized, strictDiags
 }
 
 // loadDiagnosticsFile loads diagnostics from a file.
@@ -423,11 +463,16 @@ func isOmitemptyDelete(left dyn.Value) bool {
     }
 }
 
-// interpreterPath returns platform-specific path to Python interpreter in the virtual environment.
-func interpreterPath(venvPath string) string {
-    if runtime.GOOS == "windows" {
-        return filepath.Join(venvPath, "Scripts", "python3.exe")
-    } else {
-        return filepath.Join(venvPath, "bin", "python3")
+// detectExecutable lookups Python interpreter in virtual environment, or if not set, in PATH.
+func detectExecutable(ctx context.Context, venvPath string) (string, error) {
+    if venvPath == "" {
+        interpreter, err := python.DetectExecutable(ctx)
+        if err != nil {
+            return "", err
+        }
+
+        return interpreter, nil
     }
+
+    return python.DetectVEnvExecutable(venvPath)
 }
@@ -10,6 +10,8 @@ import (
     "runtime"
     "testing"
 
+    "github.com/databricks/cli/libs/dyn/convert"
+
     "github.com/databricks/cli/libs/dyn/merge"
 
     "github.com/databricks/cli/bundle/env"
@@ -255,7 +257,7 @@ func TestPythonMutator_badOutput(t *testing.T) {
     mutator := PythonMutator(PythonMutatorPhaseLoad)
     diag := bundle.Apply(ctx, b, mutator)
 
-    assert.EqualError(t, diag.Error(), "failed to load Python mutator output: failed to normalize output: unknown field: unknown_property")
+    assert.EqualError(t, diag.Error(), "unknown field: unknown_property")
 }
 
 func TestPythonMutator_disabled(t *testing.T) {
@@ -282,7 +284,7 @@ func TestPythonMutator_venvRequired(t *testing.T) {
 }
 
 func TestPythonMutator_venvNotFound(t *testing.T) {
-    expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path"))
+    expectedError := fmt.Sprintf("failed to get Python interpreter path: can't find %q, check if virtualenv is created", interpreterPath("bad_path"))
 
     b := loadYaml("databricks.yml", `
 experimental:
@@ -546,6 +548,46 @@ func TestInterpreterPath(t *testing.T) {
     }
 }
 
+func TestStrictNormalize(t *testing.T) {
+    // NB: there is no way to trigger diag.Error, so we don't test it
+
+    type TestStruct struct {
+        A int `json:"a"`
+    }
+
+    value := dyn.NewValue(map[string]dyn.Value{"A": dyn.NewValue("abc", nil)}, nil)
+
+    _, diags := convert.Normalize(TestStruct{}, value)
+    _, strictDiags := strictNormalize(TestStruct{}, value)
+
+    assert.False(t, diags.HasError())
+    assert.True(t, strictDiags.HasError())
+}
+
+func TestExplainProcessErr(t *testing.T) {
+    stderr := "/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')\n"
+    expected := `/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')
+
+Explanation: 'databricks-pydabs' library is not installed in the Python environment.
+
+If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
+and that the wheel is installed in the Python environment:
+
+  $ .venv/bin/pip install -e .
+
+If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
+or activate the environment before running CLI commands:
+
+  experimental:
+    pydabs:
+      venv_path: .venv
+`
+
+    out := explainProcessErr(stderr)
+
+    assert.Equal(t, expected, out)
+}
+
 func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context {
     ctx := context.Background()
     ctx, stub := process.WithStub(ctx)
@@ -596,9 +638,7 @@ func loadYaml(name string, content string) *bundle.Bundle {
     }
 }
 
-func withFakeVEnv(t *testing.T, path string) {
-    interpreterPath := interpreterPath(path)
-
+func withFakeVEnv(t *testing.T, venvPath string) {
     cwd, err := os.Getwd()
     if err != nil {
         panic(err)
@@ -608,6 +648,8 @@ func withFakeVEnv(t *testing.T, venvPath string) {
         panic(err)
     }
 
+    interpreterPath := interpreterPath(venvPath)
+
     err = os.MkdirAll(filepath.Dir(interpreterPath), 0755)
     if err != nil {
         panic(err)
@@ -618,9 +660,22 @@ func withFakeVEnv(t *testing.T, venvPath string) {
         panic(err)
     }
 
+    err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0755)
+    if err != nil {
+        panic(err)
+    }
+
     t.Cleanup(func() {
         if err := os.Chdir(cwd); err != nil {
             panic(err)
         }
     })
 }
+
+func interpreterPath(venvPath string) string {
+    if runtime.GOOS == "windows" {
+        return filepath.Join(venvPath, "Scripts", "python3.exe")
+    } else {
+        return filepath.Join(venvPath, "bin", "python3")
+    }
+}
@@ -2,7 +2,6 @@ package mutator
 
 import (
     "context"
-    "fmt"
     "testing"
 
     "github.com/databricks/cli/bundle"
@@ -44,11 +43,13 @@ func TestResolveClusterReference(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef1).Return(&compute.ClusterDetails{
-        ClusterId: "1234-5678-abcd",
-    }, nil)
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef2).Return(&compute.ClusterDetails{
-        ClusterId: "9876-5432-xywz",
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: clusterRef1},
+        {ClusterId: "9876-5432-xywz", ClusterName: clusterRef2},
     }, nil)
 
     diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
@@ -78,10 +79,16 @@ func TestResolveNonExistentClusterReference(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef))
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: "some other cluster"},
+    }, nil)
 
     diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
-    require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist")
+    require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: cluster named 'Random' does not exist")
 }
 
 func TestNoLookupIfVariableIsSet(t *testing.T) {
@@ -158,8 +165,14 @@ func TestResolveVariableReferencesInVariableLookups(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, "cluster-bar-dev").Return(&compute.ClusterDetails{
-        ClusterId: "1234-5678-abcd",
+
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: "cluster-bar-dev"},
+        {ClusterId: "9876-5432-xywz", ClusterName: "some other cluster"},
     }, nil)
 
     diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences()))
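These tests mirror the lookup strategy on this branch: instead of `GetByClusterName`, the resolver lists clusters filtered to API- and UI-created sources and matches on name, which is also why the expected error becomes "cluster named 'Random' does not exist". A standalone sketch of the matching step over such a listing (simplified stand-in types; the duplicate-name wording below is an assumption, not the CLI's exact message):

package main

import "fmt"

type clusterDetails struct {
    ClusterId   string
    ClusterName string
}

// resolveByName scans a pre-filtered cluster listing and returns the ID of
// the cluster with the given name, erroring on zero or multiple matches.
func resolveByName(clusters []clusterDetails, name string) (string, error) {
    var ids []string
    for _, c := range clusters {
        if c.ClusterName == name {
            ids = append(ids, c.ClusterId)
        }
    }
    switch len(ids) {
    case 0:
        return "", fmt.Errorf("cluster named '%s' does not exist", name)
    case 1:
        return ids[0], nil
    default:
        return "", fmt.Errorf("found %d clusters named '%s'", len(ids), name)
    }
}

func main() {
    listing := []clusterDetails{{ClusterId: "1234-5678-abcd", ClusterName: "some other cluster"}}
    _, err := resolveByName(listing, "Random")
    fmt.Println(err) // cluster named 'Random' does not exist
}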
@@ -10,7 +10,6 @@ import (
     "github.com/databricks/cli/libs/dyn"
     "github.com/databricks/cli/libs/dyn/convert"
     "github.com/databricks/cli/libs/dyn/dynvar"
-    "github.com/databricks/cli/libs/log"
 )
 
 type resolveVariableReferences struct {
@@ -124,6 +123,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     // We rewrite it here to make the resolution logic simpler.
     varPath := dyn.NewPath(dyn.Key("var"))
 
+    var diags diag.Diagnostics
     err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
         // Synthesize a copy of the root that has all fields that are present in the type
         // but not set in the dynamic value set to their corresponding empty value.
@@ -180,14 +180,13 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 
         // Normalize the result because variable resolution may have been applied to non-string fields.
         // For example, a variable reference may have been resolved to a integer.
-        root, diags := convert.Normalize(b.Config, root)
-        for _, diag := range diags {
-            // This occurs when a variable's resolved value is incompatible with the field's type.
-            // Log a warning until we have a better way to surface these diagnostics to the user.
-            log.Warnf(ctx, "normalization diagnostic: %s", diag.Summary)
-        }
+        root, normaliseDiags := convert.Normalize(b.Config, root)
+        diags = diags.Extend(normaliseDiags)
         return root, nil
     })
 
-    return diag.FromErr(err)
+    if err != nil {
+        diags = diags.Extend(diag.FromErr(err))
+    }
+    return diags
 }
@@ -45,6 +45,10 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
 func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
         return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
+            v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
+            if err != nil {
+                return dyn.InvalidValue, err
+            }
             v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
             if err != nil {
                 return dyn.InvalidValue, err
@@ -17,6 +17,10 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
         RootPath: ".",
         Config: config.Root{
             Sync: config.Sync{
+                Paths: []string{
+                    ".",
+                    "../common",
+                },
                 Include: []string{
                     "foo",
                     "bar",
@@ -29,6 +33,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
         },
     }
 
+    bundletest.SetLocation(b, "sync.paths[0]", "./databricks.yml")
+    bundletest.SetLocation(b, "sync.paths[1]", "./databricks.yml")
     bundletest.SetLocation(b, "sync.include[0]", "./file.yml")
     bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml")
     bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")
@@ -37,6 +43,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
     diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
     assert.NoError(t, diags.Error())
 
+    assert.Equal(t, filepath.Clean("."), b.Config.Sync.Paths[0])
+    assert.Equal(t, filepath.Clean("../common"), b.Config.Sync.Paths[1])
     assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
     assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
     assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0])
@@ -48,6 +56,10 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
         RootPath: "/tmp/dir",
         Config: config.Root{
             Sync: config.Sync{
+                Paths: []string{
+                    ".",
+                    "../common",
+                },
                 Include: []string{
                     "foo",
                     "bar",
@@ -60,6 +72,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
         },
     }
 
+    bundletest.SetLocation(b, "sync.paths[0]", "/tmp/dir/databricks.yml")
+    bundletest.SetLocation(b, "sync.paths[1]", "/tmp/dir/databricks.yml")
     bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml")
     bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml")
     bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")
@@ -68,6 +82,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
     diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
     assert.NoError(t, diags.Error())
 
+    assert.Equal(t, filepath.Clean("."), b.Config.Sync.Paths[0])
+    assert.Equal(t, filepath.Clean("../common"), b.Config.Sync.Paths[1])
     assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
     assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
     assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0])
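For context on the expectations above: a sync path declared in a nested config file is first anchored at that file's directory and then re-expressed relative to the bundle root, which is how "bar" declared in a/file.yml becomes a/bar. A stdlib-only sketch of that computation (a simplification of the mutator's makeRelativeTo, not its actual body):

package main

import (
    "fmt"
    "path/filepath"
)

// relativeToRoot anchors a path at the config file that declared it, then
// re-expresses it relative to the bundle root.
func relativeToRoot(root, declaredIn, path string) (string, error) {
    return filepath.Rel(root, filepath.Join(filepath.Dir(declaredIn), path))
}

func main() {
    p, _ := relativeToRoot("/tmp/dir", "/tmp/dir/a/file.yml", "bar")
    fmt.Println(p) // a/bar

    p, _ = relativeToRoot("/tmp/dir", "/tmp/dir/databricks.yml", "../common")
    fmt.Println(p) // ../common
}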
@@ -0,0 +1,48 @@
+package mutator
+
+import (
+    "context"
+
+    "github.com/databricks/cli/bundle"
+    "github.com/databricks/cli/libs/diag"
+    "github.com/databricks/cli/libs/dyn"
+)
+
+type syncDefaultPath struct{}
+
+// SyncDefaultPath configures the default sync path to be equal to the bundle root.
+func SyncDefaultPath() bundle.Mutator {
+    return &syncDefaultPath{}
+}
+
+func (m *syncDefaultPath) Name() string {
+    return "SyncDefaultPath"
+}
+
+func (m *syncDefaultPath) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+    isset := false
+    err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+        pv, _ := dyn.Get(v, "sync.paths")
+
+        // If the sync paths field is already set, do nothing.
+        // We know it is set if its value is either a nil or a sequence (empty or not).
+        switch pv.Kind() {
+        case dyn.KindNil, dyn.KindSequence:
+            isset = true
+        }
+
+        return v, nil
+    })
+    if err != nil {
+        return diag.FromErr(err)
+    }
+
+    // If the sync paths field is already set, do nothing.
+    if isset {
+        return nil
+    }
+
+    // Set the sync paths to the default value.
+    b.Config.Sync.Paths = []string{"."}
+    return nil
+}
@@ -0,0 +1,82 @@
+package mutator_test
+
+import (
+    "context"
+    "testing"
+
+    "github.com/databricks/cli/bundle"
+    "github.com/databricks/cli/bundle/config"
+    "github.com/databricks/cli/bundle/config/mutator"
+    "github.com/databricks/cli/libs/diag"
+    "github.com/databricks/cli/libs/dyn"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+func TestSyncDefaultPath_DefaultIfUnset(t *testing.T) {
+    b := &bundle.Bundle{
+        RootPath: "/tmp/some/dir",
+        Config:   config.Root{},
+    }
+
+    ctx := context.Background()
+    diags := bundle.Apply(ctx, b, mutator.SyncDefaultPath())
+    require.NoError(t, diags.Error())
+    assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
+}
+
+func TestSyncDefaultPath_SkipIfSet(t *testing.T) {
+    tcases := []struct {
+        name   string
+        paths  dyn.Value
+        expect []string
+    }{
+        {
+            name:   "nil",
+            paths:  dyn.V(nil),
+            expect: nil,
+        },
+        {
+            name:   "empty sequence",
+            paths:  dyn.V([]dyn.Value{}),
+            expect: []string{},
+        },
+        {
+            name:   "non-empty sequence",
+            paths:  dyn.V([]dyn.Value{dyn.V("something")}),
+            expect: []string{"something"},
+        },
+    }
+
+    for _, tcase := range tcases {
+        t.Run(tcase.name, func(t *testing.T) {
+            b := &bundle.Bundle{
+                RootPath: "/tmp/some/dir",
+                Config:   config.Root{},
+            }
+
+            diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+                err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+                    v, err := dyn.Set(v, "sync", dyn.V(dyn.NewMapping()))
+                    if err != nil {
+                        return dyn.InvalidValue, err
+                    }
+                    v, err = dyn.Set(v, "sync.paths", tcase.paths)
+                    if err != nil {
+                        return dyn.InvalidValue, err
+                    }
+                    return v, nil
+                })
+                return diag.FromErr(err)
+            })
+            require.NoError(t, diags.Error())
+
+            ctx := context.Background()
+            diags = bundle.Apply(ctx, b, mutator.SyncDefaultPath())
+            require.NoError(t, diags.Error())
+
+            // If the sync paths field is already set, do nothing.
+            assert.Equal(t, tcase.expect, b.Config.Sync.Paths)
+        })
+    }
+}
@@ -0,0 +1,120 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/vfs"
+)
+
+type syncInferRoot struct{}
+
+// SyncInferRoot is a mutator that infers the root path of all files to synchronize by looking at the
+// paths in the sync configuration. The sync root may be different from the bundle root
+// when the user intends to synchronize files outside the bundle root.
+//
+// The sync root can be equivalent to or an ancestor of the bundle root, but not a descendant.
+// That is, the sync root must contain the bundle root.
+//
+// This mutator requires all sync-related paths and patterns to be relative to the bundle root path.
+// This is done by the [RewriteSyncPaths] mutator, which must run before this mutator.
+func SyncInferRoot() bundle.Mutator {
+	return &syncInferRoot{}
+}
+
+func (m *syncInferRoot) Name() string {
+	return "SyncInferRoot"
+}
+
+// computeRoot finds the innermost path that contains the specified path.
+// It traverses up the root path until the specified path no longer escapes it.
+// If no such path exists (i.e. the walk reaches the filesystem root), it returns an empty string.
+//
+// See "sync_infer_root_internal_test.go" for examples.
+func (m *syncInferRoot) computeRoot(path string, root string) string {
+	for !filepath.IsLocal(path) {
+		// Break if we have reached the root of the filesystem.
+		dir := filepath.Dir(root)
+		if dir == root {
+			return ""
+		}
+
+		// Update the sync path as we navigate up the directory tree.
+		path = filepath.Join(filepath.Base(root), path)
+
+		// Move up the directory tree.
+		root = dir
+	}
+
+	return filepath.Clean(root)
+}
+
+func (m *syncInferRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	var diags diag.Diagnostics
+
+	// Use the bundle root path as the starting point for inferring the sync root path.
+	bundleRootPath := filepath.Clean(b.RootPath)
+
+	// Infer the sync root path by looking at each one of the sync paths.
+	// Every sync path must be a descendant of the final sync root path.
+	syncRootPath := bundleRootPath
+	for _, path := range b.Config.Sync.Paths {
+		computedPath := m.computeRoot(path, bundleRootPath)
+		if computedPath == "" {
+			continue
+		}
+
+		// Update sync root path if the computed root path is an ancestor of the current sync root path.
+		if len(computedPath) < len(syncRootPath) {
+			syncRootPath = computedPath
+		}
+	}
+
+	// The new sync root path can only be an ancestor of the previous root path.
+	// Compute the relative path from the sync root to the bundle root.
+	rel, err := filepath.Rel(syncRootPath, bundleRootPath)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	// If during computation of the sync root path we hit the root of the filesystem,
+	// then one or more of the sync paths escape the filesystem root.
+	// Check if this happened by verifying that none of the paths escape the root
+	// when joined with the sync root path.
+	for i, path := range b.Config.Sync.Paths {
+		if filepath.IsLocal(filepath.Join(rel, path)) {
+			continue
+		}
+
+		diags = append(diags, diag.Diagnostic{
+			Severity:  diag.Error,
+			Summary:   fmt.Sprintf("invalid sync path %q", path),
+			Locations: b.Config.GetLocations(fmt.Sprintf("sync.paths[%d]", i)),
+			Paths:     []dyn.Path{dyn.NewPath(dyn.Key("sync"), dyn.Key("paths"), dyn.Index(i))},
+		})
+	}
+
+	if diags.HasError() {
+		return diags
+	}
+
+	// Update all paths in the sync configuration to be relative to the sync root.
+	for i, p := range b.Config.Sync.Paths {
+		b.Config.Sync.Paths[i] = filepath.Join(rel, p)
+	}
+	for i, p := range b.Config.Sync.Include {
+		b.Config.Sync.Include[i] = filepath.Join(rel, p)
+	}
+	for i, p := range b.Config.Sync.Exclude {
+		b.Config.Sync.Exclude[i] = filepath.Join(rel, p)
+	}
+
+	// Configure the sync root path.
+	b.SyncRoot = vfs.MustNew(syncRootPath)
+	b.SyncRootPath = syncRootPath
+	return nil
+}
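Note: the walk in computeRoot is easier to picture in isolation. The sketch below is the same algorithm extracted into a standalone program (assuming a Unix-style filesystem and Go 1.20+, which added filepath.IsLocal): while the sync path still escapes the root, the root's last component is folded into the path and the root moves up one directory.

package main

import (
	"fmt"
	"path/filepath"
)

// computeRoot reimplements the walk above.
func computeRoot(path, root string) string {
	for !filepath.IsLocal(path) {
		dir := filepath.Dir(root)
		if dir == root {
			return "" // hit the filesystem root; path cannot be contained
		}
		// Fold the root's last component into the path, then move up.
		path = filepath.Join(filepath.Base(root), path)
		root = dir
	}
	return filepath.Clean(root)
}

func main() {
	fmt.Println(computeRoot("../common", "/tmp/some/dir")) // /tmp/some
	fmt.Println(computeRoot(".", "/tmp/some/dir"))         // /tmp/some/dir
	fmt.Println(computeRoot("../common", "/"))             // "" (cannot contain)
}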
@@ -0,0 +1,72 @@
+package mutator
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSyncInferRootInternal_ComputeRoot(t *testing.T) {
+	s := syncInferRoot{}
+
+	tcases := []struct {
+		path string
+		root string
+		out  string
+	}{
+		{
+			// Test that "." doesn't change the root.
+			path: ".",
+			root: "/tmp/some/dir",
+			out:  "/tmp/some/dir",
+		},
+		{
+			// Test that a subdirectory doesn't change the root.
+			path: "sub",
+			root: "/tmp/some/dir",
+			out:  "/tmp/some/dir",
+		},
+		{
+			// Test that a parent directory changes the root.
+			path: "../common",
+			root: "/tmp/some/dir",
+			out:  "/tmp/some",
+		},
+		{
+			// Test that a deeply nested parent directory changes the root.
+			path: "../../../../../../common",
+			root: "/tmp/some/dir/that/is/very/deeply/nested",
+			out:  "/tmp/some",
+		},
+		{
+			// Test that a parent directory changes the root at the filesystem root boundary.
+			path: "../common",
+			root: "/tmp",
+			out:  "/",
+		},
+		{
+			// Test that an invalid parent directory doesn't change the root and returns an empty string.
+			path: "../common",
+			root: "/",
+			out:  "",
+		},
+		{
+			// Test that the returned path is cleaned even if the root doesn't change.
+			path: "sub",
+			root: "/tmp/some/../dir",
+			out:  "/tmp/dir",
+		},
+		{
+			// Test that a relative root path also works.
+			path: "../common",
+			root: "foo/bar",
+			out:  "foo",
+		},
+	}
+
+	for _, tc := range tcases {
+		out := s.computeRoot(tc.path, tc.root)
+		assert.Equal(t, tc.out, filepath.ToSlash(out))
+	}
+}
@@ -0,0 +1,198 @@
+package mutator_test
+
+import (
+	"context"
+	"path/filepath"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					".",
+				},
+				Include: []string{
+					"foo",
+					"bar",
+				},
+				Exclude: []string{
+					"baz",
+					"qux",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("/tmp/some/dir"), b.SyncRootPath)
+
+	// Check that the paths are unchanged.
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
+	assert.Equal(t, []string{"foo", "bar"}, b.Config.Sync.Include)
+	assert.Equal(t, []string{"baz", "qux"}, b.Config.Sync.Exclude)
+}
+
+func TestSyncInferRoot_NominalRelative(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "./some/dir",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					".",
+				},
+				Include: []string{
+					"foo",
+					"bar",
+				},
+				Exclude: []string{
+					"baz",
+					"qux",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("some/dir"), b.SyncRootPath)
+
+	// Check that the paths are unchanged.
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
+	assert.Equal(t, []string{"foo", "bar"}, b.Config.Sync.Include)
+	assert.Equal(t, []string{"baz", "qux"}, b.Config.Sync.Exclude)
+}
+
+func TestSyncInferRoot_ParentDirectory(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					"../common",
+				},
+				Include: []string{
+					"foo",
+					"bar",
+				},
+				Exclude: []string{
+					"baz",
+					"qux",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
+
+	// Check that the paths are updated.
+	assert.Equal(t, []string{"common"}, b.Config.Sync.Paths)
+	assert.Equal(t, []string{filepath.FromSlash("dir/foo"), filepath.FromSlash("dir/bar")}, b.Config.Sync.Include)
+	assert.Equal(t, []string{filepath.FromSlash("dir/baz"), filepath.FromSlash("dir/qux")}, b.Config.Sync.Exclude)
+}
+
+func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir/that/is/very/deeply/nested",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					"../../../../../../common",
+				},
+				Include: []string{
+					"foo",
+					"bar",
+				},
+				Exclude: []string{
+					"baz",
+					"qux",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
+
+	// Check that the paths are updated.
+	assert.Equal(t, []string{"common"}, b.Config.Sync.Paths)
+	assert.Equal(t, []string{
+		filepath.FromSlash("dir/that/is/very/deeply/nested/foo"),
+		filepath.FromSlash("dir/that/is/very/deeply/nested/bar"),
+	}, b.Config.Sync.Include)
+	assert.Equal(t, []string{
+		filepath.FromSlash("dir/that/is/very/deeply/nested/baz"),
+		filepath.FromSlash("dir/that/is/very/deeply/nested/qux"),
+	}, b.Config.Sync.Exclude)
+}
+
+func TestSyncInferRoot_MultiplePaths(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/bundle/root",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					"./foo",
+					"../common",
+					"./bar",
+					"../../baz",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
+
+	// Check that the paths are updated.
+	assert.Equal(t, filepath.FromSlash("bundle/root/foo"), b.Config.Sync.Paths[0])
+	assert.Equal(t, filepath.FromSlash("bundle/common"), b.Config.Sync.Paths[1])
+	assert.Equal(t, filepath.FromSlash("bundle/root/bar"), b.Config.Sync.Paths[2])
+	assert.Equal(t, filepath.FromSlash("baz"), b.Config.Sync.Paths[3])
+}
+
+func TestSyncInferRoot_Error(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					"../../../../error",
+					"../../../thisworks",
+					"../../../../../error",
+				},
+			},
+		},
+	}
+
+	bundletest.SetLocation(b, "sync.paths", "databricks.yml")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	require.Len(t, diags, 2)
+	assert.Equal(t, `invalid sync path "../../../../error"`, diags[0].Summary)
+	assert.Equal(t, "databricks.yml:0:0", diags[0].Locations[0].String())
+	assert.Equal(t, "sync.paths[0]", diags[0].Paths[0].String())
+	assert.Equal(t, `invalid sync path "../../../../../error"`, diags[1].Summary)
+	assert.Equal(t, "databricks.yml:0:0", diags[1].Locations[0].String())
+	assert.Equal(t, "sync.paths[2]", diags[1].Paths[0].String())
+}
@@ -82,7 +82,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund
 		return err
 	}

-	internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
+	internalDirRel, err := filepath.Rel(b.SyncRootPath, internalDir)
 	if err != nil {
 		return err
 	}
@@ -56,8 +56,12 @@ func TestGenerateTrampoline(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		RootPath:     filepath.Join(tmpDir, "parent", "my_bundle"),
+		SyncRootPath: filepath.Join(tmpDir, "parent"),
 		Config: config.Root{
+			Workspace: config.Workspace{
+				FilePath: "/Workspace/files",
+			},
 			Bundle: config.Bundle{
 				Target: "development",
 			},
@@ -89,6 +93,6 @@ func TestGenerateTrampoline(t *testing.T) {
 	require.Equal(t, "Hello from Trampoline", string(bytes))

 	task := b.Config.Resources.Jobs["test"].Tasks[0]
-	require.Equal(t, task.NotebookTask.NotebookPath, ".databricks/bundle/development/.internal/notebook_test_to_trampoline")
+	require.Equal(t, "/Workspace/files/my_bundle/.databricks/bundle/development/.internal/notebook_test_to_trampoline", task.NotebookTask.NotebookPath)
 	require.Nil(t, task.PythonWheelTask)
 }
@@ -93,14 +93,14 @@ func (t *translateContext) rewritePath(
 		return nil
 	}

-	// Local path must be contained in the bundle root.
+	// Local path must be contained in the sync root.
 	// If it isn't, it won't be synchronized into the workspace.
-	localRelPath, err := filepath.Rel(t.b.RootPath, localPath)
+	localRelPath, err := filepath.Rel(t.b.SyncRootPath, localPath)
 	if err != nil {
 		return err
 	}
 	if strings.HasPrefix(localRelPath, "..") {
-		return fmt.Errorf("path %s is not contained in bundle root path", localPath)
+		return fmt.Errorf("path %s is not contained in sync root path", localPath)
 	}

 	// Prefix remote path with its remote root path.
@@ -118,7 +118,7 @@ func (t *translateContext) rewritePath(
 }

 func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
-	nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath))
+	nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
 	if errors.Is(err, fs.ErrNotExist) {
 		return "", fmt.Errorf("notebook %s not found", literal)
 	}
@@ -134,7 +134,7 @@ func (t *translateContext) translateNotebookPath(literal, localFullPath, localRe
 }

 func (t *translateContext) translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
-	nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath))
+	nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
 	if errors.Is(err, fs.ErrNotExist) {
 		return "", fmt.Errorf("file %s not found", literal)
 	}
@@ -148,7 +148,7 @@ func (t *translateContext) translateFilePath(literal, localFullPath, localRelPat
 }

 func (t *translateContext) translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
-	info, err := t.b.BundleRoot.Stat(filepath.ToSlash(localRelPath))
+	info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath))
 	if err != nil {
 		return "", err
 	}
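Note: the containment check above relies on a common Go idiom — filepath.Rel yields a ".."-prefixed result when the target lies outside the base. A minimal sketch of that idiom (names here are illustrative, not the CLI's API):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// contained reports whether p is inside root, using the same
// filepath.Rel + ".." prefix check as rewritePath above.
func contained(root, p string) bool {
	rel, err := filepath.Rel(root, p)
	if err != nil {
		return false
	}
	return !strings.HasPrefix(rel, "..")
}

func main() {
	fmt.Println(contained("/sync/root", "/sync/root/src/nb.py")) // true
	fmt.Println(contained("/sync/root", "/sync/other/nb.py"))    // false
}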
@@ -50,6 +50,11 @@ func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern
 			t.translateNoOp,
 			noSkipRewrite,
 		},
+		{
+			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
+			t.translateFilePath,
+			noSkipRewrite,
+		},
 	}
 }
@@ -41,8 +41,8 @@ func touchEmptyFile(t *testing.T, path string) {
 func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
 	dir := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -110,10 +110,11 @@ func TestTranslatePaths(t *testing.T) {
 	touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py"))
 	touchEmptyFile(t, filepath.Join(dir, "my_python_file.py"))
 	touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar"))
+	touchEmptyFile(t, filepath.Join(dir, "requirements.txt"))

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -140,6 +141,9 @@ func TestTranslatePaths(t *testing.T) {
 					NotebookTask: &jobs.NotebookTask{
 						NotebookPath: "./my_job_notebook.py",
 					},
+					Libraries: []compute.Library{
+						{Requirements: "./requirements.txt"},
+					},
 				},
 				{
 					PythonWheelTask: &jobs.PythonWheelTask{
@@ -232,6 +236,11 @@ func TestTranslatePaths(t *testing.T) {
 		"/bundle/my_job_notebook",
 		b.Config.Resources.Jobs["job"].Tasks[2].NotebookTask.NotebookPath,
 	)
+	assert.Equal(
+		t,
+		"/bundle/requirements.txt",
+		b.Config.Resources.Jobs["job"].Tasks[2].Libraries[0].Requirements,
+	)
 	assert.Equal(
 		t,
 		"/bundle/my_python_file.py",
@@ -280,8 +289,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml"))

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -371,12 +380,12 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 	)
 }

-func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
+func TestTranslatePathsOutsideSyncRoot(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -402,15 +411,15 @@ func TestTranslatePathsOutsideSyncRoot(t *testing.T) {
 	bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml"))

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	assert.ErrorContains(t, diags.Error(), "is not contained in bundle root")
+	assert.ErrorContains(t, diags.Error(), "is not contained in sync root path")
 }

 func TestJobNotebookDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
@@ -440,8 +449,8 @@ func TestJobFileDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
@@ -471,8 +480,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
@@ -502,8 +511,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
@@ -534,8 +543,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
 	touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -569,8 +578,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "my_file.py"))

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -604,8 +613,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "my_file.py"))

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -639,8 +648,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
 	touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -675,8 +684,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "env2.py"))

 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
@@ -715,8 +724,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
 func TestTranslatePathWithComplexVariables(t *testing.T) {
 	dir := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Variables: map[string]*variable.Variable{
 				"cluster_libraries": {
@@ -40,6 +40,10 @@ func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Dia
 	}

 	if !c.Check(version) {
+		if version.Prerelease() == "dev" && version.Major() == 0 {
+			return diag.Warningf("Ignoring Databricks CLI version constraint for development build. Required: %s, current: %s", constraint, currentVersion)
+		}
+
 		return diag.Errorf("Databricks CLI version constraint not satisfied. Required: %s, current: %s", constraint, currentVersion)
 	}
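Note: the hunk above does not show the semver import; the Check/Prerelease/Major signatures suggest a library such as Masterminds/semver, which is an assumption here. A minimal sketch of the dev-build escape hatch in isolation under that assumption:

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3" // assumed library, matching the calls above
)

func main() {
	c, _ := semver.NewConstraint(">= 0.100.0")
	v, _ := semver.NewVersion("0.0.0-dev+06b169284737")

	if !c.Check(v) {
		// Development builds report version 0.0.0-dev, which would fail
		// every constraint; downgrade that case to a warning.
		if v.Prerelease() == "dev" && v.Major() == 0 {
			fmt.Println("warning: ignoring version constraint for dev build")
			return
		}
		fmt.Println("error: constraint not satisfied")
	}
}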
@@ -107,6 +107,11 @@ func TestVerifyCliVersion(t *testing.T) {
 			constraint:    "^0.100",
 			expectedError: "invalid version constraint \"^0.100\" specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)",
 		},
+		{
+			currentVersion: "0.0.0-dev+06b169284737",
+			constraint:     ">= 0.100.0",
+			expectedError:  "Ignoring Databricks CLI version constraint for development build. Required: >= 0.100.0",
+		},
 	}

 	t.Cleanup(func() {
@@ -130,7 +135,7 @@ func TestVerifyCliVersion(t *testing.T) {
 			diags := bundle.Apply(context.Background(), b, VerifyCliVersion())
 			if tc.expectedError != "" {
 				require.NotEmpty(t, diags)
-				require.Equal(t, tc.expectedError, diags.Error().Error())
+				require.Contains(t, diags[0].Summary, tc.expectedError)
 			} else {
 				require.Empty(t, diags)
 			}
@@ -406,6 +406,30 @@ func (r *Root) MergeTargetOverrides(name string) error {
 	return r.updateWithDynamicValue(root)
 }

+var variableKeywords = []string{"default", "lookup"}
+
+// isFullVariableOverrideDef checks if the given value is a full syntax variable override.
+// A full syntax variable override is a map with only one of the following
+// keys: "default", "lookup".
+func isFullVariableOverrideDef(v dyn.Value) bool {
+	mv, ok := v.AsMap()
+	if !ok {
+		return false
+	}
+
+	if mv.Len() != 1 {
+		return false
+	}
+
+	for _, keyword := range variableKeywords {
+		if _, ok := mv.GetByString(keyword); ok {
+			return true
+		}
+	}
+
+	return false
+}
+
 // rewriteShorthands performs lightweight rewriting of the configuration
 // tree where we allow users to write a shorthand and must rewrite to the full form.
 func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
@@ -433,20 +457,27 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
 			}, variable.Locations()), nil

 		case dyn.KindMap, dyn.KindSequence:
-			// Check if the original definition of variable has a type field.
-			typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type")))
-			if err != nil {
+			// If it's a full variable definition, leave it as is.
+			if isFullVariableOverrideDef(variable) {
 				return variable, nil
 			}

-			if typeV.MustString() == "complex" {
+			// Check if the original definition of the variable has a type field.
+			// If it has a type field, it means the shorthand is a value of a complex type.
+			// The type might not be found if the variable is overridden in a separate file
+			// and the configuration is not merged yet.
+			typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type")))
+			if err == nil && typeV.MustString() == "complex" {
 				return dyn.NewValue(map[string]dyn.Value{
 					"type":    typeV,
 					"default": variable,
 				}, variable.Locations()), nil
 			}

-			return variable, nil
+			// If it's a shorthand, rewrite it to a full variable definition.
+			return dyn.NewValue(map[string]dyn.Value{
+				"default": variable,
+			}, variable.Locations()), nil

 		default:
 			return variable, nil
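Note: to make the shorthand rewrite concrete, here is a simplified stand-in that uses plain maps instead of dyn.Value (the keyword set matches variableKeywords above; everything else is illustrative):

package main

import "fmt"

// isFullOverride mirrors isFullVariableOverrideDef: a map with exactly
// one key, and that key is one of the variable keywords.
func isFullOverride(v map[string]any) bool {
	if len(v) != 1 {
		return false
	}
	for _, kw := range []string{"default", "lookup"} {
		if _, ok := v[kw]; ok {
			return true
		}
	}
	return false
}

// rewrite wraps a shorthand value in the full {"default": ...} form.
func rewrite(v any) any {
	if m, ok := v.(map[string]any); ok && isFullOverride(m) {
		return m // already a full definition; leave as is
	}
	return map[string]any{"default": v}
}

func main() {
	fmt.Println(rewrite(map[string]any{"lookup": "my-cluster"})) // unchanged
	fmt.Println(rewrite(map[string]any{"node_count": 2}))        // wrapped in "default"
}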
@@ -1,6 +1,10 @@
 package config

 type Sync struct {
+	// Paths contains a list of paths to synchronize relative to the bundle root path.
+	// If not configured, this defaults to synchronizing everything in the bundle root path (i.e. `.`).
+	Paths []string `json:"paths,omitempty"`
+
 	// Include contains a list of globs evaluated relative to the bundle root path
 	// to explicitly include files that were excluded by the user's gitignore.
 	Include []string `json:"include,omitempty"`
@@ -3,7 +3,6 @@ package validate
 import (
 	"context"
 	"fmt"
-	"slices"
 	"sort"

 	"github.com/databricks/cli/bundle"
@@ -66,10 +65,7 @@ func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.D
 			}
 		}

-		// dyn.Path under the hood is a slice. The code that walks the configuration
-		// tree uses the same underlying slice to track the path as it walks
-		// the tree. So, we need to clone it here.
-		m.paths = append(m.paths, slices.Clone(p))
+		m.paths = append(m.paths, p)
 		m.locations = append(m.locations, v.Locations()...)

 		resourceMetadata[k] = m
@@ -220,6 +220,10 @@ type resolvers struct {
 func allResolvers() *resolvers {
 	r := &resolvers{}
 	r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["Alert"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.Alerts.GetByDisplayName(ctx, name)
 		if err != nil {
 			return "", err
@@ -228,6 +232,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.Id), nil
 	}
 	r.ClusterPolicy = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["ClusterPolicy"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.ClusterPolicies.GetByName(ctx, name)
 		if err != nil {
 			return "", err
@@ -236,6 +244,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.PolicyId), nil
 	}
 	r.Cluster = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["Cluster"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.Clusters.GetByClusterName(ctx, name)
 		if err != nil {
 			return "", err
@@ -244,6 +256,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.ClusterId), nil
 	}
 	r.Dashboard = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["Dashboard"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.Dashboards.GetByName(ctx, name)
 		if err != nil {
 			return "", err
@@ -252,6 +268,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.Id), nil
 	}
 	r.InstancePool = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["InstancePool"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.InstancePools.GetByInstancePoolName(ctx, name)
 		if err != nil {
 			return "", err
@@ -260,6 +280,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.InstancePoolId), nil
 	}
 	r.Job = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["Job"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.Jobs.GetBySettingsName(ctx, name)
 		if err != nil {
 			return "", err
@@ -268,6 +292,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.JobId), nil
 	}
 	r.Metastore = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["Metastore"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.Metastores.GetByName(ctx, name)
 		if err != nil {
 			return "", err
@@ -276,6 +304,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.MetastoreId), nil
 	}
 	r.Pipeline = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["Pipeline"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.Pipelines.GetByName(ctx, name)
 		if err != nil {
 			return "", err
@@ -284,6 +316,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.PipelineId), nil
 	}
 	r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["Query"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.Queries.GetByDisplayName(ctx, name)
 		if err != nil {
 			return "", err
@@ -292,6 +328,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.Id), nil
 	}
 	r.ServicePrincipal = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["ServicePrincipal"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.ServicePrincipals.GetByDisplayName(ctx, name)
 		if err != nil {
 			return "", err
@@ -300,6 +340,10 @@ func allResolvers() *resolvers {
 		return fmt.Sprint(entity.ApplicationId), nil
 	}
 	r.Warehouse = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+		fn, ok := lookupOverrides["Warehouse"]
+		if ok {
+			return fn(ctx, w, name)
+		}
 		entity, err := w.Warehouses.GetByName(ctx, name)
 		if err != nil {
 			return "", err
@@ -0,0 +1,41 @@
+package variable
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/databricks/databricks-sdk-go"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+)
+
+var lookupOverrides = map[string]resolverFunc{
+	"Cluster": resolveCluster,
+}
+
+// We added a custom resolver for the cluster to add filtering for the cluster source when we list all clusters.
+// Without the filtering, listing could take a very long time (5-10 mins), which leads to lookup timeouts.
+func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+	result, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{
+		FilterBy: &compute.ListClustersFilterBy{
+			ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+		},
+	})
+
+	if err != nil {
+		return "", err
+	}
+
+	tmp := map[string][]compute.ClusterDetails{}
+	for _, v := range result {
+		key := v.ClusterName
+		tmp[key] = append(tmp[key], v)
+	}
+	alternatives, ok := tmp[name]
+	if !ok || len(alternatives) == 0 {
+		return "", fmt.Errorf("cluster named '%s' does not exist", name)
+	}
+	if len(alternatives) > 1 {
+		return "", fmt.Errorf("there are %d instances of clusters named '%s'", len(alternatives), name)
+	}
+	return alternatives[0].ClusterId, nil
+}
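Note: the group-then-disambiguate pattern used by resolveCluster generalizes to any lookup by display name. A standalone sketch (the cluster struct and sample data are illustrative):

package main

import "fmt"

type cluster struct {
	ID   string
	Name string
}

// resolveByName returns the ID of the unique item with the given name,
// mirroring resolveCluster's handling of missing and duplicate names.
func resolveByName(items []cluster, name string) (string, error) {
	byName := map[string][]cluster{}
	for _, it := range items {
		byName[it.Name] = append(byName[it.Name], it)
	}
	matches := byName[name]
	if len(matches) == 0 {
		return "", fmt.Errorf("cluster named '%s' does not exist", name)
	}
	if len(matches) > 1 {
		return "", fmt.Errorf("there are %d instances of clusters named '%s'", len(matches), name)
	}
	return matches[0].ID, nil
}

func main() {
	items := []cluster{{"0001", "dev"}, {"0002", "prod"}, {"0003", "prod"}}
	fmt.Println(resolveByName(items, "dev"))  // 0001 <nil>
	fmt.Println(resolveByName(items, "prod")) // error: 2 instances
}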
@@ -28,8 +28,8 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp
 	}

 	opts := &sync.SyncOptions{
-		LocalRoot: rb.BundleRoot(),
-		Paths:     []string{"."},
+		LocalRoot: rb.SyncRoot(),
+		Paths:     rb.Config().Sync.Paths,
 		Include:   includes,
 		Exclude:   rb.Config().Sync.Exclude,
@@ -85,7 +85,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
 	}

 	log.Infof(ctx, "Creating new snapshot")
-	snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.BundleRoot), opts)
+	snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.SyncRoot), opts)
 	if err != nil {
 		return diag.FromErr(err)
 	}
@@ -64,6 +64,10 @@ func testStatePull(t *testing.T, opts statePullOpts) {
 	b := &bundle.Bundle{
 		RootPath:   tmpDir,
 		BundleRoot: vfs.MustNew(tmpDir),
+
+		SyncRootPath: tmpDir,
+		SyncRoot:     vfs.MustNew(tmpDir),
+
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -81,11 +85,11 @@ func testStatePull(t *testing.T, opts statePullOpts) {
 	ctx := context.Background()

 	for _, file := range opts.localFiles {
-		testutil.Touch(t, b.RootPath, "bar", file)
+		testutil.Touch(t, b.SyncRootPath, "bar", file)
 	}

 	for _, file := range opts.localNotebooks {
-		testutil.TouchNotebook(t, b.RootPath, "bar", file)
+		testutil.TouchNotebook(t, b.SyncRootPath, "bar", file)
 	}

 	if opts.withExistingSnapshot {
@@ -69,6 +69,11 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
 		// Remove output starting from Warning until end of output
 		output = output[:bytes.Index([]byte(output), []byte("Warning:"))]
 		cmdio.LogString(ctx, output)

+		if !cmdio.IsPromptSupported(ctx) {
+			return diag.Errorf("This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed.")
+		}
+
 		ans, err := cmdio.AskYesOrNo(ctx, "Confirm import changes? Changes will be remotely applied only after running 'bundle deploy'.")
 		if err != nil {
 			return diag.FromErr(err)
@@ -111,6 +111,13 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error {
 		environ["PATH"] = path
 	}

+	// Include $AZURE_CONFIG_FILE in the set of environment variables to pass along.
+	// This is set in Azure DevOps by the AzureCLI@2 task.
+	azureConfigFile, ok := env.Lookup(ctx, "AZURE_CONFIG_FILE")
+	if ok {
+		environ["AZURE_CONFIG_FILE"] = azureConfigFile
+	}
+
 	// Include $TF_CLI_CONFIG_FILE to override terraform provider in development.
 	// See: https://developer.hashicorp.com/terraform/cli/config/config-file#explicit-installation-method-configuration
 	devConfigFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE")
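Note: the pattern above (copy a variable into the child environment only when it is set) is worth seeing in isolation. A minimal sketch, with os.LookupEnv standing in for the CLI's env helper:

package main

import (
	"fmt"
	"os"
)

// passthrough copies each named variable into environ only when it is
// set, mirroring how AZURE_CONFIG_FILE and TF_CLI_CONFIG_FILE are
// forwarded above.
func passthrough(environ map[string]string, names ...string) {
	for _, name := range names {
		if value, ok := os.LookupEnv(name); ok {
			environ[name] = value
		}
	}
}

func main() {
	environ := map[string]string{}
	passthrough(environ, "AZURE_CONFIG_FILE", "TF_CLI_CONFIG_FILE")
	fmt.Println(environ)
}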
@@ -269,19 +269,20 @@ func TestSetUserAgentExtraEnvVar(t *testing.T) {
 func TestInheritEnvVars(t *testing.T) {
-	env := map[string]string{}
-
 	t.Setenv("HOME", "/home/testuser")
 	t.Setenv("PATH", "/foo:/bar")
 	t.Setenv("TF_CLI_CONFIG_FILE", "/tmp/config.tfrc")
+	t.Setenv("AZURE_CONFIG_FILE", "/tmp/foo/bar")

-	err := inheritEnvVars(context.Background(), env)
-	require.NoError(t, err)
-	require.Equal(t, env["HOME"], "/home/testuser")
-	require.Equal(t, env["PATH"], "/foo:/bar")
-	require.Equal(t, env["TF_CLI_CONFIG_FILE"], "/tmp/config.tfrc")
+	ctx := context.Background()
+	env := map[string]string{}
+	err := inheritEnvVars(ctx, env)
+	if assert.NoError(t, err) {
+		assert.Equal(t, "/home/testuser", env["HOME"])
+		assert.Equal(t, "/foo:/bar", env["PATH"])
+		assert.Equal(t, "/tmp/config.tfrc", env["TF_CLI_CONFIG_FILE"])
+		assert.Equal(t, "/tmp/foo/bar", env["AZURE_CONFIG_FILE"])
+	}
 }

 func TestSetUserProfileFromInheritEnvVars(t *testing.T) {
@@ -16,12 +16,10 @@ type expand struct {
 func matchError(p dyn.Path, l []dyn.Location, message string) diag.Diagnostic {
 	return diag.Diagnostic{
 		Severity:  diag.Error,
 		Summary:   message,
-		Paths: []dyn.Path{
-			p.Append(),
-		},
 		Locations: l,
+		Paths:     []dyn.Path{p},
 	}
 }
@@ -1,16 +1,24 @@
 package libraries

-import "github.com/databricks/databricks-sdk-go/service/compute"
+import (
+	"fmt"
+
+	"github.com/databricks/databricks-sdk-go/service/compute"
+)

-func libraryPath(library *compute.Library) string {
+func libraryPath(library *compute.Library) (string, error) {
 	if library.Whl != "" {
-		return library.Whl
+		return library.Whl, nil
 	}
 	if library.Jar != "" {
-		return library.Jar
+		return library.Jar, nil
 	}
 	if library.Egg != "" {
-		return library.Egg
+		return library.Egg, nil
 	}
-	return ""
+	if library.Requirements != "" {
+		return library.Requirements, nil
+	}
+
+	return "", fmt.Errorf("not supported library type")
 }
@@ -10,8 +10,27 @@ import (
 func TestLibraryPath(t *testing.T) {
 	path := "/some/path"

-	assert.Equal(t, path, libraryPath(&compute.Library{Whl: path}))
-	assert.Equal(t, path, libraryPath(&compute.Library{Jar: path}))
-	assert.Equal(t, path, libraryPath(&compute.Library{Egg: path}))
-	assert.Equal(t, "", libraryPath(&compute.Library{}))
+	p, err := libraryPath(&compute.Library{Whl: path})
+	assert.Equal(t, path, p)
+	assert.Nil(t, err)
+
+	p, err = libraryPath(&compute.Library{Jar: path})
+	assert.Equal(t, path, p)
+	assert.Nil(t, err)
+
+	p, err = libraryPath(&compute.Library{Egg: path})
+	assert.Equal(t, path, p)
+	assert.Nil(t, err)
+
+	p, err = libraryPath(&compute.Library{Requirements: path})
+	assert.Equal(t, path, p)
+	assert.Nil(t, err)
+
+	p, err = libraryPath(&compute.Library{})
+	assert.Equal(t, "", p)
+	assert.NotNil(t, err)
+
+	p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}})
+	assert.Equal(t, "", p)
+	assert.NotNil(t, err)
 }
@@ -67,7 +67,12 @@ func FindTasksWithLocalLibraries(b *bundle.Bundle) []jobs.Task {
 }

 func isTaskWithLocalLibraries(task jobs.Task) bool {
 	for _, l := range task.Libraries {
-		if IsLibraryLocal(libraryPath(&l)) {
+		p, err := libraryPath(&l)
+		// If there's an error, skip the library because it's not of a supported type.
+		if err != nil {
+			continue
+		}
+		if IsLibraryLocal(p) {
 			return true
 		}
 	}
@ -3,6 +3,7 @@ package libraries
|
||||||
import (
|
import (
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -42,6 +43,10 @@ func IsLocalPath(p string) bool {
 // We can't use IsLocalPath because environment dependencies can be
 // a pypi package name which can be misinterpreted as a local path by IsLocalPath.
 func IsLibraryLocal(dep string) bool {
+	if dep == "" {
+		return false
+	}
+
 	possiblePrefixes := []string{
 		".",
 	}
@@ -65,9 +70,29 @@ func IsLibraryLocal(dep string) bool {
 		return IsLocalPath(dep)
 	}
 
+// ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_).
+// (\[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security].
+// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?): Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
+// ,?: Optionally matches a comma (,) at the end of the specifier, which is used to separate multiple specifiers.
+// There can be multiple version specifiers separated by commas, or no specifiers at all.
+// Spec for package name and version specifier: https://pip.pypa.io/en/stable/reference/requirement-specifiers/
+var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?,?)*$`)
+
 func isPackage(name string) bool {
-	// If the dependency has no extension, it's a PyPi package name
-	return path.Ext(name) == ""
+	if packageRegex.MatchString(name) {
+		return true
+	}
+
+	return isUrlBasedLookup(name)
+}
+
+func isUrlBasedLookup(name string) bool {
+	parts := strings.Split(name, " @ ")
+	if len(parts) != 2 {
+		return false
+	}
+
+	return packageRegex.MatchString(parts[0]) && isRemoteStorageScheme(parts[1])
 }
 
 func isRemoteStorageScheme(path string) bool {
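A standalone sketch (not from this commit) of what the new regex accepts; matching strings are treated as PyPI requirement specifiers rather than local paths. The sample dependency strings are illustrative:

    package main

    import (
    	"fmt"
    	"regexp"
    )

    var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?,?)*$`)

    func main() {
    	for _, dep := range []string{
    		"beautifulsoup4",                 // bare package name: matches
    		"beautifulsoup4[security]~=4.12", // extras plus version specifier: matches
    		"./dist/app.whl",                 // local path: no match, falls through to IsLocalPath
    	} {
    		fmt.Printf("%-32s %v\n", dep, packageRegex.MatchString(dep))
    	}
    }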
@@ -48,12 +48,25 @@ func TestIsLibraryLocal(t *testing.T) {
 		{path: "../../local/*.whl", expected: true},
 		{path: "..\\..\\local\\*.whl", expected: true},
 		{path: "file://path/to/package/whl.whl", expected: true},
+		{path: "", expected: false},
 		{path: "pypipackage", expected: false},
 		{path: "/Volumes/catalog/schema/volume/path.whl", expected: false},
 		{path: "/Workspace/my_project/dist.whl", expected: false},
 		{path: "-r /Workspace/my_project/requirements.txt", expected: false},
 		{path: "s3://mybucket/path/to/package", expected: false},
 		{path: "dbfs:/mnt/path/to/package", expected: false},
+		{path: "beautifulsoup4", expected: false},
+		{path: "beautifulsoup4==4.12.3", expected: false},
+		{path: "beautifulsoup4 >= 4.12.3", expected: false},
+		{path: "beautifulsoup4 < 4.12.3", expected: false},
+		{path: "beautifulsoup4 ~= 4.12.3", expected: false},
+		{path: "beautifulsoup4[security, tests]", expected: false},
+		{path: "beautifulsoup4[security, tests] ~= 4.12.3", expected: false},
+		{path: "beautifulsoup4>=1.0.0,<2.0.0", expected: false},
+		{path: "beautifulsoup4>=1.0.0,~=1.2.0,<2.0.0", expected: false},
+		{path: "https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
+		{path: "pip @ https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
+		{path: "requests [security] @ https://github.com/psf/requests/archive/refs/heads/main.zip", expected: false},
 	}
 
 	for i, tc := range testCases {
@@ -76,7 +76,7 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error
 
 			source = filepath.Join(b.RootPath, source)
 			libs[source] = append(libs[source], configLocation{
-				configPath: p.Append(), // Hack to get the copy of path
+				configPath: p,
 				location:   v.Location(),
 			})
 
@@ -29,8 +29,8 @@ func IsWorkspacePath(path string) bool {
 
 // IsWorkspaceLibrary returns true if the specified library refers to a workspace path.
 func IsWorkspaceLibrary(library *compute.Library) bool {
-	path := libraryPath(library)
-	if path == "" {
+	path, err := libraryPath(library)
+	if err != nil {
 		return false
 	}
 
@@ -19,9 +19,38 @@ import (
 	"github.com/databricks/cli/bundle/scripts"
 	"github.com/databricks/cli/libs/cmdio"
 	terraformlib "github.com/databricks/cli/libs/terraform"
+	tfjson "github.com/hashicorp/terraform-json"
 )
 
-func approvalForUcSchemaDelete(ctx context.Context, b *bundle.Bundle) (bool, error) {
+func parseTerraformActions(changes []*tfjson.ResourceChange, toInclude func(typ string, actions tfjson.Actions) bool) []terraformlib.Action {
+	res := make([]terraformlib.Action, 0)
+	for _, rc := range changes {
+		if !toInclude(rc.Type, rc.Change.Actions) {
+			continue
+		}
+
+		var actionType terraformlib.ActionType
+		switch {
+		case rc.Change.Actions.Delete():
+			actionType = terraformlib.ActionTypeDelete
+		case rc.Change.Actions.Replace():
+			actionType = terraformlib.ActionTypeRecreate
+		default:
+			// No use case for other action types yet.
+			continue
+		}
+
+		res = append(res, terraformlib.Action{
+			Action:       actionType,
+			ResourceType: rc.Type,
+			ResourceName: rc.Name,
+		})
+	}
+
+	return res
+}
+
+func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) {
 	tf := b.Terraform
 	if tf == nil {
 		return false, fmt.Errorf("terraform not initialized")
@@ -33,41 +62,52 @@ func approvalForUcSchemaDelete(ctx context.Context, b *bundle.Bundle) (bool, err
 		return false, err
 	}
 
-	actions := make([]terraformlib.Action, 0)
-	for _, rc := range plan.ResourceChanges {
-		// We only care about destructive actions on UC schema resources.
-		if rc.Type != "databricks_schema" {
-			continue
-		}
-
-		var actionType terraformlib.ActionType
-
-		switch {
-		case rc.Change.Actions.Delete():
-			actionType = terraformlib.ActionTypeDelete
-		case rc.Change.Actions.Replace():
-			actionType = terraformlib.ActionTypeRecreate
-		default:
-			// We don't need a prompt for non-destructive actions like creating
-			// or updating a schema.
-			continue
-		}
-
-		actions = append(actions, terraformlib.Action{
-			Action:       actionType,
-			ResourceType: rc.Type,
-			ResourceName: rc.Name,
-		})
-	}
-
-	// No restricted actions planned. No need for approval.
-	if len(actions) == 0 {
+	schemaActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool {
+		// Filter in only UC schema resources.
+		if typ != "databricks_schema" {
+			return false
+		}
+
+		// We only display prompts for destructive actions like deleting or
+		// recreating a schema.
+		return actions.Delete() || actions.Replace()
+	})
+
+	dltActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool {
+		// Filter in only DLT pipeline resources.
+		if typ != "databricks_pipeline" {
+			return false
+		}
+
+		// Recreating a DLT pipeline leads to metadata loss, and for a transient period
+		// the underlying tables will be unavailable.
+		return actions.Replace() || actions.Delete()
+	})
+
+	// We don't need to display any prompts in this case.
+	if len(dltActions) == 0 && len(schemaActions) == 0 {
 		return true, nil
 	}
 
-	cmdio.LogString(ctx, "The following UC schemas will be deleted or recreated. Any underlying data may be lost:")
-	for _, action := range actions {
-		cmdio.Log(ctx, action)
+	// One or more UC schema resources will be deleted or recreated.
+	if len(schemaActions) != 0 {
+		cmdio.LogString(ctx, "The following UC schemas will be deleted or recreated. Any underlying data may be lost:")
+		for _, action := range schemaActions {
+			cmdio.Log(ctx, action)
+		}
+	}
+
+	// One or more DLT pipelines are being recreated.
+	if len(dltActions) != 0 {
+		msg := `
This action will result in the deletion or recreation of the following DLT Pipelines along with the
Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the Pipelines will
restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline
properties such as the 'catalog' or 'storage' are changed:`
		cmdio.LogString(ctx, msg)
		for _, action := range dltActions {
			cmdio.Log(ctx, action)
		}
 	}
 
 	if b.AutoApprove {
@@ -126,7 +166,7 @@ func Deploy() bundle.Mutator {
 		terraform.CheckRunningResource(),
 		terraform.Plan(terraform.PlanGoal("deploy")),
 		bundle.If(
-			approvalForUcSchemaDelete,
+			approvalForDeploy,
 			deployCore,
 			bundle.LogString("Deployment cancelled!"),
 		),
@@ -0,0 +1,67 @@
+package phases
+
+import (
+	"testing"
+
+	terraformlib "github.com/databricks/cli/libs/terraform"
+	tfjson "github.com/hashicorp/terraform-json"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParseTerraformActions(t *testing.T) {
+	changes := []*tfjson.ResourceChange{
+		{
+			Type: "databricks_pipeline",
+			Change: &tfjson.Change{
+				Actions: tfjson.Actions{tfjson.ActionCreate},
+			},
+			Name: "create pipeline",
+		},
+		{
+			Type: "databricks_pipeline",
+			Change: &tfjson.Change{
+				Actions: tfjson.Actions{tfjson.ActionDelete},
+			},
+			Name: "delete pipeline",
+		},
+		{
+			Type: "databricks_pipeline",
+			Change: &tfjson.Change{
+				Actions: tfjson.Actions{tfjson.ActionDelete, tfjson.ActionCreate},
+			},
+			Name: "recreate pipeline",
+		},
+		{
+			Type: "databricks_whatever",
+			Change: &tfjson.Change{
+				Actions: tfjson.Actions{tfjson.ActionDelete, tfjson.ActionCreate},
+			},
+			Name: "recreate whatever",
+		},
+	}
+
+	res := parseTerraformActions(changes, func(typ string, actions tfjson.Actions) bool {
+		if typ != "databricks_pipeline" {
+			return false
+		}
+
+		if actions.Delete() || actions.Replace() {
+			return true
+		}
+
+		return false
+	})
+
+	assert.Equal(t, []terraformlib.Action{
+		{
+			Action:       terraformlib.ActionTypeDelete,
+			ResourceType: "databricks_pipeline",
+			ResourceName: "delete pipeline",
+		},
+		{
+			Action:       terraformlib.ActionTypeRecreate,
+			ResourceType: "databricks_pipeline",
+			ResourceName: "recreate pipeline",
+		},
+	}, res)
+}
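A quick standalone check (my reading of the tfjson helpers as the test above uses them, not part of the commit): the action-pair semantics are what drive the delete/recreate split.

    acts := tfjson.Actions{tfjson.ActionDelete, tfjson.ActionCreate}
    fmt.Println(acts.Replace()) // true: a delete+create pair is a replacement
    fmt.Println(acts.Delete())  // false: Delete() only matches a pure single-action delete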
@@ -21,7 +21,18 @@ func Initialize() bundle.Mutator {
 		"initialize",
 		[]bundle.Mutator{
 			validate.AllResourcesHaveValues(),
+
+			// Update all path fields in the sync block to be relative to the bundle root path.
 			mutator.RewriteSyncPaths(),
+
+			// Configure the default sync path to equal the bundle root if not explicitly configured.
+			// By default, this means all files in the bundle root directory are synchronized.
+			mutator.SyncDefaultPath(),
+
+			// Figure out if the sync root path is identical to or an ancestor of the bundle root path.
+			// If it is an ancestor, this updates all paths to be relative to the sync root path.
+			mutator.SyncInferRoot(),
+
 			mutator.MergeJobClusters(),
 			mutator.MergeJobParameters(),
 			mutator.MergeJobTasks(),
@@ -2,7 +2,6 @@ package python
 
 import (
 	"context"
-	"path"
 	"path/filepath"
 	"testing"
 
@@ -18,11 +17,15 @@ func TestNoTransformByDefault(t *testing.T) {
 	tmpDir := t.TempDir()
 
 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		RootPath:     filepath.Join(tmpDir, "parent", "my_bundle"),
+		SyncRootPath: filepath.Join(tmpDir, "parent"),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "development",
 			},
+			Workspace: config.Workspace{
+				FilePath: "/Workspace/files",
+			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job1": {
@@ -63,11 +66,15 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) {
 	tmpDir := t.TempDir()
 
 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		RootPath:     filepath.Join(tmpDir, "parent", "my_bundle"),
+		SyncRootPath: filepath.Join(tmpDir, "parent"),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "development",
 			},
+			Workspace: config.Workspace{
+				FilePath: "/Workspace/files",
+			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job1": {
@@ -102,14 +109,7 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) {
 	task := b.Config.Resources.Jobs["job1"].Tasks[0]
 	require.Nil(t, task.PythonWheelTask)
 	require.NotNil(t, task.NotebookTask)
-
-	dir, err := b.InternalDir(context.Background())
-	require.NoError(t, err)
-
-	internalDirRel, err := filepath.Rel(b.RootPath, dir)
-	require.NoError(t, err)
-
-	require.Equal(t, path.Join(filepath.ToSlash(internalDirRel), "notebook_job1_key1"), task.NotebookTask.NotebookPath)
+	require.Equal(t, "/Workspace/files/my_bundle/.databricks/bundle/development/.internal/notebook_job1_key1", task.NotebookTask.NotebookPath)
 
 	require.Len(t, task.Libraries, 1)
 	require.Equal(t, "/Workspace/Users/test@test.com/bundle/dist/test.jar", task.Libraries[0].Jar)
@@ -2,6 +2,7 @@ package python
 
 import (
 	"context"
+	"strconv"
 	"strings"
 
 	"github.com/databricks/cli/bundle"
@@ -38,7 +39,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
 	tasks := libraries.FindTasksWithLocalLibraries(b)
 	for _, task := range tasks {
 		if task.NewCluster != nil {
-			if lowerThanExpectedVersion(ctx, task.NewCluster.SparkVersion) {
+			if lowerThanExpectedVersion(task.NewCluster.SparkVersion) {
 				return true
 			}
 		}
@@ -47,7 +48,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
 		for _, job := range b.Config.Resources.Jobs {
 			for _, cluster := range job.JobClusters {
 				if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster.SparkVersion != "" {
-					if lowerThanExpectedVersion(ctx, cluster.NewCluster.SparkVersion) {
+					if lowerThanExpectedVersion(cluster.NewCluster.SparkVersion) {
 						return true
 					}
 				}
@@ -64,7 +65,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
 				return false
 			}
 
-			if lowerThanExpectedVersion(ctx, version) {
+			if lowerThanExpectedVersion(version) {
 				return true
 			}
 		}
@@ -73,7 +74,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
 	return false
 }
 
-func lowerThanExpectedVersion(ctx context.Context, sparkVersion string) bool {
+func lowerThanExpectedVersion(sparkVersion string) bool {
 	parts := strings.Split(sparkVersion, ".")
 	if len(parts) < 2 {
 		return false
@@ -82,6 +83,17 @@ func lowerThanExpectedVersion(ctx context.Context, sparkVersion string) bool {
 	if parts[1][0] == 'x' { // treat versions like 13.x as the very latest minor (13.99)
 		parts[1] = "99"
 	}
+
+	// If any of the version parts are not numbers, we can't compare them,
+	// so consider the version compatible.
+	if _, err := strconv.Atoi(parts[0]); err != nil {
+		return false
+	}
+
+	if _, err := strconv.Atoi(parts[1]); err != nil {
+		return false
+	}
+
 	v := "v" + parts[0] + "." + parts[1]
 	return semver.Compare(v, "v13.1") < 0
 }
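A sketch (not from this commit, assuming the golang.org/x/mod/semver package used above) of the comparison the new guard protects: strconv.Atoi rejects non-numeric parts like "client" or "latest" before semver.Compare ever sees them.

    package main

    import (
    	"fmt"
    	"strconv"

    	"golang.org/x/mod/semver"
    )

    func main() {
    	// "12.2" sorts below the 13.1 floor, so wheel tasks on it are incompatible.
    	fmt.Println(semver.Compare("v12.2", "v13.1") < 0) // true

    	// Non-numeric parts fail strconv.Atoi, so the function above bails
    	// out early and treats the version as compatible.
    	_, err := strconv.Atoi("client")
    	fmt.Println(err != nil) // true
    }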
@@ -223,6 +223,17 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
 						{Whl: "./dist/test.whl"},
 					},
 				},
+				{
+					TaskKey:           "key7",
+					PythonWheelTask:   &jobs.PythonWheelTask{},
+					ExistingClusterId: "test-key-2",
+					Libraries: []compute.Library{
+						{Whl: "signol_lib-0.4.4-20240822+prod-py3-none-any.whl"},
+						{Pypi: &compute.PythonPyPiLibrary{
+							Package: "requests==2.25.1",
+						}},
+					},
+				},
 			},
 		},
 	},
@@ -241,6 +252,46 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
 	require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
 }
 
+func TestTasksWithPyPiPackageAreCompatible(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {
+						JobSettings: &jobs.JobSettings{
+							JobClusters: []jobs.JobCluster{
+								{
+									JobClusterKey: "cluster1",
+									NewCluster: compute.ClusterSpec{
+										SparkVersion: "12.2.x-scala2.12",
+									},
+								},
+							},
+							Tasks: []jobs.Task{
+								{
+									TaskKey:           "key1",
+									PythonWheelTask:   &jobs.PythonWheelTask{},
+									ExistingClusterId: "test-key-2",
+									Libraries: []compute.Library{
+										{Pypi: &compute.PythonPyPiLibrary{
+											Package: "requests==2.25.1",
+										}},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	m := mocks.NewMockWorkspaceClient(t)
+	b.SetWorkpaceClient(m.WorkspaceClient)
+
+	require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
+}
+
 func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) {
 	b := &bundle.Bundle{
 		Config: config.Root{
@@ -293,6 +344,8 @@ func TestSparkVersionLowerThanExpected(t *testing.T) {
 		"14.1.x-scala2.12":            false,
 		"13.x-snapshot-scala-2.12":    false,
 		"13.x-rc-scala-2.12":          false,
+		"client.1.10-scala2.12":       false,
+		"latest-stable-gpu-scala2.11": false,
 		"10.4.x-aarch64-photon-scala2.12": true,
 		"10.4.x-scala2.12":            true,
 		"13.0.x-scala2.12":            true,
@@ -300,7 +353,7 @@ func TestSparkVersionLowerThanExpected(t *testing.T) {
 	}
 
 	for k, v := range testCases {
-		result := lowerThanExpectedVersion(context.Background(), k)
+		result := lowerThanExpectedVersion(k)
 		require.Equal(t, v, result, k)
 	}
 }
@@ -85,6 +85,12 @@
         "enabled": {
           "description": ""
         },
+        "import": {
+          "description": "",
+          "items": {
+            "description": ""
+          }
+        },
         "venv_path": {
           "description": ""
         }
@@ -130,6 +136,29 @@
           }
         }
       },
+      "presets": {
+        "description": "",
+        "properties": {
+          "jobs_max_concurrent_runs": {
+            "description": ""
+          },
+          "name_prefix": {
+            "description": ""
+          },
+          "pipelines_development": {
+            "description": ""
+          },
+          "tags": {
+            "description": "",
+            "additionalproperties": {
+              "description": ""
+            }
+          },
+          "trigger_pause_status": {
+            "description": ""
+          }
+        }
+      },
       "resources": {
        "description": "Collection of Databricks resources to deploy.",
        "properties": {
@@ -3079,6 +3108,12 @@
           "items": {
             "description": ""
           }
+        },
+        "paths": {
+          "description": "",
+          "items": {
+            "description": ""
+          }
         }
       }
     },
@@ -3202,6 +3237,29 @@
           }
         }
       },
+      "presets": {
+        "description": "",
+        "properties": {
+          "jobs_max_concurrent_runs": {
+            "description": ""
+          },
+          "name_prefix": {
+            "description": ""
+          },
+          "pipelines_development": {
+            "description": ""
+          },
+          "tags": {
+            "description": "",
+            "additionalproperties": {
+              "description": ""
+            }
+          },
+          "trigger_pause_status": {
+            "description": ""
+          }
+        }
+      },
       "resources": {
        "description": "Collection of Databricks resources to deploy.",
        "properties": {
@@ -6151,6 +6209,12 @@
           "items": {
             "description": ""
           }
+        },
+        "paths": {
+          "description": "",
+          "items": {
+            "description": ""
+          }
         }
       }
     },
@@ -68,3 +68,23 @@ func TestComplexVariablesOverride(t *testing.T) {
 	require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"])
 	require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId)
 }
+
+func TestComplexVariablesOverrideWithMultipleFiles(t *testing.T) {
+	b, diags := loadTargetWithDiags("variables/complex_multiple_files", "dev")
+	require.Empty(t, diags)
+
+	diags = bundle.Apply(context.Background(), b, bundle.Seq(
+		mutator.SetVariables(),
+		mutator.ResolveVariableReferencesInComplexVariables(),
+		mutator.ResolveVariableReferences(
+			"variables",
+		),
+	))
+	require.NoError(t, diags.Error())
+	for _, cluster := range b.Config.Resources.Jobs["my_job"].JobClusters {
+		require.Equalf(t, "14.2.x-scala2.11", cluster.NewCluster.SparkVersion, "cluster: %v", cluster.JobClusterKey)
+		require.Equalf(t, "Standard_DS3_v2", cluster.NewCluster.NodeTypeId, "cluster: %v", cluster.JobClusterKey)
+		require.Equalf(t, 4, cluster.NewCluster.NumWorkers, "cluster: %v", cluster.JobClusterKey)
+		require.Equalf(t, "false", cluster.NewCluster.SparkConf["spark.speculation"], "cluster: %v", cluster.JobClusterKey)
+	}
+}
@@ -8,6 +8,10 @@ import (
 	"github.com/databricks/cli/bundle/config/mutator"
 	"github.com/databricks/cli/bundle/phases"
 	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/databricks-sdk-go/config"
+	"github.com/databricks/databricks-sdk-go/experimental/mocks"
+	"github.com/databricks/databricks-sdk-go/service/iam"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )
 
@@ -36,6 +40,8 @@ func loadTargetWithDiags(path, env string) (*bundle.Bundle, diag.Diagnostics) {
 	diags := bundle.Apply(ctx, b, bundle.Seq(
 		phases.LoadNamedTarget(env),
 		mutator.RewriteSyncPaths(),
+		mutator.SyncDefaultPath(),
+		mutator.SyncInferRoot(),
 		mutator.MergeJobClusters(),
 		mutator.MergeJobParameters(),
 		mutator.MergeJobTasks(),
@@ -43,3 +49,28 @@ func loadTargetWithDiags(path, env string) (*bundle.Bundle, diag.Diagnostics) {
 	))
 	return b, diags
 }
+
+func configureMock(t *testing.T, b *bundle.Bundle) {
+	// Configure mock workspace client
+	m := mocks.NewMockWorkspaceClient(t)
+	m.WorkspaceClient.Config = &config.Config{
+		Host: "https://mock.databricks.workspace.com",
+	}
+	m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{
+		UserName: "user@domain.com",
+	}, nil)
+	b.SetWorkpaceClient(m.WorkspaceClient)
+}
+
+func initializeTarget(t *testing.T, path, env string) (*bundle.Bundle, diag.Diagnostics) {
+	b := load(t, path)
+	configureMock(t, b)
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, bundle.Seq(
+		mutator.SelectTarget(env),
+		phases.Initialize(),
+	))
+
+	return b, diags
+}
@@ -1,33 +1,13 @@
 package config_tests
 
 import (
-	"context"
 	"testing"
 
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/phases"
-	"github.com/databricks/databricks-sdk-go/config"
-	"github.com/databricks/databricks-sdk-go/experimental/mocks"
-	"github.com/databricks/databricks-sdk-go/service/iam"
-	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )
 
 func TestExpandPipelineGlobPaths(t *testing.T) {
-	b := loadTarget(t, "./pipeline_glob_paths", "default")
-
-	// Configure mock workspace client
-	m := mocks.NewMockWorkspaceClient(t)
-	m.WorkspaceClient.Config = &config.Config{
-		Host: "https://mock.databricks.workspace.com",
-	}
-	m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{
-		UserName: "user@domain.com",
-	}, nil)
-	b.SetWorkpaceClient(m.WorkspaceClient)
-
-	ctx := context.Background()
-	diags := bundle.Apply(ctx, b, phases.Initialize())
+	b, diags := initializeTarget(t, "./pipeline_glob_paths", "default")
 	require.NoError(t, diags.Error())
 	require.Equal(
 		t,
@@ -37,19 +17,6 @@ func TestExpandPipelineGlobPaths(t *testing.T) {
 }
 
 func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) {
-	b := loadTarget(t, "./pipeline_glob_paths", "error")
-
-	// Configure mock workspace client
-	m := mocks.NewMockWorkspaceClient(t)
-	m.WorkspaceClient.Config = &config.Config{
-		Host: "https://mock.databricks.workspace.com",
-	}
-	m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{
-		UserName: "user@domain.com",
-	}, nil)
-	b.SetWorkpaceClient(m.WorkspaceClient)
-
-	ctx := context.Background()
-	diags := bundle.Apply(ctx, b, phases.Initialize())
+	_, diags := initializeTarget(t, "./pipeline_glob_paths", "error")
 	require.ErrorContains(t, diags.Error(), "notebook ./non-existent not found")
 }
@@ -1,36 +1,14 @@
 package config_tests
 
 import (
-	"context"
 	"testing"
 
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/phases"
-	"github.com/databricks/databricks-sdk-go/config"
-	"github.com/databricks/databricks-sdk-go/experimental/mocks"
-	"github.com/databricks/databricks-sdk-go/service/iam"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )
 
-func configureMock(t *testing.T, b *bundle.Bundle) {
-	// Configure mock workspace client
-	m := mocks.NewMockWorkspaceClient(t)
-	m.WorkspaceClient.Config = &config.Config{
-		Host: "https://mock.databricks.workspace.com",
-	}
-	m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{
-		UserName: "user@domain.com",
-	}, nil)
-	b.SetWorkpaceClient(m.WorkspaceClient)
-}
-
 func TestRelativePathTranslationDefault(t *testing.T) {
-	b := loadTarget(t, "./relative_path_translation", "default")
-	configureMock(t, b)
-
-	diags := bundle.Apply(context.Background(), b, phases.Initialize())
+	b, diags := initializeTarget(t, "./relative_path_translation", "default")
 	require.NoError(t, diags.Error())
 
 	t0 := b.Config.Resources.Jobs["job"].Tasks[0]
@@ -40,10 +18,7 @@ func TestRelativePathTranslationDefault(t *testing.T) {
 }
 
 func TestRelativePathTranslationOverride(t *testing.T) {
-	b := loadTarget(t, "./relative_path_translation", "override")
-	configureMock(t, b)
-
-	diags := bundle.Apply(context.Background(), b, phases.Initialize())
+	b, diags := initializeTarget(t, "./relative_path_translation", "override")
 	require.NoError(t, diags.Error())
 
 	t0 := b.Config.Resources.Jobs["job"].Tasks[0]
@@ -0,0 +1,20 @@
+bundle:
+  name: sync_paths
+
+workspace:
+  host: https://acme.cloud.databricks.com/
+
+sync:
+  paths:
+    - src
+
+targets:
+  development:
+    sync:
+      paths:
+        - development
+
+  staging:
+    sync:
+      paths:
+        - staging
@@ -0,0 +1,26 @@
+bundle:
+  name: sync_paths
+
+workspace:
+  host: https://acme.cloud.databricks.com/
+
+targets:
+  development:
+    sync:
+      paths:
+        - development
+
+  staging:
+    sync:
+      paths:
+        - staging
+
+  undefined: ~
+
+  nil:
+    sync:
+      paths: ~
+
+  empty:
+    sync:
+      paths: []
@@ -0,0 +1,10 @@
+bundle:
+  name: shared_code
+
+workspace:
+  host: https://acme.cloud.databricks.com/
+
+sync:
+  paths:
+    - "../common"
+    - "."
@@ -0,0 +1 @@
+Placeholder for files to be deployed as part of multiple bundles.
@@ -12,14 +12,20 @@ func TestSyncOverride(t *testing.T) {
 	var b *bundle.Bundle
 
 	b = loadTarget(t, "./sync/override", "development")
+	assert.Equal(t, filepath.FromSlash("sync/override"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("tests/*")}, b.Config.Sync.Include)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude)
 
 	b = loadTarget(t, "./sync/override", "staging")
+	assert.Equal(t, filepath.FromSlash("sync/override"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include)
 	assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude)
 
 	b = loadTarget(t, "./sync/override", "prod")
+	assert.Equal(t, filepath.FromSlash("sync/override"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("src/*")}, b.Config.Sync.Include)
 	assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude)
 }
@@ -28,14 +34,20 @@ func TestSyncOverrideNoRootSync(t *testing.T) {
 	var b *bundle.Bundle
 
 	b = loadTarget(t, "./sync/override_no_root", "development")
+	assert.Equal(t, filepath.FromSlash("sync/override_no_root"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude)
 
 	b = loadTarget(t, "./sync/override_no_root", "staging")
+	assert.Equal(t, filepath.FromSlash("sync/override_no_root"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include)
 	assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude)
 
 	b = loadTarget(t, "./sync/override_no_root", "prod")
+	assert.Equal(t, filepath.FromSlash("sync/override_no_root"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.ElementsMatch(t, []string{}, b.Config.Sync.Include)
 	assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude)
 }
@@ -44,10 +56,14 @@ func TestSyncNil(t *testing.T) {
 	var b *bundle.Bundle
 
 	b = loadTarget(t, "./sync/nil", "development")
+	assert.Equal(t, filepath.FromSlash("sync/nil"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.Nil(t, b.Config.Sync.Include)
 	assert.Nil(t, b.Config.Sync.Exclude)
 
 	b = loadTarget(t, "./sync/nil", "staging")
+	assert.Equal(t, filepath.FromSlash("sync/nil"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude)
 }
@@ -56,10 +72,59 @@ func TestSyncNilRoot(t *testing.T) {
 	var b *bundle.Bundle
 
 	b = loadTarget(t, "./sync/nil_root", "development")
+	assert.Equal(t, filepath.FromSlash("sync/nil_root"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.Nil(t, b.Config.Sync.Include)
 	assert.Nil(t, b.Config.Sync.Exclude)
 
 	b = loadTarget(t, "./sync/nil_root", "staging")
+	assert.Equal(t, filepath.FromSlash("sync/nil_root"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include)
 	assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude)
 }
+
+func TestSyncPaths(t *testing.T) {
+	var b *bundle.Bundle
+
+	b = loadTarget(t, "./sync/paths", "development")
+	assert.Equal(t, filepath.FromSlash("sync/paths"), b.SyncRootPath)
+	assert.Equal(t, []string{"src", "development"}, b.Config.Sync.Paths)
+
+	b = loadTarget(t, "./sync/paths", "staging")
+	assert.Equal(t, filepath.FromSlash("sync/paths"), b.SyncRootPath)
+	assert.Equal(t, []string{"src", "staging"}, b.Config.Sync.Paths)
+}
+
+func TestSyncPathsNoRoot(t *testing.T) {
+	var b *bundle.Bundle
+
+	b = loadTarget(t, "./sync/paths_no_root", "development")
+	assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath)
+	assert.ElementsMatch(t, []string{"development"}, b.Config.Sync.Paths)
+
+	b = loadTarget(t, "./sync/paths_no_root", "staging")
+	assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath)
+	assert.ElementsMatch(t, []string{"staging"}, b.Config.Sync.Paths)
+
+	// If not set at all, it defaults to "."
+	b = loadTarget(t, "./sync/paths_no_root", "undefined")
+	assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath)
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
+
+	// If set to nil, it won't sync anything.
+	b = loadTarget(t, "./sync/paths_no_root", "nil")
+	assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath)
+	assert.Len(t, b.Config.Sync.Paths, 0)
+
+	// If set to an empty sequence, it won't sync anything.
+	b = loadTarget(t, "./sync/paths_no_root", "empty")
+	assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath)
+	assert.Len(t, b.Config.Sync.Paths, 0)
+}
+
+func TestSyncSharedCode(t *testing.T) {
+	b := loadTarget(t, "./sync/shared_code/bundle", "default")
+	assert.Equal(t, filepath.FromSlash("sync/shared_code"), b.SyncRootPath)
+	assert.ElementsMatch(t, []string{"common", "bundle"}, b.Config.Sync.Paths)
+}
@@ -0,0 +1,52 @@
+bundle:
+  name: complex-variables-multiple-files
+
+resources:
+  jobs:
+    my_job:
+      job_clusters:
+        - job_cluster_key: key1
+          new_cluster: ${var.cluster1}
+        - job_cluster_key: key2
+          new_cluster: ${var.cluster2}
+        - job_cluster_key: key3
+          new_cluster: ${var.cluster3}
+        - job_cluster_key: key4
+          new_cluster: ${var.cluster4}
+variables:
+  cluster1:
+    type: complex
+    description: "A cluster definition"
+  cluster2:
+    type: complex
+    description: "A cluster definition"
+  cluster3:
+    type: complex
+    description: "A cluster definition"
+  cluster4:
+    type: complex
+    description: "A cluster definition"
+
+include:
+  - ./variables/*.yml
+
+
+targets:
+  default:
+  dev:
+    variables:
+      cluster3:
+        spark_version: "14.2.x-scala2.11"
+        node_type_id: "Standard_DS3_v2"
+        num_workers: 4
+        spark_conf:
+          spark.speculation: false
+          spark.databricks.delta.retentionDurationCheck.enabled: false
+      cluster4:
+        default:
+          spark_version: "14.2.x-scala2.11"
+          node_type_id: "Standard_DS3_v2"
+          num_workers: 4
+          spark_conf:
+            spark.speculation: false
+            spark.databricks.delta.retentionDurationCheck.enabled: false
@@ -0,0 +1,19 @@
+targets:
+  default:
+  dev:
+    variables:
+      cluster1:
+        spark_version: "14.2.x-scala2.11"
+        node_type_id: "Standard_DS3_v2"
+        num_workers: 4
+        spark_conf:
+          spark.speculation: false
+          spark.databricks.delta.retentionDurationCheck.enabled: false
+      cluster2:
+        default:
+          spark_version: "14.2.x-scala2.11"
+          node_type_id: "Standard_DS3_v2"
+          num_workers: 4
+          spark_conf:
+            spark.speculation: false
+            spark.databricks.delta.retentionDurationCheck.enabled: false
@@ -124,8 +124,13 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) {
 	}, nil)
 
 	clustersApi := mockWorkspaceClient.GetMockClustersAPI()
-	clustersApi.EXPECT().GetByClusterName(mock.Anything, "some-test-cluster").Return(&compute.ClusterDetails{
-		ClusterId: "4321",
+	clustersApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+		FilterBy: &compute.ListClustersFilterBy{
+			ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+		},
+	}).Return([]compute.ClusterDetails{
+		{ClusterId: "4321", ClusterName: "some-test-cluster"},
+		{ClusterId: "9876", ClusterName: "some-other-cluster"},
 	}, nil)
 
 	clusterPoliciesApi := mockWorkspaceClient.GetMockClusterPoliciesAPI()
@@ -19,7 +19,7 @@ import (
 
 func promptForProfile(ctx context.Context, defaultValue string) (string, error) {
 	if !cmdio.IsInTTY(ctx) {
-		return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify a profile using --profile")
+		return "", nil
 	}
 
 	prompt := cmdio.Prompt(ctx)
@@ -29,6 +29,12 @@ func (f *progressLoggerFlag) resolveModeDefault(format flags.ProgressLogFormat)
 }
 
 func (f *progressLoggerFlag) initializeContext(ctx context.Context) (context.Context, error) {
+	// No need to initialize the logger if it's already set in the context. This
+	// happens in unit tests where the logger is set up as a fixture.
+	if _, ok := cmdio.FromContext(ctx); ok {
+		return ctx, nil
+	}
+
 	if f.log.level.String() != "disabled" && f.log.file.String() == "stderr" &&
 		f.ProgressLogFormat == flags.ModeInplace {
 		return nil, fmt.Errorf("inplace progress logging cannot be used when log-file is stderr")
@@ -17,8 +17,10 @@ import (
 func TestSyncOptionsFromBundle(t *testing.T) {
 	tempDir := t.TempDir()
 	b := &bundle.Bundle{
 		RootPath:   tempDir,
 		BundleRoot: vfs.MustNew(tempDir),
+		SyncRootPath: tempDir,
+		SyncRoot:     vfs.MustNew(tempDir),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -1,17 +1,83 @@
 package clusters
 
 import (
+	"strings"
+
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/spf13/cobra"
 )
 
-func listOverride(listCmd *cobra.Command, _ *compute.ListClustersRequest) {
+// Below we add overrides for the cluster list command's filter flags to allow custom filtering.
+// Auto-generating such flags is not yet supported by the CLI generator.
+func listOverride(listCmd *cobra.Command, listReq *compute.ListClustersRequest) {
 	listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
	{{header "ID"}}	{{header "Name"}}	{{header "State"}}`)
 	listCmd.Annotations["template"] = cmdio.Heredoc(`
	{{range .}}{{.ClusterId | green}}	{{.ClusterName | cyan}}	{{if eq .State "RUNNING"}}{{green "%s" .State}}{{else if eq .State "TERMINATED"}}{{red "%s" .State}}{{else}}{{blue "%s" .State}}{{end}}
	{{end}}`)
+
+	listReq.FilterBy = &compute.ListClustersFilterBy{}
+	listCmd.Flags().BoolVar(&listReq.FilterBy.IsPinned, "is-pinned", false, "Filter clusters by pinned status")
+	listCmd.Flags().StringVar(&listReq.FilterBy.PolicyId, "policy-id", "", "Filter clusters by policy id")
+
+	sources := &clusterSources{source: &listReq.FilterBy.ClusterSources}
+	listCmd.Flags().Var(sources, "cluster-sources", "Filter clusters by source")
+
+	states := &clusterStates{state: &listReq.FilterBy.ClusterStates}
+	listCmd.Flags().Var(states, "cluster-states", "Filter clusters by states")
+}
+
+type clusterSources struct {
+	source *[]compute.ClusterSource
+}
+
+func (c *clusterSources) String() string {
+	s := make([]string, len(*c.source))
+	for i, source := range *c.source {
+		s[i] = string(source)
+	}
+
+	return strings.Join(s, ",")
+}
+
+func (c *clusterSources) Set(value string) error {
+	splits := strings.Split(value, ",")
+	for _, split := range splits {
+		*c.source = append(*c.source, compute.ClusterSource(split))
+	}
+
+	return nil
+}
+
+func (c *clusterSources) Type() string {
+	return "[]string"
+}
+
+type clusterStates struct {
+	state *[]compute.State
+}
+
+func (c *clusterStates) String() string {
+	s := make([]string, len(*c.state))
+	for i, source := range *c.state {
+		s[i] = string(source)
+	}
+
+	return strings.Join(s, ",")
+}
+
+func (c *clusterStates) Set(value string) error {
+	splits := strings.Split(value, ",")
+	for _, split := range splits {
+		*c.state = append(*c.state, compute.State(split))
+	}
+
+	return nil
+}
+
+func (c *clusterStates) Type() string {
+	return "[]string"
+}
 
 func listNodeTypesOverride(listNodeTypesCmd *cobra.Command) {
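A short usage sketch (not from the commit) of the pflag.Value round-trip implemented above, assuming the clusterSources type and the SDK's ClusterSource string constants: Set parses the comma-separated flag value, and String re-joins it for display.

    var sources []compute.ClusterSource
    v := &clusterSources{source: &sources}
    _ = v.Set("UI,API")     // what cobra/pflag calls for --cluster-sources UI,API
    fmt.Println(v.String()) // "UI,API"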
@@ -44,6 +44,8 @@ import (
 	permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration"
 	permissions "github.com/databricks/cli/cmd/workspace/permissions"
 	pipelines "github.com/databricks/cli/cmd/workspace/pipelines"
+	policy_compliance_for_clusters "github.com/databricks/cli/cmd/workspace/policy-compliance-for-clusters"
+	policy_compliance_for_jobs "github.com/databricks/cli/cmd/workspace/policy-compliance-for-jobs"
 	policy_families "github.com/databricks/cli/cmd/workspace/policy-families"
 	provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters"
 	provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges"
@@ -63,6 +65,7 @@ import (
 	recipients "github.com/databricks/cli/cmd/workspace/recipients"
 	registered_models "github.com/databricks/cli/cmd/workspace/registered-models"
 	repos "github.com/databricks/cli/cmd/workspace/repos"
+	resource_quotas "github.com/databricks/cli/cmd/workspace/resource-quotas"
 	schemas "github.com/databricks/cli/cmd/workspace/schemas"
 	secrets "github.com/databricks/cli/cmd/workspace/secrets"
 	service_principals "github.com/databricks/cli/cmd/workspace/service-principals"
@ -130,6 +133,8 @@ func All() []*cobra.Command {
|
||||||
out = append(out, permission_migration.New())
|
out = append(out, permission_migration.New())
|
||||||
out = append(out, permissions.New())
|
out = append(out, permissions.New())
|
||||||
out = append(out, pipelines.New())
|
out = append(out, pipelines.New())
|
||||||
|
out = append(out, policy_compliance_for_clusters.New())
|
||||||
|
out = append(out, policy_compliance_for_jobs.New())
|
||||||
out = append(out, policy_families.New())
|
out = append(out, policy_families.New())
|
||||||
out = append(out, provider_exchange_filters.New())
|
out = append(out, provider_exchange_filters.New())
|
||||||
out = append(out, provider_exchanges.New())
|
out = append(out, provider_exchanges.New())
|
||||||
|
@ -149,6 +154,7 @@ func All() []*cobra.Command {
|
||||||
out = append(out, recipients.New())
|
out = append(out, recipients.New())
|
||||||
out = append(out, registered_models.New())
|
out = append(out, registered_models.New())
|
||||||
out = append(out, repos.New())
|
out = append(out, repos.New())
|
||||||
|
out = append(out, resource_quotas.New())
|
||||||
out = append(out, schemas.New())
|
out = append(out, schemas.New())
|
||||||
out = append(out, secrets.New())
|
out = append(out, secrets.New())
|
||||||
out = append(out, service_principals.New())
|
out = append(out, service_principals.New())
|
||||||
|
|
|
@ -75,6 +75,7 @@ func newCreate() *cobra.Command {
 	cmd.Flags().StringVar(&createReq.AccessPoint, "access-point", createReq.AccessPoint, `The AWS access point to use when accessing S3 for this external location.`)
 	cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`)
 	// TODO: complex arg: encryption_details
+	cmd.Flags().BoolVar(&createReq.Fallback, "fallback", createReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
 	cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Indicates whether the external location is read-only.`)
 	cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`)
@ -347,6 +348,7 @@ func newUpdate() *cobra.Command {
 	cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
 	cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`)
 	// TODO: complex arg: encryption_details
+	cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
 	cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`)
 	cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
 	cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`)
cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go (generated, new executable file, 260 lines)

@ -0,0 +1,260 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

package policy_compliance_for_clusters

import (
	"fmt"

	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/spf13/cobra"
)

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)

func New() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "policy-compliance-for-clusters",
		Short: `The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace.`,
		Long: `The policy compliance APIs allow you to view and manage the policy compliance
  status of clusters in your workspace.

  A cluster is compliant with its policy if its configuration satisfies all its
  policy rules. Clusters could be out of compliance if their policy was updated
  after the cluster was last edited.

  The get and list compliance APIs allow you to view the policy compliance
  status of a cluster. The enforce compliance API allows you to update a cluster
  to be compliant with the current version of its policy.`,
		GroupID: "compute",
		Annotations: map[string]string{
			"package": "compute",
		},
	}

	// Add methods
	cmd.AddCommand(newEnforceCompliance())
	cmd.AddCommand(newGetCompliance())
	cmd.AddCommand(newListCompliance())

	// Apply optional overrides to this command.
	for _, fn := range cmdOverrides {
		fn(cmd)
	}

	return cmd
}

// start enforce-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var enforceComplianceOverrides []func(
	*cobra.Command,
	*compute.EnforceClusterComplianceRequest,
)

func newEnforceCompliance() *cobra.Command {
	cmd := &cobra.Command{}

	var enforceComplianceReq compute.EnforceClusterComplianceRequest
	var enforceComplianceJson flags.JsonFlag

	// TODO: short flags
	cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`)

	cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews the changes that would be made to a cluster to enforce compliance but does not update the cluster.`)

	cmd.Use = "enforce-compliance CLUSTER_ID"
	cmd.Short = `Enforce cluster policy compliance.`
	cmd.Long = `Enforce cluster policy compliance.

  Updates a cluster to be compliant with the current version of its policy. A
  cluster can be updated if it is in a RUNNING or TERMINATED state.

  If a cluster is updated while in a RUNNING state, it will be restarted so
  that the new attributes can take effect.

  If a cluster is updated while in a TERMINATED state, it will remain
  TERMINATED. The next time the cluster is started, the new attributes will
  take effect.

  Clusters created by the Databricks Jobs, DLT, or Models services cannot be
  enforced by this API. Instead, use the "Enforce job policy compliance" API to
  enforce policy compliance on jobs.

  Arguments:
    CLUSTER_ID: The ID of the cluster you want to enforce policy compliance on.`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		if cmd.Flags().Changed("json") {
			err := root.ExactArgs(0)(cmd, args)
			if err != nil {
				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input")
			}
			return nil
		}
		check := root.ExactArgs(1)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		if cmd.Flags().Changed("json") {
			err = enforceComplianceJson.Unmarshal(&enforceComplianceReq)
			if err != nil {
				return err
			}
		}
		if !cmd.Flags().Changed("json") {
			enforceComplianceReq.ClusterId = args[0]
		}

		response, err := w.PolicyComplianceForClusters.EnforceCompliance(ctx, enforceComplianceReq)
		if err != nil {
			return err
		}
		return cmdio.Render(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range enforceComplianceOverrides {
		fn(cmd, &enforceComplianceReq)
	}

	return cmd
}

// start get-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getComplianceOverrides []func(
	*cobra.Command,
	*compute.GetClusterComplianceRequest,
)

func newGetCompliance() *cobra.Command {
	cmd := &cobra.Command{}

	var getComplianceReq compute.GetClusterComplianceRequest

	// TODO: short flags

	cmd.Use = "get-compliance CLUSTER_ID"
	cmd.Short = `Get cluster policy compliance.`
	cmd.Long = `Get cluster policy compliance.

  Returns the policy compliance status of a cluster. Clusters could be out of
  compliance if their policy was updated after the cluster was last edited.

  Arguments:
    CLUSTER_ID: The ID of the cluster to get the compliance status`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		check := root.ExactArgs(1)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		getComplianceReq.ClusterId = args[0]

		response, err := w.PolicyComplianceForClusters.GetCompliance(ctx, getComplianceReq)
		if err != nil {
			return err
		}
		return cmdio.Render(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range getComplianceOverrides {
		fn(cmd, &getComplianceReq)
	}

	return cmd
}

// start list-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listComplianceOverrides []func(
	*cobra.Command,
	*compute.ListClusterCompliancesRequest,
)

func newListCompliance() *cobra.Command {
	cmd := &cobra.Command{}

	var listComplianceReq compute.ListClusterCompliancesRequest

	// TODO: short flags

	cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`)
	cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`)

	cmd.Use = "list-compliance POLICY_ID"
	cmd.Short = `List cluster policy compliance.`
	cmd.Long = `List cluster policy compliance.

  Returns the policy compliance status of all clusters that use a given policy.
  Clusters could be out of compliance if their policy was updated after the
  cluster was last edited.

  Arguments:
    POLICY_ID: Canonical unique identifier for the cluster policy.`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		check := root.ExactArgs(1)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		listComplianceReq.PolicyId = args[0]

		response := w.PolicyComplianceForClusters.ListCompliance(ctx, listComplianceReq)
		return cmdio.RenderIterator(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range listComplianceOverrides {
		fn(cmd, &listComplianceReq)
	}

	return cmd
}

// end service PolicyComplianceForClusters
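Each generated command keeps a package-level overrides slice so hand-curated code can adjust behavior without editing the generated file. A sketch of what a manually curated override.go in this package could look like; no such file exists in this diff, and the customization shown is illustrative only:

package policy_compliance_for_clusters

import (
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/spf13/cobra"
)

func init() {
	// init() runs at package load, before New() is ever called, so this
	// function is applied to every constructed enforce-compliance command.
	// Here it only adds an example string; the generated RunE stays untouched.
	enforceComplianceOverrides = append(enforceComplianceOverrides,
		func(cmd *cobra.Command, req *compute.EnforceClusterComplianceRequest) {
			cmd.Example = "  databricks policy-compliance-for-clusters enforce-compliance 0123-456789-abcdef0"
		})
}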
cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go (generated, new executable file, 262 lines)

@ -0,0 +1,262 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

package policy_compliance_for_jobs

import (
	"fmt"

	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/spf13/cobra"
)

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)

func New() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "policy-compliance-for-jobs",
		Short: `The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace.`,
		Long: `The compliance APIs allow you to view and manage the policy compliance status
  of jobs in your workspace. This API currently only supports compliance
  controls for cluster policies.

  A job is in compliance if its cluster configurations satisfy the rules of all
  their respective cluster policies. A job could be out of compliance if a
  cluster policy it uses was updated after the job was last edited. The job is
  considered out of compliance if any of its clusters no longer comply with
  their updated policies.

  The get and list compliance APIs allow you to view the policy compliance
  status of a job. The enforce compliance API allows you to update a job so that
  it becomes compliant with all of its policies.`,
		GroupID: "jobs",
		Annotations: map[string]string{
			"package": "jobs",
		},
	}

	// Add methods
	cmd.AddCommand(newEnforceCompliance())
	cmd.AddCommand(newGetCompliance())
	cmd.AddCommand(newListCompliance())

	// Apply optional overrides to this command.
	for _, fn := range cmdOverrides {
		fn(cmd)
	}

	return cmd
}

// start enforce-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var enforceComplianceOverrides []func(
	*cobra.Command,
	*jobs.EnforcePolicyComplianceRequest,
)

func newEnforceCompliance() *cobra.Command {
	cmd := &cobra.Command{}

	var enforceComplianceReq jobs.EnforcePolicyComplianceRequest
	var enforceComplianceJson flags.JsonFlag

	// TODO: short flags
	cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`)

	cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews changes made to the job to comply with its policy, but does not update the job.`)

	cmd.Use = "enforce-compliance JOB_ID"
	cmd.Short = `Enforce job policy compliance.`
	cmd.Long = `Enforce job policy compliance.

  Updates a job so the job clusters that are created when running the job
  (specified in new_cluster) are compliant with the current versions of their
  respective cluster policies. All-purpose clusters used in the job will not be
  updated.

  Arguments:
    JOB_ID: The ID of the job you want to enforce policy compliance on.`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		if cmd.Flags().Changed("json") {
			err := root.ExactArgs(0)(cmd, args)
			if err != nil {
				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input")
			}
			return nil
		}
		check := root.ExactArgs(1)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		if cmd.Flags().Changed("json") {
			err = enforceComplianceJson.Unmarshal(&enforceComplianceReq)
			if err != nil {
				return err
			}
		}
		if !cmd.Flags().Changed("json") {
			_, err = fmt.Sscan(args[0], &enforceComplianceReq.JobId)
			if err != nil {
				return fmt.Errorf("invalid JOB_ID: %s", args[0])
			}
		}

		response, err := w.PolicyComplianceForJobs.EnforceCompliance(ctx, enforceComplianceReq)
		if err != nil {
			return err
		}
		return cmdio.Render(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range enforceComplianceOverrides {
		fn(cmd, &enforceComplianceReq)
	}

	return cmd
}

// start get-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getComplianceOverrides []func(
	*cobra.Command,
	*jobs.GetPolicyComplianceRequest,
)

func newGetCompliance() *cobra.Command {
	cmd := &cobra.Command{}

	var getComplianceReq jobs.GetPolicyComplianceRequest

	// TODO: short flags

	cmd.Use = "get-compliance JOB_ID"
	cmd.Short = `Get job policy compliance.`
	cmd.Long = `Get job policy compliance.

  Returns the policy compliance status of a job. Jobs could be out of compliance
  if a cluster policy they use was updated after the job was last edited and
  some of its job clusters no longer comply with their updated policies.

  Arguments:
    JOB_ID: The ID of the job whose compliance status you are requesting.`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		check := root.ExactArgs(1)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		_, err = fmt.Sscan(args[0], &getComplianceReq.JobId)
		if err != nil {
			return fmt.Errorf("invalid JOB_ID: %s", args[0])
		}

		response, err := w.PolicyComplianceForJobs.GetCompliance(ctx, getComplianceReq)
		if err != nil {
			return err
		}
		return cmdio.Render(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range getComplianceOverrides {
		fn(cmd, &getComplianceReq)
	}

	return cmd
}

// start list-compliance command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listComplianceOverrides []func(
	*cobra.Command,
	*jobs.ListJobComplianceRequest,
)

func newListCompliance() *cobra.Command {
	cmd := &cobra.Command{}

	var listComplianceReq jobs.ListJobComplianceRequest

	// TODO: short flags

	cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`)
	cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`)

	cmd.Use = "list-compliance POLICY_ID"
	cmd.Short = `List job policy compliance.`
	cmd.Long = `List job policy compliance.

  Returns the policy compliance status of all jobs that use a given policy. Jobs
  could be out of compliance if a cluster policy they use was updated after the
  job was last edited and its job clusters no longer comply with the updated
  policy.

  Arguments:
    POLICY_ID: Canonical unique identifier for the cluster policy.`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		check := root.ExactArgs(1)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		listComplianceReq.PolicyId = args[0]

		response := w.PolicyComplianceForJobs.ListCompliance(ctx, listComplianceReq)
		return cmdio.RenderIterator(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range listComplianceOverrides {
		fn(cmd, &listComplianceReq)
	}

	return cmd
}

// end service PolicyComplianceForJobs
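One structural difference from the clusters variant: job IDs are numeric (an int64 in this SDK version), so the generated RunE parses the positional argument with fmt.Sscan rather than assigning the string directly, and rejects non-numeric input. A standalone illustration of that parsing step:

package main

import "fmt"

func main() {
	var jobId int64
	arg := "1234567890" // the positional JOB_ID argument

	// Mirrors the generated code above: Sscan converts the CLI string into
	// the request's int64 JobId field and fails on non-numeric input.
	if _, err := fmt.Sscan(arg, &jobId); err != nil {
		fmt.Printf("invalid JOB_ID: %s\n", arg)
		return
	}
	fmt.Println("parsed job id:", jobId)
}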
@ -16,9 +16,9 @@ var cmdOverrides []func(*cobra.Command)
 func New() *cobra.Command {
 	cmd := &cobra.Command{
 		Use: "query-history",
-		Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints, serverless compute, and DLT.`,
+		Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.`,
 		Long: `A service responsible for storing and retrieving the list of queries run
-  against SQL endpoints, serverless compute, and DLT.`,
+  against SQL endpoints and serverless compute.`,
 		GroupID: "sql",
 		Annotations: map[string]string{
 			"package": "sql",
@ -53,6 +53,7 @@ func newList() *cobra.Command {
 	// TODO: short flags

 	// TODO: complex arg: filter_by
+	cmd.Flags().BoolVar(&listReq.IncludeMetrics, "include-metrics", listReq.IncludeMetrics, `Whether to include the query metrics with each query.`)
 	cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Limit the number of results returned in one page.`)
 	cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`)
@ -60,8 +61,7 @@ func newList() *cobra.Command {
 	cmd.Short = `List Queries.`
 	cmd.Long = `List Queries.

-  List the history of queries through SQL warehouses, serverless compute, and
-  DLT.
+  List the history of queries through SQL warehouses, and serverless compute.

   You can filter by user ID, warehouse ID, status, and time range. Most recently
   started queries are returned first (up to max_results in request). The
@ -0,0 +1,168 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

package resource_quotas

import (
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/spf13/cobra"
)

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)

func New() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "resource-quotas",
		Short: `Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created.`,
		Long: `Unity Catalog enforces resource quotas on all securable objects, which limits
  the number of resources that can be created. Quotas are expressed in terms of
  a resource type and a parent (for example, tables per metastore or schemas per
  catalog). The resource quota APIs enable you to monitor your current usage and
  limits. For more information on resource quotas see the [Unity Catalog
  documentation].

  [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas`,
		GroupID: "catalog",
		Annotations: map[string]string{
			"package": "catalog",
		},
	}

	// Add methods
	cmd.AddCommand(newGetQuota())
	cmd.AddCommand(newListQuotas())

	// Apply optional overrides to this command.
	for _, fn := range cmdOverrides {
		fn(cmd)
	}

	return cmd
}

// start get-quota command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getQuotaOverrides []func(
	*cobra.Command,
	*catalog.GetQuotaRequest,
)

func newGetQuota() *cobra.Command {
	cmd := &cobra.Command{}

	var getQuotaReq catalog.GetQuotaRequest

	// TODO: short flags

	cmd.Use = "get-quota PARENT_SECURABLE_TYPE PARENT_FULL_NAME QUOTA_NAME"
	cmd.Short = `Get information for a single resource quota.`
	cmd.Long = `Get information for a single resource quota.

  The GetQuota API returns usage information for a single resource quota,
  defined as a child-parent pair. This API also refreshes the quota count if it
  is out of date. Refreshes are triggered asynchronously. The updated count
  might not be returned in the first call.

  Arguments:
    PARENT_SECURABLE_TYPE: Securable type of the quota parent.
    PARENT_FULL_NAME: Full name of the parent resource. Provide the metastore ID if the parent
      is a metastore.
    QUOTA_NAME: Name of the quota. Follows the pattern of the quota type, with "-quota"
      added as a suffix.`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		check := root.ExactArgs(3)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		getQuotaReq.ParentSecurableType = args[0]
		getQuotaReq.ParentFullName = args[1]
		getQuotaReq.QuotaName = args[2]

		response, err := w.ResourceQuotas.GetQuota(ctx, getQuotaReq)
		if err != nil {
			return err
		}
		return cmdio.Render(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range getQuotaOverrides {
		fn(cmd, &getQuotaReq)
	}

	return cmd
}

// start list-quotas command

// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listQuotasOverrides []func(
	*cobra.Command,
	*catalog.ListQuotasRequest,
)

func newListQuotas() *cobra.Command {
	cmd := &cobra.Command{}

	var listQuotasReq catalog.ListQuotasRequest

	// TODO: short flags

	cmd.Flags().IntVar(&listQuotasReq.MaxResults, "max-results", listQuotasReq.MaxResults, `The number of quotas to return.`)
	cmd.Flags().StringVar(&listQuotasReq.PageToken, "page-token", listQuotasReq.PageToken, `Opaque token for the next page of results.`)

	cmd.Use = "list-quotas"
	cmd.Short = `List all resource quotas under a metastore.`
	cmd.Long = `List all resource quotas under a metastore.

  ListQuotas returns all quota values under the metastore. There are no SLAs on
  the freshness of the counts returned. This API does not trigger a refresh of
  quota counts.`

	cmd.Annotations = make(map[string]string)

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		check := root.ExactArgs(0)
		return check(cmd, args)
	}

	cmd.PreRunE = root.MustWorkspaceClient
	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)

		response := w.ResourceQuotas.ListQuotas(ctx, listQuotasReq)
		return cmdio.RenderIterator(ctx, response)
	}

	// Disable completions since they are not applicable.
	// Can be overridden by manual implementation in `override.go`.
	cmd.ValidArgsFunction = cobra.NoFileCompletions

	// Apply optional overrides to this command.
	for _, fn := range listQuotasOverrides {
		fn(cmd, &listQuotasReq)
	}

	return cmd
}

// end service ResourceQuotas
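The list commands in the generated files above hand cmdio.RenderIterator a paging iterator rather than a fully materialized slice, so results stream as pages are fetched. A sketch of draining the same iterator directly from Go; it assumes a workspace client configured via the environment, and the printed field names follow the catalog.QuotaInfo shape in SDK v0.45.0 (treat them as illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// ListQuotas returns a listing iterator; pages are fetched lazily via
	// max_results/page_token as HasNext advances through the results.
	it := w.ResourceQuotas.ListQuotas(ctx, catalog.ListQuotasRequest{})
	for it.HasNext(ctx) {
		quota, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d of %d used\n", quota.QuotaName, quota.QuotaCount, quota.QuotaLimit)
	}
}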
@ -241,28 +241,16 @@ func newGet() *cobra.Command {

 	cmd.Annotations = make(map[string]string)

+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(1)
+		return check(cmd, args)
+	}
+
 	cmd.PreRunE = root.MustWorkspaceClient
 	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		w := root.WorkspaceClient(ctx)

-		if len(args) == 0 {
-			promptSpinner := cmdio.Spinner(ctx)
-			promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down."
-			names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx, catalog.ListStorageCredentialsRequest{})
-			close(promptSpinner)
-			if err != nil {
-				return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
-			}
-			id, err := cmdio.Select(ctx, names, "Name of the storage credential")
-			if err != nil {
-				return err
-			}
-			args = append(args, id)
-		}
-		if len(args) != 1 {
-			return fmt.Errorf("expected to have name of the storage credential")
-		}
 		getReq.Name = args[0]

 		response, err := w.StorageCredentials.Get(ctx, getReq)
go.mod (7 changes)

@ -3,14 +3,14 @@ module github.com/databricks/cli
 go 1.22

 require (
-	github.com/Masterminds/semver/v3 v3.2.1 // MIT
+	github.com/Masterminds/semver/v3 v3.3.0 // MIT
 	github.com/briandowns/spinner v1.23.1 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.44.0 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.45.0 // Apache 2.0
 	github.com/fatih/color v1.17.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
 	github.com/hashicorp/go-version v1.7.0 // MPL 2.0
-	github.com/hashicorp/hc-install v0.8.0 // MPL 2.0
+	github.com/hashicorp/hc-install v0.7.0 // MPL 2.0
 	github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0
 	github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0
 	github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause
@ -49,7 +49,6 @@ require (
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect

go.sum

@ -8,8 +8,8 @@ cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg=
@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.44.0 h1:9/FZACv4EFQIOYxfwYVKnY7v46xio9FKCw9tpKB2O/s=
-github.com/databricks/databricks-sdk-go v0.44.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
+github.com/databricks/databricks-sdk-go v0.45.0 h1:wdx5Wm/ESrahdHeq62WrjLeGjV4r722LLanD8ahI0Mo=
+github.com/databricks/databricks-sdk-go v0.45.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -99,14 +99,10 @@ github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw
 github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
-github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
-github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
-github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
 github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
 github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/hc-install v0.8.0 h1:LdpZeXkZYMQhoKPCecJHlKvUkQFixN/nvyR1CdfOLjI=
-github.com/hashicorp/hc-install v0.8.0/go.mod h1:+MwJYjDfCruSD/udvBmRB22Nlkwwkwf5sAB6uTIhSaU=
+github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk=
+github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA=
 github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ=
 github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg=
 github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec=
@ -11,6 +11,7 @@ import (
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/google/uuid"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@ -101,12 +102,15 @@ func TestAccAbortBind(t *testing.T) {
 		destroyBundle(t, ctx, bundleRoot)
 	})

+	// Bind should fail because prompting is not possible.
 	t.Setenv("BUNDLE_ROOT", bundleRoot)
+	t.Setenv("TERM", "dumb")
 	c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId))

-	// Simulate user aborting the bind. This is done by not providing any input to the prompt in non-interactive mode.
+	// Expect error suggesting to use --auto-approve
 	_, _, err = c.Run()
-	require.ErrorContains(t, err, "failed to bind the resource")
+	assert.ErrorContains(t, err, "failed to bind the resource")
+	assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")

 	err = deployBundle(t, ctx, bundleRoot)
 	require.NoError(t, err)
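Setting TERM=dumb is what drives this test: it makes the CLI treat the console as non-interactive, so the code path that would normally prompt for confirmation fails immediately instead of blocking, and the two assertions can pin both the generic bind failure and the exact --auto-approve guidance. The deploy tests later in this diff rely on the same trick.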
New file: bundle template schema for the recreate_pipeline fixture.

@ -0,0 +1,8 @@
{
    "properties": {
        "unique_id": {
            "type": "string",
            "description": "Unique ID for the schema and pipeline names"
        }
    }
}
New file: databricks.yml for the recreate_pipeline fixture.

@ -0,0 +1,25 @@
bundle:
  name: "bundle-playground"

variables:
  catalog:
    description: The catalog the DLT pipeline should use.
    default: main

resources:
  pipelines:
    foo:
      name: test-pipeline-{{.unique_id}}
      libraries:
        - notebook:
            path: ./nb.sql
      development: true
      catalog: ${var.catalog}

include:
  - "*.yml"

targets:
  development:
    default: true
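This fixture backs the recreate scenario exercised later in this diff: the first deploy uses the default main catalog, and redeploying with --var="catalog=whatever" changes the pipeline's catalog, one of the properties the warning text calls out as forcing a DLT pipeline recreate (see TestAccBundlePipelineRecreateWithoutAutoApprove below).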
New file: nb.sql for the recreate_pipeline fixture.

@ -0,0 +1,2 @@
-- Databricks notebook source
select 1
@ -13,6 +13,7 @@ import (
|
||||||
"github.com/databricks/cli/cmd/root"
|
"github.com/databricks/cli/cmd/root"
|
||||||
"github.com/databricks/cli/internal"
|
"github.com/databricks/cli/internal"
|
||||||
"github.com/databricks/cli/internal/acc"
|
"github.com/databricks/cli/internal/acc"
|
||||||
|
"github.com/databricks/cli/libs/env"
|
||||||
"github.com/databricks/databricks-sdk-go"
|
"github.com/databricks/databricks-sdk-go"
|
||||||
"github.com/databricks/databricks-sdk-go/apierr"
|
"github.com/databricks/databricks-sdk-go/apierr"
|
||||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||||
|
@ -119,7 +120,126 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) {
|
||||||
t.Setenv("BUNDLE_ROOT", bundleRoot)
|
t.Setenv("BUNDLE_ROOT", bundleRoot)
|
||||||
t.Setenv("TERM", "dumb")
|
t.Setenv("TERM", "dumb")
|
||||||
c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock")
|
c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock")
|
||||||
stdout, _, err := c.Run()
|
stdout, stderr, err := c.Run()
|
||||||
|
|
||||||
assert.EqualError(t, err, root.ErrAlreadyPrinted.Error())
|
assert.EqualError(t, err, root.ErrAlreadyPrinted.Error())
|
||||||
|
assert.Contains(t, stderr.String(), "The following UC schemas will be deleted or recreated. Any underlying data may be lost:\n delete schema bar")
|
||||||
assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
|
assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) {
|
||||||
|
ctx, wt := acc.WorkspaceTest(t)
|
||||||
|
w := wt.W
|
||||||
|
|
||||||
|
nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV"))
|
||||||
|
uniqueId := uuid.New().String()
|
||||||
|
bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
|
||||||
|
"unique_id": uniqueId,
|
||||||
|
"node_type_id": nodeTypeId,
|
||||||
|
"spark_version": defaultSparkVersion,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// deploy pipeline
|
||||||
|
err = deployBundle(t, ctx, bundleRoot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// assert pipeline is created
|
||||||
|
pipelineName := "test-bundle-pipeline-" + uniqueId
|
||||||
|
pipeline, err := w.Pipelines.GetByName(ctx, pipelineName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, pipeline.Name, pipelineName)
|
||||||
|
|
||||||
|
// assert job is created
|
||||||
|
jobName := "test-bundle-job-" + uniqueId
|
||||||
|
job, err := w.Jobs.GetBySettingsName(ctx, jobName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, job.Settings.Name, jobName)
|
||||||
|
|
||||||
|
// delete resources.yml
|
||||||
|
err = os.Remove(filepath.Join(bundleRoot, "resources.yml"))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Redeploy the bundle. Expect it to fail because deleting the pipeline requires --auto-approve.
|
||||||
|
t.Setenv("BUNDLE_ROOT", bundleRoot)
|
||||||
|
t.Setenv("TERM", "dumb")
|
||||||
|
c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock")
|
||||||
|
stdout, stderr, err := c.Run()
|
||||||
|
|
||||||
|
assert.EqualError(t, err, root.ErrAlreadyPrinted.Error())
|
||||||
|
+	assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following DLT Pipelines along with the
+Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the Pipelines will
+restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline
+properties such as the 'catalog' or 'storage' are changed:
+delete pipeline bar`)
+	assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
+}
+
+func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) {
+	ctx, wt := acc.UcWorkspaceTest(t)
+	w := wt.W
+	uniqueId := uuid.New().String()
+
+	bundleRoot, err := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{
+		"unique_id": uniqueId,
+	})
+	require.NoError(t, err)
+
+	err = deployBundle(t, ctx, bundleRoot)
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		destroyBundle(t, ctx, bundleRoot)
+	})
+
+	// Assert the pipeline is created
+	pipelineName := "test-pipeline-" + uniqueId
+	pipeline, err := w.Pipelines.GetByName(ctx, pipelineName)
+	require.NoError(t, err)
+	require.Equal(t, pipelineName, pipeline.Name)
+
+	// Redeploy the bundle, pointing the DLT pipeline to a different UC catalog.
+	t.Setenv("BUNDLE_ROOT", bundleRoot)
+	t.Setenv("TERM", "dumb")
+	c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"")
+	stdout, stderr, err := c.Run()
+
+	assert.EqualError(t, err, root.ErrAlreadyPrinted.Error())
+	assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following DLT Pipelines along with the
+Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the Pipelines will
+restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline
+properties such as the 'catalog' or 'storage' are changed:
+recreate pipeline foo`)
+	assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
+}
+
+func TestAccDeployBasicBundleLogs(t *testing.T) {
+	ctx, wt := acc.WorkspaceTest(t)
+
+	nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV"))
+	uniqueId := uuid.New().String()
+	root, err := initTestTemplate(t, ctx, "basic", map[string]any{
+		"unique_id":     uniqueId,
+		"node_type_id":  nodeTypeId,
+		"spark_version": defaultSparkVersion,
+	})
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		err = destroyBundle(t, ctx, root)
+		require.NoError(t, err)
+	})
+
+	currentUser, err := wt.W.CurrentUser.Me(ctx)
+	require.NoError(t, err)
+
+	stdout, stderr := blackBoxRun(t, root, "bundle", "deploy")
+	assert.Equal(t, strings.Join([]string{
+		fmt.Sprintf("Uploading bundle files to /Users/%s/.bundle/%s/files...", currentUser.UserName, uniqueId),
+		"Deploying resources...",
+		"Updating deployment state...",
+		"Deployment complete!\n",
+	}, "\n"), stderr)
+	assert.Equal(t, "", stdout)
+}
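
Note, as a hedged sketch and not part of this commit: the two *WithoutAutoApprove tests force a non-interactive terminal with TERM=dumb so the deploy aborts instead of prompting. Reusing the test runner shown above, the same recreate deploy would presumably be approved non-interactively by passing the --auto-approve flag named in the asserted message:

    // Sketch only, under the assumptions above; behavior not verified here.
    c := internal.NewCobraTestRunnerWithContext(t, ctx,
    	"bundle", "deploy", "--force-lock", "--auto-approve", "--var=\"catalog=whatever\"")
    _, _, err := c.Run()
    require.NoError(t, err) // with --auto-approve the destructive recreate proceeds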
@@ -1,10 +1,12 @@
 package bundle
 
 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"strings"
 	"testing"
@@ -15,6 +17,7 @@ import (
 	"github.com/databricks/cli/libs/env"
 	"github.com/databricks/cli/libs/flags"
 	"github.com/databricks/cli/libs/template"
+	"github.com/databricks/cli/libs/vfs"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/stretchr/testify/require"
 )
@@ -114,3 +117,29 @@ func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, unique
 	root := fmt.Sprintf("/Users/%s/.bundle/%s", me.UserName, uniqueId)
 	return root
 }
+
+func blackBoxRun(t *testing.T, root string, args ...string) (stdout string, stderr string) {
+	cwd := vfs.MustNew(".")
+	gitRoot, err := vfs.FindLeafInTree(cwd, ".git")
+	require.NoError(t, err)
+
+	t.Setenv("BUNDLE_ROOT", root)
+
+	// Create the command
+	cmd := exec.Command("go", append([]string{"run", "main.go"}, args...)...)
+	cmd.Dir = gitRoot.Native()
+
+	// Create buffers to capture output
+	var outBuffer, errBuffer bytes.Buffer
+	cmd.Stdout = &outBuffer
+	cmd.Stderr = &errBuffer
+
+	// Run the command
+	err = cmd.Run()
+	require.NoError(t, err)
+
+	// Get the output
+	stdout = outBuffer.String()
+	stderr = errBuffer.String()
+	return
+}
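
Note, not part of the diff: blackBoxRun shells out to `go run main.go` instead of invoking the command in-process, so the log assertions above exercise the exact stdout/stderr byte streams a user would see. A self-contained sketch of the same capture pattern, using `go version` as a stand-in child process:

    package main
    
    import (
    	"bytes"
    	"fmt"
    	"os/exec"
    )
    
    func main() {
    	// Separate buffers let a caller assert on each stream independently.
    	var out, errOut bytes.Buffer
    	cmd := exec.Command("go", "version") // stand-in for "go run main.go <args>"
    	cmd.Stdout = &out
    	cmd.Stderr = &errOut
    	if err := cmd.Run(); err != nil {
    		fmt.Println("run failed:", err, errOut.String())
    		return
    	}
    	fmt.Print(out.String())
    }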
@@ -5,7 +5,6 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
-	"fmt"
 	"io"
 	"io/fs"
 	"path"
@@ -722,67 +721,6 @@ func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) {
 	assert.ErrorIs(t, err, fs.ErrNotExist)
 }
 
-func TestAccFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) {
-	t.Parallel()
-
-	tcases := []struct {
-		files []struct{ name, content string }
-		name  string
-	}{
-		{
-			name: "python",
-			files: []struct{ name, content string }{
-				{"foo.py", "print('foo')"},
-				{"foo.py", "# Databricks notebook source\nprint('foo')"},
-			},
-		},
-		{
-			name: "r",
-			files: []struct{ name, content string }{
-				{"foo.r", "print('foo')"},
-				{"foo.r", "# Databricks notebook source\nprint('foo')"},
-			},
-		},
-		{
-			name: "sql",
-			files: []struct{ name, content string }{
-				{"foo.sql", "SELECT 'foo'"},
-				{"foo.sql", "-- Databricks notebook source\nSELECT 'foo'"},
-			},
-		},
-		{
-			name: "scala",
-			files: []struct{ name, content string }{
-				{"foo.scala", "println('foo')"},
-				{"foo.scala", "// Databricks notebook source\nprintln('foo')"},
-			},
-		},
-		// We don't need to test this for ipynb notebooks. The import API
-		// fails when the file extension is .ipynb but the content is not a
-		// valid juptyer notebook.
-	}
-
-	for i := range tcases {
-		tc := tcases[i]
-		t.Run(tc.name, func(t *testing.T) {
-			t.Parallel()
-
-			ctx := context.Background()
-			wf, tmpDir := setupWsfsExtensionsFiler(t)
-
-			for _, f := range tc.files {
-				err := wf.Write(ctx, f.name, strings.NewReader(f.content), filer.CreateParentDirectories)
-				require.NoError(t, err)
-			}
-
-			_, err := wf.ReadDir(ctx, ".")
-			assert.ErrorAs(t, err, &filer.DuplicatePathError{})
-			assert.ErrorContains(t, err, fmt.Sprintf("failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at %s and FILE at %s resolve to the same name %s. Changing the name of one of these objects will resolve this issue", path.Join(tmpDir, "foo"), path.Join(tmpDir, tc.files[0].name), tc.files[0].name))
-		})
-	}
-
-}
-
 func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) {
 	t.Parallel()
@@ -267,6 +267,8 @@ func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value, path
 		out = strconv.FormatInt(src.MustInt(), 10)
 	case dyn.KindFloat:
 		out = strconv.FormatFloat(src.MustFloat(), 'f', -1, 64)
+	case dyn.KindTime:
+		out = src.MustTime().String()
 	case dyn.KindNil:
 		// Return a warning if the field is present but has a null value.
 		return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindString, src, path))
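
Note, not part of the diff: the new KindTime case calls MustTime().String(), which per the Time type below returns the text that was originally parsed, not a reformatted timestamp. A hedged sketch of the effect, assuming the dyn accessors introduced elsewhere in this diff:

    package main
    
    import (
    	"fmt"
    
    	"github.com/databricks/cli/libs/dyn"
    )
    
    func main() {
    	// The user wrote "2024-1-2" (short fields); String() returns that
    	// exact literal, not a canonicalized "2024-01-02T00:00:00Z".
    	v := dyn.V(dyn.MustTime("2024-1-2"))
    	if tm, ok := v.AsTime(); ok {
    		fmt.Println(tm.String()) // expected: 2024-1-2
    	}
    }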
@@ -569,6 +569,14 @@ func TestNormalizeStringFromFloat(t *testing.T) {
 	assert.Equal(t, dyn.NewValue("1.2", vin.Locations()), vout)
 }
 
+func TestNormalizeStringFromTime(t *testing.T) {
+	var typ string
+	vin := dyn.NewValue(dyn.MustTime("2024-08-29"), []dyn.Location{{File: "file", Line: 1, Column: 1}})
+	vout, err := Normalize(&typ, vin)
+	assert.Empty(t, err)
+	assert.Equal(t, dyn.NewValue("2024-08-29", vin.Locations()), vout)
+}
+
 func TestNormalizeStringError(t *testing.T) {
 	var typ string
 	vin := dyn.V(map[string]dyn.Value{"an": dyn.V("error")})
@@ -2,7 +2,6 @@ package dyn
 
 import (
 	"fmt"
-	"time"
 )
 
 type Kind int
@@ -34,7 +33,7 @@ func kindOf(v any) Kind {
 		return KindInt
 	case float32, float64:
 		return KindFloat
-	case time.Time:
+	case Time:
 		return KindTime
 	case nil:
 		return KindNil
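
Note, not part of the diff: kindOf now maps the package's own Time wrapper, rather than time.Time, to KindTime. A hedged sketch, assuming Value.Kind() is the kind accessor:

    package main
    
    import (
    	"fmt"
    	"time"
    
    	"github.com/databricks/cli/libs/dyn"
    )
    
    func main() {
    	// dyn.Time values report KindTime...
    	v := dyn.V(dyn.MustTime("2024-08-29"))
    	fmt.Println(v.Kind() == dyn.KindTime) // expected: true
    
    	// ...and a stdlib time.Time must now be wrapped via FromTime first,
    	// since a raw time.Time no longer matches this switch.
    	w := dyn.V(dyn.FromTime(time.Now()))
    	fmt.Println(w.Kind() == dyn.KindTime) // expected: true
    }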
@@ -83,16 +83,16 @@ func TestOverride_Primitive(t *testing.T) {
 		{
 			name:     "time (updated)",
 			state:    visitorState{updated: []string{"root"}},
-			left:     dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}),
-			right:    dyn.NewValue(time.UnixMilli(10001), []dyn.Location{rightLocation}),
-			expected: dyn.NewValue(time.UnixMilli(10001), []dyn.Location{rightLocation}),
+			left:     dyn.NewValue(dyn.FromTime(time.UnixMilli(10000)), []dyn.Location{leftLocation}),
+			right:    dyn.NewValue(dyn.FromTime(time.UnixMilli(10001)), []dyn.Location{rightLocation}),
+			expected: dyn.NewValue(dyn.FromTime(time.UnixMilli(10001)), []dyn.Location{rightLocation}),
 		},
 		{
 			name:     "time (not updated)",
 			state:    visitorState{},
-			left:     dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}),
-			right:    dyn.NewValue(time.UnixMilli(10000), []dyn.Location{rightLocation}),
-			expected: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}),
+			left:     dyn.NewValue(dyn.FromTime(time.UnixMilli(10000)), []dyn.Location{leftLocation}),
+			right:    dyn.NewValue(dyn.FromTime(time.UnixMilli(10000)), []dyn.Location{rightLocation}),
+			expected: dyn.NewValue(dyn.FromTime(time.UnixMilli(10000)), []dyn.Location{leftLocation}),
 		},
 		{
 			name: "different types (updated)",
@@ -0,0 +1,62 @@
+package dyn
+
+import (
+	"fmt"
+	"time"
+)
+
+// Time represents a time-like primitive value.
+//
+// It represents a timestamp and includes the original string value
+// that was parsed to create the timestamp. This makes it possible
+// to coalesce a value that YAML interprets as a timestamp back into
+// a string without losing information.
+type Time struct {
+	t time.Time
+	s string
+}
+
+// NewTime creates a new Time from the given string.
+func NewTime(str string) (Time, error) {
+	// Try a couple of layouts
+	for _, layout := range []string{
+		"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+		"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+		"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+		"2006-1-2",                        // date only
+	} {
+		t, terr := time.Parse(layout, str)
+		if terr == nil {
+			return Time{t: t, s: str}, nil
+		}
+	}
+
+	return Time{}, fmt.Errorf("invalid time value: %q", str)
+}
+
+// MustTime creates a new Time from the given string.
+// It panics if the string cannot be parsed.
+func MustTime(str string) Time {
+	t, err := NewTime(str)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// FromTime creates a new Time from the given time.Time.
+// It uses the RFC3339Nano format for its string representation.
+// This guarantees that it can roundtrip into a string without losing information.
+func FromTime(t time.Time) Time {
+	return Time{t: t, s: t.Format(time.RFC3339Nano)}
+}
+
+// Time returns the time.Time value.
+func (t Time) Time() time.Time {
+	return t.t
+}
+
+// String returns the original string value that was parsed to create the timestamp.
+func (t Time) String() string {
+	return t.s
+}
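
Note, not part of the diff: the wrapper exists so a YAML scalar that happens to parse as a timestamp can still be rendered back as the exact text the user wrote. A short roundtrip sketch built only on the functions above:

    package main
    
    import (
    	"fmt"
    
    	"github.com/databricks/cli/libs/dyn"
    )
    
    func main() {
    	// Date-only input, as YAML would classify it.
    	tm := dyn.MustTime("2024-08-29")
    	fmt.Println(tm.Time().UTC()) // 2024-08-29 00:00:00 +0000 UTC
    	fmt.Println(tm.String())     // 2024-08-29: the original text survives
    
    	// FromTime uses RFC3339Nano, so its string form parses back losslessly.
    	rt, err := dyn.NewTime(dyn.FromTime(tm.Time()).String())
    	fmt.Println(err == nil && tm.Time().Equal(rt.Time())) // true
    }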
@@ -0,0 +1,41 @@
+package dyn_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/databricks/cli/libs/dyn"
+	assert "github.com/databricks/cli/libs/dyn/dynassert"
+)
+
+func TestTimeValid(t *testing.T) {
+	for _, tc := range []string{
+		"2024-08-29",
+		"2024-01-15T12:34:56.789012345Z",
+	} {
+		tm, err := dyn.NewTime(tc)
+		if assert.NoError(t, err) {
+			assert.NotEqual(t, time.Time{}, tm.Time())
+			assert.Equal(t, tc, tm.String())
+		}
+	}
+}
+
+func TestTimeInvalid(t *testing.T) {
+	tm, err := dyn.NewTime("invalid")
+	assert.Error(t, err)
+	assert.Equal(t, dyn.Time{}, tm)
+}
+
+func TestTimeFromTime(t *testing.T) {
+	tref := time.Now()
+	t1 := dyn.FromTime(tref)
+
+	// Verify that the underlying value is the same.
+	assert.Equal(t, tref, t1.Time())
+
+	// Verify that the string representation can be used to construct the same.
+	t2, err := dyn.NewTime(t1.String())
+	assert.NoError(t, err)
+	assert.True(t, t1.Time().Equal(t2.Time()))
+}
@@ -127,7 +127,8 @@ func (v Value) AsAny() any {
 	case KindFloat:
 		return v.v
 	case KindTime:
-		return v.v
+		t := v.v.(Time)
+		return t.Time()
 	default:
 		// Panic because we only want to deal with known types.
 		panic(fmt.Sprintf("invalid kind: %d", v.k))
@@ -2,7 +2,6 @@ package dyn
 
 import (
 	"fmt"
-	"time"
 )
 
 // AsMap returns the underlying mapping if this value is a map,
@@ -123,14 +122,14 @@ func (v Value) MustFloat() float64 {
 
 // AsTime returns the underlying time if this value is a time,
 // the zero value and false otherwise.
-func (v Value) AsTime() (time.Time, bool) {
-	vv, ok := v.v.(time.Time)
+func (v Value) AsTime() (Time, bool) {
+	vv, ok := v.v.(Time)
 	return vv, ok
 }
 
 // MustTime returns the underlying time if this value is a time,
 // panics otherwise.
-func (v Value) MustTime() time.Time {
+func (v Value) MustTime() Time {
 	vv, ok := v.AsTime()
 	if !ok || v.k != KindTime {
 		panic(fmt.Sprintf("expected kind %s, got %s", KindTime, v.k))