mirror of https://github.com/databricks/cli.git

commit d2bead3fe6: Merge branch 'main' into feature/logout

@@ -1 +1 @@
-d05898328669a3f8ab0c2ecee37db2673d3ea3f7
+6f6b1371e640f2dfeba72d365ac566368656f6b6

@@ -6,6 +6,7 @@ cmd/account/cmd.go linguist-generated=true
 cmd/account/credentials/credentials.go linguist-generated=true
 cmd/account/csp-enablement-account/csp-enablement-account.go linguist-generated=true
 cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true
+cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true
 cmd/account/encryption-keys/encryption-keys.go linguist-generated=true
 cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true
 cmd/account/groups/groups.go linguist-generated=true
@@ -52,6 +53,7 @@ cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
 cmd/workspace/dashboards/dashboards.go linguist-generated=true
 cmd/workspace/data-sources/data-sources.go linguist-generated=true
 cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
+cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
 cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
 cmd/workspace/experiments/experiments.go linguist-generated=true
 cmd/workspace/external-locations/external-locations.go linguist-generated=true
@@ -108,6 +110,7 @@ cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
 cmd/workspace/system-schemas/system-schemas.go linguist-generated=true
 cmd/workspace/table-constraints/table-constraints.go linguist-generated=true
 cmd/workspace/tables/tables.go linguist-generated=true
+cmd/workspace/temporary-table-credentials/temporary-table-credentials.go linguist-generated=true
 cmd/workspace/token-management/token-management.go linguist-generated=true
 cmd/workspace/tokens/tokens.go linguist-generated=true
 cmd/workspace/users/users.go linguist-generated=true

@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

       - name: Setup Python
         uses: actions/setup-python@v5
@@ -68,7 +68,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
        with:
-          go-version: 1.22.x
+          go-version: 1.22.7

           # No need to download cached dependencies when running gofmt.
           cache: false
@@ -100,7 +100,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

       # Github repo: https://github.com/ajv-validator/ajv-cli
       - name: Install ajv-cli

@@ -21,7 +21,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

           # The default cache key for this action considers only the `go.sum` file.
           # We include .goreleaser.yaml here to differentiate from the cache used by the push action

@@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

           # The default cache key for this action considers only the `go.sum` file.
           # We include .goreleaser.yaml here to differentiate from the cache used by the push action

CHANGELOG.md
@@ -1,5 +1,45 @@
 # Version changelog

+## [Release] Release v0.229.0
+
+Bundles:
+* Added support for creating all-purpose clusters ([#1698](https://github.com/databricks/cli/pull/1698)).
+* Reduce time until the prompt is shown for bundle run ([#1727](https://github.com/databricks/cli/pull/1727)).
+* Use Unity Catalog for pipelines in the default-python template ([#1766](https://github.com/databricks/cli/pull/1766)).
+* Add verbose flag to the "bundle deploy" command ([#1774](https://github.com/databricks/cli/pull/1774)).
+* Fixed full variable override detection ([#1787](https://github.com/databricks/cli/pull/1787)).
+* Add sub-extension to resource files in built-in templates ([#1777](https://github.com/databricks/cli/pull/1777)).
+* Fix panic in `apply_presets.go` ([#1796](https://github.com/databricks/cli/pull/1796)).
+
+Internal:
+* Assert tokens are redacted in origin URL when username is not specified ([#1785](https://github.com/databricks/cli/pull/1785)).
+* Refactor jobs path translation ([#1782](https://github.com/databricks/cli/pull/1782)).
+* Add JobTaskClusterSpec validate mutator ([#1784](https://github.com/databricks/cli/pull/1784)).
+* Pin Go toolchain to 1.22.7 ([#1790](https://github.com/databricks/cli/pull/1790)).
+* Modify SetLocation test utility to take full locations as argument ([#1788](https://github.com/databricks/cli/pull/1788)).
+* Simplified isFullVariableOverrideDef implementation ([#1791](https://github.com/databricks/cli/pull/1791)).
+* Sort tasks by `task_key` before generating the Terraform configuration ([#1776](https://github.com/databricks/cli/pull/1776)).
+* Trim trailing whitespace ([#1794](https://github.com/databricks/cli/pull/1794)).
+* Move trampoline code into trampoline package ([#1793](https://github.com/databricks/cli/pull/1793)).
+* Rename `RootPath` -> `BundleRootPath` ([#1792](https://github.com/databricks/cli/pull/1792)).
+
+API Changes:
+* Changed `databricks apps delete` command to return .
+* Changed `databricks apps deploy` command with new required argument order.
+* Changed `databricks apps start` command to return .
+* Changed `databricks apps stop` command to return .
+* Added `databricks temporary-table-credentials` command group.
+* Added `databricks serving-endpoints put-ai-gateway` command.
+* Added `databricks disable-legacy-access` command group.
+* Added `databricks account disable-legacy-features` command group.
+
+OpenAPI commit 6f6b1371e640f2dfeba72d365ac566368656f6b6 (2024-09-19)
+Dependency updates:
+* Upgrade to Go SDK 0.47.0 ([#1799](https://github.com/databricks/cli/pull/1799)).
+* Upgrade to TF provider 1.52 ([#1781](https://github.com/databricks/cli/pull/1781)).
+* Bump golang.org/x/mod from 0.20.0 to 0.21.0 ([#1758](https://github.com/databricks/cli/pull/1758)).
+* Bump github.com/hashicorp/hc-install from 0.7.0 to 0.9.0 ([#1772](https://github.com/databricks/cli/pull/1772)).
+
 ## [Release] Release v0.228.1

 Bundles:

@@ -10,6 +10,7 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/internal/testutil"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -23,7 +24,7 @@ func TestExpandGlobs_Nominal(t *testing.T) {
 	testutil.Touch(t, tmpDir, "bc.txt")

 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Artifacts: config.Artifacts{
 				"test": {
@@ -36,7 +37,7 @@ func TestExpandGlobs_Nominal(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+	bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

 	ctx := context.Background()
 	diags := bundle.Apply(ctx, b, bundle.Seq(
@@ -62,7 +63,7 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
 	tmpDir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Artifacts: config.Artifacts{
 				"test": {
@@ -77,7 +78,7 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+	bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

 	ctx := context.Background()
 	diags := bundle.Apply(ctx, b, bundle.Seq(
@@ -110,7 +111,7 @@ func TestExpandGlobs_NoMatches(t *testing.T) {
 	testutil.Touch(t, tmpDir, "b2.txt")

 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Artifacts: config.Artifacts{
 				"test": {
@@ -125,7 +126,7 @@ func TestExpandGlobs_NoMatches(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+	bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

 	ctx := context.Background()
 	diags := bundle.Apply(ctx, b, bundle.Seq(

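The call sites above show the updated `bundletest.SetLocation` signature: it now takes a slice of `dyn.Location` values instead of a single file path, so tests can attach complete source locations (file, line, column) to configuration values. A minimal sketch of such a helper, assuming the `dyn.Value` API shown elsewhere in this diff; the real utility lives in `bundle/internal/bundletest` and may differ in detail:

	package bundletest

	import (
		"github.com/databricks/cli/bundle"
		"github.com/databricks/cli/libs/dyn"
	)

	// SetLocation walks the dynamic configuration tree and attaches the given
	// locations to every value at or below the path prefix. Sketch only.
	func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) {
		start := dyn.MustPathFromString(prefix)
		err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
			return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
				// Re-create values under the prefix with the locations attached.
				if p.HasPrefix(start) {
					return v.WithLocations(locations), nil
				}
				return v, nil
			})
		})
		if err != nil {
			panic(err)
		}
	}
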
@@ -47,7 +47,7 @@ func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics

 	// If artifact path is not provided, use bundle root dir
 	if artifact.Path == "" {
-		artifact.Path = b.RootPath
+		artifact.Path = b.BundleRootPath
 	}

 	if !filepath.IsAbs(artifact.Path) {

@@ -35,21 +35,21 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
 	log.Infof(ctx, "Detecting Python wheel project...")

 	// checking if there is setup.py in the bundle root
-	setupPy := filepath.Join(b.RootPath, "setup.py")
+	setupPy := filepath.Join(b.BundleRootPath, "setup.py")
 	_, err := os.Stat(setupPy)
 	if err != nil {
 		log.Infof(ctx, "No Python wheel project found at bundle root folder")
 		return nil
 	}

-	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.RootPath))
+	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath))
 	module := extractModuleName(setupPy)

 	if b.Config.Artifacts == nil {
 		b.Config.Artifacts = make(map[string]*config.Artifact)
 	}

-	pkgPath, err := filepath.Abs(b.RootPath)
+	pkgPath, err := filepath.Abs(b.BundleRootPath)
 	if err != nil {
 		return diag.FromErr(err)
 	}

@@ -31,22 +31,26 @@ import (
 const internalFolder = ".internal"

 type Bundle struct {
-	// RootPath contains the directory path to the root of the bundle.
+	// BundleRootPath is the local path to the root directory of the bundle.
 	// It is set when we instantiate a new bundle instance.
-	RootPath string
+	BundleRootPath string

-	// BundleRoot is a virtual filesystem path to the root of the bundle.
+	// BundleRoot is a virtual filesystem path to [BundleRootPath].
 	// Exclusively use this field for filesystem operations.
 	BundleRoot vfs.Path

-	// SyncRoot is a virtual filesystem path to the root directory of the files that are synchronized to the workspace.
-	// It can be an ancestor to [BundleRoot], but not a descendant; that is, [SyncRoot] must contain [BundleRoot].
-	SyncRoot vfs.Path
-
 	// SyncRootPath is the local path to the root directory of files that are synchronized to the workspace.
-	// It is equal to `SyncRoot.Native()` and included as dedicated field for convenient access.
+	// By default, it is the same as [BundleRootPath].
+	// If it is different, it must be an ancestor to [BundleRootPath].
+	// That is, [SyncRootPath] must contain [BundleRootPath].
 	SyncRootPath string

+	// SyncRoot is a virtual filesystem path to [SyncRootPath].
+	// Exclusively use this field for filesystem operations.
+	SyncRoot vfs.Path
+
 	// Config contains the bundle configuration.
 	// It is loaded from the bundle configuration files and mutators may update it.
 	Config config.Root

 	// Metadata about the bundle deployment. This is the interface Databricks services
@@ -84,14 +88,14 @@ type Bundle struct {

 func Load(ctx context.Context, path string) (*Bundle, error) {
 	b := &Bundle{
-		RootPath:   filepath.Clean(path),
-		BundleRoot: vfs.MustNew(path),
+		BundleRootPath: filepath.Clean(path),
+		BundleRoot:     vfs.MustNew(path),
 	}
 	configFile, err := config.FileNames.FindInPath(path)
 	if err != nil {
 		return nil, err
 	}
-	log.Debugf(ctx, "Found bundle root at %s (file %s)", b.RootPath, configFile)
+	log.Debugf(ctx, "Found bundle root at %s (file %s)", b.BundleRootPath, configFile)
 	return b, nil
 }

@@ -160,7 +164,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error)
 	if !exists || cacheDirName == "" {
 		cacheDirName = filepath.Join(
 			// Anchor at bundle root directory.
-			b.RootPath,
+			b.BundleRootPath,
 			// Static cache directory.
 			".databricks",
 			"bundle",
@@ -212,7 +216,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
+	internalDirRel, err := filepath.Rel(b.BundleRootPath, internalDir)
 	if err != nil {
 		return nil, err
 	}

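The reworked doc comments on the struct encode an ordering invariant between the two local paths: the sync root must be equal to, or an ancestor of, the bundle root. A standalone sketch of a check for that invariant, using only the standard library (a hypothetical helper, not part of this commit):

	package main

	import (
		"fmt"
		"path/filepath"
		"strings"
	)

	// syncRootContainsBundleRoot checks the invariant from the struct comments:
	// [SyncRootPath] must contain [BundleRootPath].
	func syncRootContainsBundleRoot(syncRootPath, bundleRootPath string) bool {
		rel, err := filepath.Rel(syncRootPath, bundleRootPath)
		if err != nil {
			return false
		}
		// A relative path that escapes upward means the bundle root lies
		// outside the sync root.
		return rel == "." || !strings.HasPrefix(rel, "..")
	}

	func main() {
		fmt.Println(syncRootContainsBundleRoot("/repo", "/repo/bundle")) // true
		fmt.Println(syncRootContainsBundleRoot("/repo/bundle", "/repo")) // false
	}
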
@@ -21,7 +21,7 @@ func (r ReadOnlyBundle) Config() config.Root {
 }

 func (r ReadOnlyBundle) RootPath() string {
-	return r.b.RootPath
+	return r.b.BundleRootPath
 }

 func (r ReadOnlyBundle) BundleRoot() vfs.Path {

@@ -79,7 +79,7 @@ func TestBundleMustLoadSuccess(t *testing.T) {
 	t.Setenv(env.RootVariable, "./tests/basic")
 	b, err := MustLoad(context.Background())
 	require.NoError(t, err)
-	assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
+	assert.Equal(t, "tests/basic", filepath.ToSlash(b.BundleRootPath))
 }

 func TestBundleMustLoadFailureWithEnv(t *testing.T) {
@@ -98,7 +98,7 @@ func TestBundleTryLoadSuccess(t *testing.T) {
 	t.Setenv(env.RootVariable, "./tests/basic")
 	b, err := TryLoad(context.Background())
 	require.NoError(t, err)
-	assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
+	assert.Equal(t, "tests/basic", filepath.ToSlash(b.BundleRootPath))
 }

 func TestBundleTryLoadFailureWithEnv(t *testing.T) {

@@ -20,7 +20,7 @@ func (m *entryPoint) Name() string {
 }

 func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
-	path, err := config.FileNames.FindInPath(b.RootPath)
+	path, err := config.FileNames.FindInPath(b.BundleRootPath)
 	if err != nil {
 		return diag.FromErr(err)
 	}

@@ -18,7 +18,7 @@ func TestEntryPointNoRootPath(t *testing.T) {

 func TestEntryPoint(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "testdata",
+		BundleRootPath: "testdata",
 	}
 	diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
 	require.NoError(t, diags.Error())

@@ -14,7 +14,7 @@ import (

 func TestProcessInclude(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "testdata",
+		BundleRootPath: "testdata",
 		Config: config.Root{
 			Workspace: config.Workspace{
 				Host: "foo",
@@ -22,7 +22,7 @@ func TestProcessInclude(t *testing.T) {
 		},
 	}

-	m := loader.ProcessInclude(filepath.Join(b.RootPath, "host.yml"), "host.yml")
+	m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, "host.yml"), "host.yml")
 	assert.Equal(t, "ProcessInclude(host.yml)", m.Name())

 	// Assert the host value prior to applying the mutator

@@ -47,7 +47,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
 		}

 		// Anchor includes to the bundle root path.
-		matches, err := filepath.Glob(filepath.Join(b.RootPath, entry))
+		matches, err := filepath.Glob(filepath.Join(b.BundleRootPath, entry))
 		if err != nil {
 			return diag.FromErr(err)
 		}
@@ -61,7 +61,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
 		// Filter matches to ones we haven't seen yet.
 		var includes []string
 		for _, match := range matches {
-			rel, err := filepath.Rel(b.RootPath, match)
+			rel, err := filepath.Rel(b.BundleRootPath, match)
 			if err != nil {
 				return diag.FromErr(err)
 			}
@@ -76,7 +76,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
 		slices.Sort(includes)
 		files = append(files, includes...)
 		for _, include := range includes {
-			out = append(out, ProcessInclude(filepath.Join(b.RootPath, include), include))
+			out = append(out, ProcessInclude(filepath.Join(b.BundleRootPath, include), include))
 		}
 	}

@@ -15,7 +15,7 @@ import (

 func TestProcessRootIncludesEmpty(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: ".",
+		BundleRootPath: ".",
 	}
 	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
 	require.NoError(t, diags.Error())
@@ -30,7 +30,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: ".",
+		BundleRootPath: ".",
 		Config: config.Root{
 			Include: []string{
 				"/tmp/*.yml",
@@ -44,7 +44,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {

 func TestProcessRootIncludesSingleGlob(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Include: []string{
 				"*.yml",
@@ -52,9 +52,9 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
 		},
 	}

-	testutil.Touch(t, b.RootPath, "databricks.yml")
-	testutil.Touch(t, b.RootPath, "a.yml")
-	testutil.Touch(t, b.RootPath, "b.yml")
+	testutil.Touch(t, b.BundleRootPath, "databricks.yml")
+	testutil.Touch(t, b.BundleRootPath, "a.yml")
+	testutil.Touch(t, b.BundleRootPath, "b.yml")

 	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
 	require.NoError(t, diags.Error())
@@ -63,7 +63,7 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {

 func TestProcessRootIncludesMultiGlob(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Include: []string{
 				"a*.yml",
@@ -72,8 +72,8 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
 		},
 	}

-	testutil.Touch(t, b.RootPath, "a1.yml")
-	testutil.Touch(t, b.RootPath, "b1.yml")
+	testutil.Touch(t, b.BundleRootPath, "a1.yml")
+	testutil.Touch(t, b.BundleRootPath, "b1.yml")

 	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
 	require.NoError(t, diags.Error())
@@ -82,7 +82,7 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {

 func TestProcessRootIncludesRemoveDups(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Include: []string{
 				"*.yml",
@@ -91,7 +91,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
 		},
 	}

-	testutil.Touch(t, b.RootPath, "a.yml")
+	testutil.Touch(t, b.BundleRootPath, "a.yml")

 	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
 	require.NoError(t, diags.Error())
@@ -100,7 +100,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {

 func TestProcessRootIncludesNotExists(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Include: []string{
 				"notexist.yml",

@@ -35,8 +35,10 @@ func (m *applyPresets) Name() string {
 }

 func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	var diags diag.Diagnostics
+
 	if d := validatePauseStatus(b); d != nil {
-		return d
+		diags = diags.Extend(d)
 	}

 	r := b.Config.Resources
@@ -45,7 +47,11 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
 	tags := toTagArray(t.Tags)

 	// Jobs presets: Prefix, Tags, JobsMaxConcurrentRuns, TriggerPauseStatus
-	for _, j := range r.Jobs {
+	for key, j := range r.Jobs {
+		if j.JobSettings == nil {
+			diags = diags.Extend(diag.Errorf("job %s is not defined", key))
+			continue
+		}
 		j.Name = prefix + j.Name
 		if j.Tags == nil {
 			j.Tags = make(map[string]string)
@@ -77,20 +83,27 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
 	}

 	// Pipelines presets: Prefix, PipelinesDevelopment
-	for i := range r.Pipelines {
-		r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
+	for key, p := range r.Pipelines {
+		if p.PipelineSpec == nil {
+			diags = diags.Extend(diag.Errorf("pipeline %s is not defined", key))
+			continue
+		}
+		p.Name = prefix + p.Name
 		if config.IsExplicitlyEnabled(t.PipelinesDevelopment) {
-			r.Pipelines[i].Development = true
+			p.Development = true
 		}
 		if t.TriggerPauseStatus == config.Paused {
-			r.Pipelines[i].Continuous = false
+			p.Continuous = false
 		}

 		// As of 2024-06, pipelines don't yet support tags
 	}

 	// Models presets: Prefix, Tags
-	for _, m := range r.Models {
+	for key, m := range r.Models {
+		if m.Model == nil {
+			diags = diags.Extend(diag.Errorf("model %s is not defined", key))
+			continue
+		}
 		m.Name = prefix + m.Name
 		for _, t := range tags {
 			exists := slices.ContainsFunc(m.Tags, func(modelTag ml.ModelTag) bool {
@@ -104,7 +117,11 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
 	}

 	// Experiments presets: Prefix, Tags
-	for _, e := range r.Experiments {
+	for key, e := range r.Experiments {
+		if e.Experiment == nil {
+			diags = diags.Extend(diag.Errorf("experiment %s is not defined", key))
+			continue
+		}
 		filepath := e.Name
 		dir := path.Dir(filepath)
 		base := path.Base(filepath)
@@ -128,40 +145,60 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
 	}

 	// Model serving endpoint presets: Prefix
-	for i := range r.ModelServingEndpoints {
-		r.ModelServingEndpoints[i].Name = normalizePrefix(prefix) + r.ModelServingEndpoints[i].Name
+	for key, e := range r.ModelServingEndpoints {
+		if e.CreateServingEndpoint == nil {
+			diags = diags.Extend(diag.Errorf("model serving endpoint %s is not defined", key))
+			continue
+		}
+		e.Name = normalizePrefix(prefix) + e.Name

 		// As of 2024-06, model serving endpoints don't yet support tags
 	}

 	// Registered models presets: Prefix
-	for i := range r.RegisteredModels {
-		r.RegisteredModels[i].Name = normalizePrefix(prefix) + r.RegisteredModels[i].Name
+	for key, m := range r.RegisteredModels {
+		if m.CreateRegisteredModelRequest == nil {
+			diags = diags.Extend(diag.Errorf("registered model %s is not defined", key))
+			continue
+		}
+		m.Name = normalizePrefix(prefix) + m.Name

 		// As of 2024-06, registered models don't yet support tags
 	}

-	// Quality monitors presets: Prefix
+	// Quality monitors presets: Schedule
 	if t.TriggerPauseStatus == config.Paused {
-		for i := range r.QualityMonitors {
+		for key, q := range r.QualityMonitors {
+			if q.CreateMonitor == nil {
+				diags = diags.Extend(diag.Errorf("quality monitor %s is not defined", key))
+				continue
+			}
 			// Remove all schedules from monitors, since they don't support pausing/unpausing.
 			// Quality monitors might support the "pause" property in the future, so at the
 			// CLI level we do respect that property if it is set to "unpaused."
-			if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
-				r.QualityMonitors[i].Schedule = nil
+			if q.Schedule != nil && q.Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
+				q.Schedule = nil
 			}
 		}
 	}

 	// Schemas: Prefix
-	for i := range r.Schemas {
-		r.Schemas[i].Name = normalizePrefix(prefix) + r.Schemas[i].Name
+	for key, s := range r.Schemas {
+		if s.CreateSchema == nil {
+			diags = diags.Extend(diag.Errorf("schema %s is not defined", key))
+			continue
+		}
+		s.Name = normalizePrefix(prefix) + s.Name
 		// HTTP API for schemas doesn't yet support tags. It's only supported in
 		// the Databricks UI and via the SQL API.
 	}

 	// Clusters: Prefix, Tags
-	for _, c := range r.Clusters {
+	for key, c := range r.Clusters {
+		if c.ClusterSpec == nil {
+			diags = diags.Extend(diag.Errorf("cluster %s is not defined", key))
+			continue
+		}
 		c.ClusterName = prefix + c.ClusterName
 		if c.CustomTags == nil {
 			c.CustomTags = make(map[string]string)
@@ -175,7 +212,7 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
 		}
 	}

-	return nil
+	return diags
 }

 func validatePauseStatus(b *bundle.Bundle) diag.Diagnostics {

@@ -251,3 +251,116 @@ func TestApplyPresetsJobsMaxConcurrentRuns(t *testing.T) {
 		})
 	}
 }
+
+func TestApplyPresetsPrefixWithoutJobSettings(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {}, // no jobsettings inside
+				},
+			},
+			Presets: config.Presets{
+				NamePrefix: "prefix-",
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.ApplyPresets())
+
+	require.ErrorContains(t, diags.Error(), "job job1 is not defined")
+}
+
+func TestApplyPresetsResourceNotDefined(t *testing.T) {
+	tests := []struct {
+		resources config.Resources
+		error     string
+	}{
+		{
+			resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {}, // no jobsettings inside
+				},
+			},
+			error: "job job1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Pipelines: map[string]*resources.Pipeline{
+					"pipeline1": {}, // no pipelinespec inside
+				},
+			},
+			error: "pipeline pipeline1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Models: map[string]*resources.MlflowModel{
+					"model1": {}, // no model inside
+				},
+			},
+			error: "model model1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Experiments: map[string]*resources.MlflowExperiment{
+					"experiment1": {}, // no experiment inside
+				},
+			},
+			error: "experiment experiment1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
+					"endpoint1": {}, // no CreateServingEndpoint inside
+				},
+				RegisteredModels: map[string]*resources.RegisteredModel{
+					"model1": {}, // no CreateRegisteredModelRequest inside
+				},
+			},
+			error: "model serving endpoint endpoint1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				QualityMonitors: map[string]*resources.QualityMonitor{
+					"monitor1": {}, // no CreateMonitor inside
+				},
+			},
+			error: "quality monitor monitor1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Schemas: map[string]*resources.Schema{
+					"schema1": {}, // no CreateSchema inside
+				},
+			},
+			error: "schema schema1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Clusters: map[string]*resources.Cluster{
+					"cluster1": {}, // no ClusterSpec inside
+				},
+			},
+			error: "cluster cluster1 is not defined",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.error, func(t *testing.T) {
+			b := &bundle.Bundle{
+				Config: config.Root{
+					Resources: tt.resources,
+					Presets: config.Presets{
+						TriggerPauseStatus: config.Paused,
+					},
+				},
+			}
+
+			ctx := context.Background()
+			diags := bundle.Apply(ctx, b, mutator.ApplyPresets())
+
+			require.ErrorContains(t, diags.Error(), tt.error)
+		})
+	}
+}

@@ -10,6 +10,7 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
 	"github.com/stretchr/testify/require"
@@ -41,7 +42,7 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "skip/test7.py"))

 	b := &bundle.Bundle{
-		RootPath: dir,
+		BundleRootPath: dir,
 		Config: config.Root{
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
@@ -105,8 +106,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
-	bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
+	bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", []dyn.Location{{File: filepath.Join(dir, "relative", "resource.yml")}})

 	m := ExpandPipelineGlobPaths()
 	diags := bundle.Apply(context.Background(), b, m)

@@ -56,7 +56,7 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
 	}

 	// Compute relative path of the bundle root from the Git repo root.
-	absBundlePath, err := filepath.Abs(b.RootPath)
+	absBundlePath, err := filepath.Abs(b.BundleRootPath)
 	if err != nil {
 		return diag.FromErr(err)
 	}

@@ -0,0 +1,115 @@
+package paths
+
+import (
+	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type jobRewritePattern struct {
+	pattern     dyn.Pattern
+	kind        PathKind
+	skipRewrite func(string) bool
+}
+
+func noSkipRewrite(string) bool {
+	return false
+}
+
+func jobTaskRewritePatterns(base dyn.Pattern) []jobRewritePattern {
+	return []jobRewritePattern{
+		{
+			base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
+			PathKindNotebook,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
+			PathKindWorkspaceFile,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
+			PathKindDirectory,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
+			PathKindWorkspaceFile,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
+			PathKindLibrary,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
+			PathKindLibrary,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
+			PathKindWorkspaceFile,
+			noSkipRewrite,
+		},
+	}
+}
+
+func jobRewritePatterns() []jobRewritePattern {
+	// Base pattern to match all tasks in all jobs.
+	base := dyn.NewPattern(
+		dyn.Key("resources"),
+		dyn.Key("jobs"),
+		dyn.AnyKey(),
+		dyn.Key("tasks"),
+		dyn.AnyIndex(),
+	)
+
+	// Compile list of patterns and their respective rewrite functions.
+	jobEnvironmentsPatterns := []jobRewritePattern{
+		{
+			dyn.NewPattern(
+				dyn.Key("resources"),
+				dyn.Key("jobs"),
+				dyn.AnyKey(),
+				dyn.Key("environments"),
+				dyn.AnyIndex(),
+				dyn.Key("spec"),
+				dyn.Key("dependencies"),
+				dyn.AnyIndex(),
+			),
+			PathKindWithPrefix,
+			func(s string) bool {
+				return !libraries.IsLibraryLocal(s)
+			},
+		},
+	}
+
+	taskPatterns := jobTaskRewritePatterns(base)
+	forEachPatterns := jobTaskRewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
+	allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
+	allPatterns = append(allPatterns, forEachPatterns...)
+	return allPatterns
+}
+
+// VisitJobPaths visits all paths in job resources and applies a function to each path.
+func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) {
+	var err error
+	var newValue = value
+
+	for _, rewritePattern := range jobRewritePatterns() {
+		newValue, err = dyn.MapByPattern(newValue, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			if rewritePattern.skipRewrite(v.MustString()) {
+				return v, nil
+			}
+
+			return fn(p, rewritePattern.kind, v)
+		})
+
+		if err != nil {
+			return dyn.InvalidValue, err
+		}
+	}
+
+	return newValue, nil
+}

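A short usage sketch for the new visitor, assuming a loaded `config.Root` named `root`: instead of only collecting paths (as the tests below do), it rewrites each visited value, which is essentially what the reworked translate_paths mutator later in this diff does. Illustrative only; `dyn.V` constructs a fresh value and drops source locations, so a real mutator would be more careful:

	// prefixJobPaths prefixes every visited non-library job path with "./".
	// Hypothetical helper for illustration; not part of this commit.
	func prefixJobPaths(root *config.Root) error {
		return root.Mutate(func(value dyn.Value) (dyn.Value, error) {
			return paths.VisitJobPaths(value, func(p dyn.Path, kind paths.PathKind, v dyn.Value) (dyn.Value, error) {
				if kind == paths.PathKindLibrary {
					// Leave library paths untouched.
					return v, nil
				}
				return dyn.V("./" + v.MustString()), nil
			})
		})
	}
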
@@ -0,0 +1,168 @@
+package paths
+
+import (
+	"testing"
+
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/libs/dyn"
+	assert "github.com/databricks/cli/libs/dyn/dynassert"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/require"
+)
+
+func TestVisitJobPaths(t *testing.T) {
+	task0 := jobs.Task{
+		NotebookTask: &jobs.NotebookTask{
+			NotebookPath: "abc",
+		},
+	}
+	task1 := jobs.Task{
+		SparkPythonTask: &jobs.SparkPythonTask{
+			PythonFile: "abc",
+		},
+	}
+	task2 := jobs.Task{
+		DbtTask: &jobs.DbtTask{
+			ProjectDirectory: "abc",
+		},
+	}
+	task3 := jobs.Task{
+		SqlTask: &jobs.SqlTask{
+			File: &jobs.SqlTaskFile{
+				Path: "abc",
+			},
+		},
+	}
+	task4 := jobs.Task{
+		Libraries: []compute.Library{
+			{Whl: "dist/foo.whl"},
+		},
+	}
+	task5 := jobs.Task{
+		Libraries: []compute.Library{
+			{Jar: "dist/foo.jar"},
+		},
+	}
+	task6 := jobs.Task{
+		Libraries: []compute.Library{
+			{Requirements: "requirements.txt"},
+		},
+	}
+
+	job0 := &resources.Job{
+		JobSettings: &jobs.JobSettings{
+			Tasks: []jobs.Task{
+				task0,
+				task1,
+				task2,
+				task3,
+				task4,
+				task5,
+				task6,
+			},
+		},
+	}
+
+	root := config.Root{
+		Resources: config.Resources{
+			Jobs: map[string]*resources.Job{
+				"job0": job0,
+			},
+		},
+	}
+
+	actual := visitJobPaths(t, root)
+	expected := []dyn.Path{
+		dyn.MustPathFromString("resources.jobs.job0.tasks[0].notebook_task.notebook_path"),
+		dyn.MustPathFromString("resources.jobs.job0.tasks[1].spark_python_task.python_file"),
+		dyn.MustPathFromString("resources.jobs.job0.tasks[2].dbt_task.project_directory"),
+		dyn.MustPathFromString("resources.jobs.job0.tasks[3].sql_task.file.path"),
+		dyn.MustPathFromString("resources.jobs.job0.tasks[4].libraries[0].whl"),
+		dyn.MustPathFromString("resources.jobs.job0.tasks[5].libraries[0].jar"),
+		dyn.MustPathFromString("resources.jobs.job0.tasks[6].libraries[0].requirements"),
+	}
+
+	assert.ElementsMatch(t, expected, actual)
+}
+
+func TestVisitJobPaths_environments(t *testing.T) {
+	environment0 := jobs.JobEnvironment{
+		Spec: &compute.Environment{
+			Dependencies: []string{
+				"dist_0/*.whl",
+				"dist_1/*.whl",
+			},
+		},
+	}
+	job0 := &resources.Job{
+		JobSettings: &jobs.JobSettings{
+			Environments: []jobs.JobEnvironment{
+				environment0,
+			},
+		},
+	}
+
+	root := config.Root{
+		Resources: config.Resources{
+			Jobs: map[string]*resources.Job{
+				"job0": job0,
+			},
+		},
+	}
+
+	actual := visitJobPaths(t, root)
+	expected := []dyn.Path{
+		dyn.MustPathFromString("resources.jobs.job0.environments[0].spec.dependencies[0]"),
+		dyn.MustPathFromString("resources.jobs.job0.environments[0].spec.dependencies[1]"),
+	}
+
+	assert.ElementsMatch(t, expected, actual)
+}
+
+func TestVisitJobPaths_foreach(t *testing.T) {
+	task0 := jobs.Task{
+		ForEachTask: &jobs.ForEachTask{
+			Task: jobs.Task{
+				NotebookTask: &jobs.NotebookTask{
+					NotebookPath: "abc",
+				},
+			},
+		},
+	}
+	job0 := &resources.Job{
+		JobSettings: &jobs.JobSettings{
+			Tasks: []jobs.Task{
+				task0,
+			},
+		},
+	}
+
+	root := config.Root{
+		Resources: config.Resources{
+			Jobs: map[string]*resources.Job{
+				"job0": job0,
+			},
+		},
+	}
+
+	actual := visitJobPaths(t, root)
+	expected := []dyn.Path{
+		dyn.MustPathFromString("resources.jobs.job0.tasks[0].for_each_task.task.notebook_task.notebook_path"),
+	}
+
+	assert.ElementsMatch(t, expected, actual)
+}
+
+func visitJobPaths(t *testing.T, root config.Root) []dyn.Path {
+	var actual []dyn.Path
+	err := root.Mutate(func(value dyn.Value) (dyn.Value, error) {
+		return VisitJobPaths(value, func(p dyn.Path, kind PathKind, v dyn.Value) (dyn.Value, error) {
+			actual = append(actual, p)
+			return v, nil
+		})
+	})
+	require.NoError(t, err)
+	return actual
+}

@@ -0,0 +1,26 @@
+package paths
+
+import "github.com/databricks/cli/libs/dyn"
+
+type PathKind int
+
+const (
+	// PathKindLibrary is a path to a library file
+	PathKindLibrary = iota
+
+	// PathKindNotebook is a path to a notebook file
+	PathKindNotebook
+
+	// PathKindWorkspaceFile is a path to a regular workspace file,
+	// notebooks are not allowed because they are uploaded a special
+	// kind of workspace object.
+	PathKindWorkspaceFile
+
+	// PathKindWithPrefix is a path that starts with './'
+	PathKindWithPrefix
+
+	// PathKindDirectory is a path to directory
+	PathKindDirectory
+)
+
+type VisitFunc func(path dyn.Path, kind PathKind, value dyn.Value) (dyn.Value, error)

@@ -108,7 +108,7 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
 			return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err)
 		}

-		rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot)
+		rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.BundleRootPath, pythonPath, leftRoot)
 		mutateDiags = diags
 		if diags.HasError() {
 			return dyn.InvalidValue, mutateDiagsHasError
@@ -228,12 +228,12 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
 	return output, pythonDiagnostics
 }

-const installExplanation = `If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies, 
+const installExplanation = `If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
 and that the wheel is installed in the Python environment:

   $ .venv/bin/pip install -e .

-If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml, 
+If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
 or activate the environment before running CLI commands:

   experimental:

@@ -570,12 +570,12 @@ func TestExplainProcessErr(t *testing.T) {

 Explanation: 'databricks-pydabs' library is not installed in the Python environment.

-If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies, 
+If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
 and that the wheel is installed in the Python environment:

   $ .venv/bin/pip install -e .

-If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml, 
+If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
 or activate the environment before running CLI commands:

   experimental:

@@ -45,15 +45,15 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
 func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
 		return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
-			v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
+			v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
 			if err != nil {
 				return dyn.InvalidValue, err
 			}
-			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
+			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
 			if err != nil {
 				return dyn.InvalidValue, err
 			}
-			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
+			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
 			if err != nil {
 				return dyn.InvalidValue, err
 			}

@@ -9,12 +9,13 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
 	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 )

 func TestRewriteSyncPathsRelative(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: ".",
+		BundleRootPath: ".",
 		Config: config.Root{
 			Sync: config.Sync{
 				Paths: []string{
@@ -33,12 +34,12 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "sync.paths[0]", "./databricks.yml")
-	bundletest.SetLocation(b, "sync.paths[1]", "./databricks.yml")
-	bundletest.SetLocation(b, "sync.include[0]", "./file.yml")
-	bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml")
-	bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")
-	bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml")
+	bundletest.SetLocation(b, "sync.paths[0]", []dyn.Location{{File: "./databricks.yml"}})
+	bundletest.SetLocation(b, "sync.paths[1]", []dyn.Location{{File: "./databricks.yml"}})
+	bundletest.SetLocation(b, "sync.include[0]", []dyn.Location{{File: "./file.yml"}})
+	bundletest.SetLocation(b, "sync.include[1]", []dyn.Location{{File: "./a/file.yml"}})
+	bundletest.SetLocation(b, "sync.exclude[0]", []dyn.Location{{File: "./a/b/file.yml"}})
+	bundletest.SetLocation(b, "sync.exclude[1]", []dyn.Location{{File: "./a/b/c/file.yml"}})

 	diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
 	assert.NoError(t, diags.Error())
@@ -53,7 +54,7 @@ func TestRewriteSyncPathsRelative(t *testing.T) {

 func TestRewriteSyncPathsAbsolute(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "/tmp/dir",
+		BundleRootPath: "/tmp/dir",
 		Config: config.Root{
 			Sync: config.Sync{
 				Paths: []string{
@@ -72,12 +73,12 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "sync.paths[0]", "/tmp/dir/databricks.yml")
-	bundletest.SetLocation(b, "sync.paths[1]", "/tmp/dir/databricks.yml")
-	bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml")
-	bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml")
-	bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")
-	bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml")
+	bundletest.SetLocation(b, "sync.paths[0]", []dyn.Location{{File: "/tmp/dir/databricks.yml"}})
+	bundletest.SetLocation(b, "sync.paths[1]", []dyn.Location{{File: "/tmp/dir/databricks.yml"}})
+	bundletest.SetLocation(b, "sync.include[0]", []dyn.Location{{File: "/tmp/dir/file.yml"}})
+	bundletest.SetLocation(b, "sync.include[1]", []dyn.Location{{File: "/tmp/dir/a/file.yml"}})
+	bundletest.SetLocation(b, "sync.exclude[0]", []dyn.Location{{File: "/tmp/dir/a/b/file.yml"}})
+	bundletest.SetLocation(b, "sync.exclude[1]", []dyn.Location{{File: "/tmp/dir/a/b/c/file.yml"}})

 	diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
 	assert.NoError(t, diags.Error())
@@ -93,7 +94,7 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
 func TestRewriteSyncPathsErrorPaths(t *testing.T) {
 	t.Run("no sync block", func(t *testing.T) {
 		b := &bundle.Bundle{
-			RootPath: ".",
+			BundleRootPath: ".",
 		}

 		diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
@@ -102,7 +103,7 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) {

 	t.Run("empty include/exclude blocks", func(t *testing.T) {
 		b := &bundle.Bundle{
-			RootPath: ".",
+			BundleRootPath: ".",
 			Config: config.Root{
 				Sync: config.Sync{
 					Include: []string{},

@@ -15,8 +15,8 @@ import (

 func TestSyncDefaultPath_DefaultIfUnset(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "/tmp/some/dir",
-		Config:   config.Root{},
+		BundleRootPath: "/tmp/some/dir",
+		Config:         config.Root{},
 	}

 	ctx := context.Background()
@@ -51,8 +51,8 @@ func TestSyncDefaultPath_SkipIfSet(t *testing.T) {
 	for _, tcase := range tcases {
 		t.Run(tcase.name, func(t *testing.T) {
 			b := &bundle.Bundle{
-				RootPath: "/tmp/some/dir",
-				Config:   config.Root{},
+				BundleRootPath: "/tmp/some/dir",
+				Config:         config.Root{},
 			}

 			diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {

@@ -57,7 +57,7 @@ func (m *syncInferRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
 	var diags diag.Diagnostics

 	// Use the bundle root path as the starting point for inferring the sync root path.
-	bundleRootPath := filepath.Clean(b.RootPath)
+	bundleRootPath := filepath.Clean(b.BundleRootPath)

 	// Infer the sync root path by looking at each one of the sync paths.
 	// Every sync path must be a descendant of the final sync root path.

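The comment above describes a common-ancestor computation: starting from the bundle root, the sync root is widened upward until every configured sync path falls beneath it. A minimal standalone sketch of that idea, using only the standard library (not the mutator's actual implementation):

	package main

	import (
		"fmt"
		"path/filepath"
		"strings"
	)

	// inferSyncRoot widens root upward until every sync path (relative to the
	// original root) is a descendant of it.
	func inferSyncRoot(root string, paths []string) string {
		for _, p := range paths {
			abs := filepath.Clean(filepath.Join(root, p))
			for {
				rel, err := filepath.Rel(root, abs)
				if err == nil && rel != ".." && !strings.HasPrefix(rel, "../") {
					break // abs is contained in root
				}
				root = filepath.Dir(root) // widen one level
			}
		}
		return root
	}

	func main() {
		fmt.Println(inferSyncRoot("/repo/bundle", []string{".", "../common"})) // /repo
	}
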
@@ -9,13 +9,14 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
 	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

 func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "/tmp/some/dir",
+		BundleRootPath: "/tmp/some/dir",
 		Config: config.Root{
 			Sync: config.Sync{
 				Paths: []string{
@@ -46,7 +47,7 @@ func TestSyncInferRoot_NominalAbsolute(t *testing.T) {

 func TestSyncInferRoot_NominalRelative(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "./some/dir",
+		BundleRootPath: "./some/dir",
 		Config: config.Root{
 			Sync: config.Sync{
 				Paths: []string{
@@ -77,7 +78,7 @@ func TestSyncInferRoot_NominalRelative(t *testing.T) {

 func TestSyncInferRoot_ParentDirectory(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "/tmp/some/dir",
+		BundleRootPath: "/tmp/some/dir",
 		Config: config.Root{
 			Sync: config.Sync{
 				Paths: []string{
@@ -108,7 +109,7 @@ func TestSyncInferRoot_ParentDirectory(t *testing.T) {

 func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "/tmp/some/dir/that/is/very/deeply/nested",
+		BundleRootPath: "/tmp/some/dir/that/is/very/deeply/nested",
 		Config: config.Root{
 			Sync: config.Sync{
 				Paths: []string{
@@ -145,7 +146,7 @@ func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {

 func TestSyncInferRoot_MultiplePaths(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "/tmp/some/bundle/root",
+		BundleRootPath: "/tmp/some/bundle/root",
 		Config: config.Root{
 			Sync: config.Sync{
 				Paths: []string{
@@ -172,7 +173,7 @@ func TestSyncInferRoot_MultiplePaths(t *testing.T) {

 func TestSyncInferRoot_Error(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "/tmp/some/dir",
+		BundleRootPath: "/tmp/some/dir",
 		Config: config.Root{
 			Sync: config.Sync{
 				Paths: []string{
@@ -184,7 +185,7 @@ func TestSyncInferRoot_Error(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "sync.paths", "databricks.yml")
+	bundletest.SetLocation(b, "sync.paths", []dyn.Location{{File: "databricks.yml"}})

 	ctx := context.Background()
 	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())

@@ -4,97 +4,11 @@ import (
 	"fmt"
 	"slices"

-	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/bundle/config/mutator/paths"
+
 	"github.com/databricks/cli/libs/dyn"
 )

-type jobRewritePattern struct {
-	pattern     dyn.Pattern
-	fn          rewriteFunc
-	skipRewrite func(string) bool
-}
-
-func noSkipRewrite(string) bool {
-	return false
-}
-
-func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern {
-	return []jobRewritePattern{
-		{
-			base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
-			t.translateNotebookPath,
-			noSkipRewrite,
-		},
-		{
-			base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
-			t.translateFilePath,
-			noSkipRewrite,
-		},
-		{
-			base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
-			t.translateDirectoryPath,
-			noSkipRewrite,
-		},
-		{
-			base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
-			t.translateFilePath,
-			noSkipRewrite,
-		},
-		{
-			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
-			t.translateNoOp,
-			noSkipRewrite,
-		},
-		{
-			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
-			t.translateNoOp,
-			noSkipRewrite,
-		},
-		{
-			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
-			t.translateFilePath,
-			noSkipRewrite,
-		},
-	}
-}
-
-func (t *translateContext) jobRewritePatterns() []jobRewritePattern {
-	// Base pattern to match all tasks in all jobs.
-	base := dyn.NewPattern(
-		dyn.Key("resources"),
-		dyn.Key("jobs"),
-		dyn.AnyKey(),
-		dyn.Key("tasks"),
-		dyn.AnyIndex(),
-	)
-
-	// Compile list of patterns and their respective rewrite functions.
-	jobEnvironmentsPatterns := []jobRewritePattern{
-		{
-			dyn.NewPattern(
-				dyn.Key("resources"),
-				dyn.Key("jobs"),
-				dyn.AnyKey(),
-				dyn.Key("environments"),
-				dyn.AnyIndex(),
-				dyn.Key("spec"),
-				dyn.Key("dependencies"),
-				dyn.AnyIndex(),
-			),
-			t.translateNoOpWithPrefix,
-			func(s string) bool {
-				return !libraries.IsLibraryLocal(s)
-			},
-		},
-	}
-
-	taskPatterns := rewritePatterns(t, base)
-	forEachPatterns := rewritePatterns(t, base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
-	allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
-	allPatterns = append(allPatterns, forEachPatterns...)
-	return allPatterns
-}
-
 func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error) {
 	var err error

@@ -111,30 +25,41 @@ func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error)
 		}
 	}

-	for _, rewritePattern := range t.jobRewritePatterns() {
-		v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
-			key := p[2].Key()
+	return paths.VisitJobPaths(v, func(p dyn.Path, kind paths.PathKind, v dyn.Value) (dyn.Value, error) {
+		key := p[2].Key()

-			// Skip path translation if the job is using git source.
-			if slices.Contains(ignore, key) {
-				return v, nil
-			}
+		// Skip path translation if the job is using git source.
+		if slices.Contains(ignore, key) {
+			return v, nil
+		}

-			dir, err := v.Location().Directory()
-			if err != nil {
-				return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
-			}
+		dir, err := v.Location().Directory()
+		if err != nil {
+			return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
+		}

-			sv := v.MustString()
-			if rewritePattern.skipRewrite(sv) {
-				return v, nil
-			}
-			return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key])
-		})
+		rewritePatternFn, err := t.getRewritePatternFn(kind)
+		if err != nil {
+			return dyn.InvalidValue, err
+		}

-		if err != nil {
-			return dyn.InvalidValue, err
-		}
-	}
+		return t.rewriteRelativeTo(p, v, rewritePatternFn, dir, fallback[key])
+	})
+}

-	return v, nil
+func (t *translateContext) getRewritePatternFn(kind paths.PathKind) (rewriteFunc, error) {
+	switch kind {
+	case paths.PathKindLibrary:
+		return t.translateNoOp, nil
+	case paths.PathKindNotebook:
+		return t.translateNotebookPath, nil
+	case paths.PathKindWorkspaceFile:
+		return t.translateFilePath, nil
+	case paths.PathKindDirectory:
+		return t.translateDirectoryPath, nil
+	case paths.PathKindWithPrefix:
+		return t.translateNoOpWithPrefix, nil
+	}
+
+	return nil, fmt.Errorf("unsupported path kind: %d", kind)
 }

@@ -82,7 +82,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())
@@ -210,7 +210,7 @@ func TestTranslatePaths(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())
@@ -346,8 +346,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
-	bundletest.SetLocation(b, "resources.pipelines", filepath.Join(dir, "pipeline/resource.yml"))
+	bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})
+	bundletest.SetLocation(b, "resources.pipelines", []dyn.Location{{File: filepath.Join(dir, "pipeline/resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())
@@ -408,7 +408,7 @@ func TestTranslatePathsOutsideSyncRoot(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "../resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), "is not contained in sync root path")
@@ -439,7 +439,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
@@ -470,7 +470,7 @@ func TestJobFileDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")
@@ -501,7 +501,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
@@ -532,7 +532,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")
@@ -567,7 +567,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`)
@@ -602,7 +602,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`)
@@ -637,7 +637,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`)
@@ -672,7 +672,7 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`)
@@ -710,7 +710,7 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
+	bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())
@@ -753,8 +753,8 @@ func TestTranslatePathWithComplexVariables(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "variables", filepath.Join(dir, "variables/variables.yml"))
-	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
+	bundletest.SetLocation(b, "variables", []dyn.Location{{File: filepath.Join(dir, "variables/variables.yml")}})
+	bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})

 	ctx := context.Background()
 	// Assign the variables to the dynamic configuration.
@@ -406,7 +406,14 @@ func (r *Root) MergeTargetOverrides(name string) error {
 	return r.updateWithDynamicValue(root)
 }

-var variableKeywords = []string{"default", "lookup"}
+var allowedVariableDefinitions = []([]string){
+	{"default", "type", "description"},
+	{"default", "type"},
+	{"default", "description"},
+	{"lookup", "description"},
+	{"default"},
+	{"lookup"},
+}

 // isFullVariableOverrideDef checks if the given value is a full syntax varaible override.
 // A full syntax variable override is a map with either 1 of 2 keys.
@@ -418,26 +425,26 @@ func isFullVariableOverrideDef(v dyn.Value) bool {
 		return false
 	}

-	// If the map has more than 2 keys, it is not a full variable override.
-	if mv.Len() > 2 {
+	// If the map has more than 3 keys, it is not a full variable override.
+	if mv.Len() > 3 {
 		return false
 	}

-	// If the map has 2 keys, one of them should be "default" and the other is "type"
-	if mv.Len() == 2 {
-		if _, ok := mv.GetByString("type"); !ok {
-			return false
+	for _, keys := range allowedVariableDefinitions {
+		if len(keys) != mv.Len() {
+			continue
 		}

-		if _, ok := mv.GetByString("default"); !ok {
-			return false
+		// Check if the keys are the same.
+		match := true
+		for _, key := range keys {
+			if _, ok := mv.GetByString(key); !ok {
+				match = false
+				break
+			}
 		}

-		return true
-	}
-
-	for _, keyword := range variableKeywords {
-		if _, ok := mv.GetByString(keyword); ok {
+		if match {
 			return true
 		}
 	}
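
The rewritten check accepts a map only when its key set exactly equals one of the allowed combinations. A self-contained sketch of that matching rule, with map[string]bool standing in for dyn's mapping type (everything else mirrors the loop in isFullVariableOverrideDef):

	package main

	import "fmt"

	var allowed = [][]string{
		{"default", "type", "description"},
		{"default", "type"},
		{"default", "description"},
		{"lookup", "description"},
		{"default"},
		{"lookup"},
	}

	func isFullDef(m map[string]bool) bool {
		if len(m) > 3 {
			return false
		}
		for _, keys := range allowed {
			// Only a combination of the same size can match exactly.
			if len(keys) != len(m) {
				continue
			}
			match := true
			for _, k := range keys {
				if !m[k] {
					match = false
					break
				}
			}
			if match {
				return true
			}
		}
		return false
	}

	func main() {
		fmt.Println(isFullDef(map[string]bool{"default": true, "type": true}))   // true
		fmt.Println(isFullDef(map[string]bool{"lookup": true, "type": true}))    // false: not an allowed pair
		fmt.Println(isFullDef(map[string]bool{"default": true, "lookup": true})) // false: default and lookup are mutually exclusive
	}

Note how this encodes, in data rather than branching, that "lookup" never combines with "type" or "default"; the test cases in the next file exercise exactly these combinations.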
@@ -6,6 +6,7 @@ import (
 	"testing"

+	"github.com/databricks/cli/bundle/config/variable"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -169,3 +170,87 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
 	assert.Equal(t, "complex var", root.Variables["complex"].Description)

 }
+
+func TestIsFullVariableOverrideDef(t *testing.T) {
+	testCases := []struct {
+		value    dyn.Value
+		expected bool
+	}{
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"default":     dyn.V("foo"),
+				"description": dyn.V("foo var"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"lookup":      dyn.V("foo"),
+				"description": dyn.V("foo var"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":    dyn.V("string"),
+				"default": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":   dyn.V("string"),
+				"lookup": dyn.V("foo"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"description": dyn.V("string"),
+				"default":     dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"description": dyn.V("string"),
+				"lookup":      dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"default": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"lookup": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type": dyn.V("string"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"default":     dyn.V("foo"),
+				"description": dyn.V("foo var"),
+				"lookup":      dyn.V("foo"),
+			}),
+			expected: false,
+		},
+	}
+
+	for i, tc := range testCases {
+		assert.Equal(t, tc.expected, isFullVariableOverrideDef(tc.value), "test case %d", i)
+	}
+}
@@ -0,0 +1,161 @@
+package validate
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+)
+
+// JobTaskClusterSpec validates that job tasks have cluster spec defined
+// if task requires a cluster
+func JobTaskClusterSpec() bundle.ReadOnlyMutator {
+	return &jobTaskClusterSpec{}
+}
+
+type jobTaskClusterSpec struct {
+}
+
+func (v *jobTaskClusterSpec) Name() string {
+	return "validate:job_task_cluster_spec"
+}
+
+func (v *jobTaskClusterSpec) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs"))
+
+	for resourceName, job := range rb.Config().Resources.Jobs {
+		resourcePath := jobsPath.Append(dyn.Key(resourceName))
+
+		for taskIndex, task := range job.Tasks {
+			taskPath := resourcePath.Append(dyn.Key("tasks"), dyn.Index(taskIndex))
+
+			diags = diags.Extend(validateJobTask(rb, task, taskPath))
+		}
+	}
+
+	return diags
+}
+
+func validateJobTask(rb bundle.ReadOnlyBundle, task jobs.Task, taskPath dyn.Path) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	var specified []string
+	var unspecified []string
+
+	if task.JobClusterKey != "" {
+		specified = append(specified, "job_cluster_key")
+	} else {
+		unspecified = append(unspecified, "job_cluster_key")
+	}
+
+	if task.EnvironmentKey != "" {
+		specified = append(specified, "environment_key")
+	} else {
+		unspecified = append(unspecified, "environment_key")
+	}
+
+	if task.ExistingClusterId != "" {
+		specified = append(specified, "existing_cluster_id")
+	} else {
+		unspecified = append(unspecified, "existing_cluster_id")
+	}
+
+	if task.NewCluster != nil {
+		specified = append(specified, "new_cluster")
+	} else {
+		unspecified = append(unspecified, "new_cluster")
+	}
+
+	if task.ForEachTask != nil {
+		forEachTaskPath := taskPath.Append(dyn.Key("for_each_task"), dyn.Key("task"))
+
+		diags = diags.Extend(validateJobTask(rb, task.ForEachTask.Task, forEachTaskPath))
+	}
+
+	if isComputeTask(task) && len(specified) == 0 {
+		if task.NotebookTask != nil {
+			// notebook tasks without cluster spec will use notebook environment
+		} else {
+			// path might be not very helpful, adding user-specified task key clarifies the context
+			detail := fmt.Sprintf(
+				"Task %q requires a cluster or an environment to run.\nSpecify one of the following fields: %s.",
+				task.TaskKey,
+				strings.Join(unspecified, ", "),
+			)
+
+			diags = diags.Append(diag.Diagnostic{
+				Severity:  diag.Error,
+				Summary:   "Missing required cluster or environment settings",
+				Detail:    detail,
+				Locations: rb.Config().GetLocations(taskPath.String()),
+				Paths:     []dyn.Path{taskPath},
+			})
+		}
+	}
+
+	return diags
+}
+
+// isComputeTask returns true if the task runs on a cluster or serverless GC
+func isComputeTask(task jobs.Task) bool {
+	if task.NotebookTask != nil {
+		// if warehouse_id is set, it's SQL notebook that doesn't need cluster or serverless GC
+		if task.NotebookTask.WarehouseId != "" {
+			return false
+		} else {
+			// task settings don't require specifying a cluster/serverless GC, but task itself can run on one
+			// we handle that case separately in validateJobTask
+			return true
+		}
+	}
+
+	if task.PythonWheelTask != nil {
+		return true
+	}
+
+	if task.DbtTask != nil {
+		return true
+	}
+
+	if task.SparkJarTask != nil {
+		return true
+	}
+
+	if task.SparkSubmitTask != nil {
+		return true
+	}
+
+	if task.SparkPythonTask != nil {
+		return true
+	}
+
+	if task.SqlTask != nil {
+		return false
+	}
+
+	if task.PipelineTask != nil {
+		// while pipelines use clusters, pipeline tasks don't, they only trigger pipelines
+		return false
+	}
+
+	if task.RunJobTask != nil {
+		return false
+	}
+
+	if task.ConditionTask != nil {
+		return false
+	}
+
+	// for each task doesn't use clusters, underlying task(s) can though
+	if task.ForEachTask != nil {
+		return false
+	}
+
+	return false
+}
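
For orientation, a minimal sketch of how a read-only validator like this is exercised. The bundle.ApplyReadOnly and bundle.ReadOnly calls are taken verbatim from the test file that follows; the empty bundle and the package import path (bundle/config/validate is assumed from the `package validate` declaration above) are illustrative:

	package main

	import (
		"context"
		"fmt"

		"github.com/databricks/cli/bundle"
		"github.com/databricks/cli/bundle/config/validate"
	)

	func main() {
		// An empty bundle: no jobs means no tasks to validate, so no diagnostics.
		b := &bundle.Bundle{}

		diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobTaskClusterSpec())
		fmt.Println(len(diags)) // 0
	}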
@@ -0,0 +1,203 @@
+package validate
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestJobTaskClusterSpec(t *testing.T) {
+	expectedSummary := "Missing required cluster or environment settings"
+
+	type testCase struct {
+		name         string
+		task         jobs.Task
+		errorPath    string
+		errorDetail  string
+		errorSummary string
+	}
+
+	testCases := []testCase{
+		{
+			name: "valid notebook task",
+			task: jobs.Task{
+				// while a cluster is needed, it will use notebook environment to create one
+				NotebookTask: &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (job_cluster_key)",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				NotebookTask:  &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (new_cluster)",
+			task: jobs.Task{
+				NewCluster:   &compute.ClusterSpec{},
+				NotebookTask: &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (existing_cluster_id)",
+			task: jobs.Task{
+				ExistingClusterId: "cluster1",
+				NotebookTask:      &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid SQL notebook task",
+			task: jobs.Task{
+				NotebookTask: &jobs.NotebookTask{
+					WarehouseId: "warehouse1",
+				},
+			},
+		},
+		{
+			name: "valid python wheel task",
+			task: jobs.Task{
+				JobClusterKey:   "cluster1",
+				PythonWheelTask: &jobs.PythonWheelTask{},
+			},
+		},
+		{
+			name: "valid python wheel task (environment_key)",
+			task: jobs.Task{
+				EnvironmentKey:  "environment1",
+				PythonWheelTask: &jobs.PythonWheelTask{},
+			},
+		},
+		{
+			name: "valid dbt task",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				DbtTask:       &jobs.DbtTask{},
+			},
+		},
+		{
+			name: "valid spark jar task",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				SparkJarTask:  &jobs.SparkJarTask{},
+			},
+		},
+		{
+			name: "valid spark submit",
+			task: jobs.Task{
+				NewCluster:      &compute.ClusterSpec{},
+				SparkSubmitTask: &jobs.SparkSubmitTask{},
+			},
+		},
+		{
+			name: "valid spark python task",
+			task: jobs.Task{
+				JobClusterKey:   "cluster1",
+				SparkPythonTask: &jobs.SparkPythonTask{},
+			},
+		},
+		{
+			name: "valid SQL task",
+			task: jobs.Task{
+				SqlTask: &jobs.SqlTask{},
+			},
+		},
+		{
+			name: "valid pipeline task",
+			task: jobs.Task{
+				PipelineTask: &jobs.PipelineTask{},
+			},
+		},
+		{
+			name: "valid run job task",
+			task: jobs.Task{
+				RunJobTask: &jobs.RunJobTask{},
+			},
+		},
+		{
+			name: "valid condition task",
+			task: jobs.Task{
+				ConditionTask: &jobs.ConditionTask{},
+			},
+		},
+		{
+			name: "valid for each task",
+			task: jobs.Task{
+				ForEachTask: &jobs.ForEachTask{
+					Task: jobs.Task{
+						JobClusterKey: "cluster1",
+						NotebookTask:  &jobs.NotebookTask{},
+					},
+				},
+			},
+		},
+		{
+			name: "invalid python wheel task",
+			task: jobs.Task{
+				PythonWheelTask: &jobs.PythonWheelTask{},
+				TaskKey:         "my_task",
+			},
+			errorPath: "resources.jobs.job1.tasks[0]",
+			errorDetail: `Task "my_task" requires a cluster or an environment to run.
+Specify one of the following fields: job_cluster_key, environment_key, existing_cluster_id, new_cluster.`,
+			errorSummary: expectedSummary,
+		},
+		{
+			name: "invalid for each task",
+			task: jobs.Task{
+				ForEachTask: &jobs.ForEachTask{
+					Task: jobs.Task{
+						PythonWheelTask: &jobs.PythonWheelTask{},
+						TaskKey:         "my_task",
+					},
+				},
+			},
+			errorPath: "resources.jobs.job1.tasks[0].for_each_task.task",
+			errorDetail: `Task "my_task" requires a cluster or an environment to run.
+Specify one of the following fields: job_cluster_key, environment_key, existing_cluster_id, new_cluster.`,
+			errorSummary: expectedSummary,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			job := &resources.Job{
+				JobSettings: &jobs.JobSettings{
+					Tasks: []jobs.Task{tc.task},
+				},
+			}
+
+			b := createBundle(map[string]*resources.Job{"job1": job})
+			diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobTaskClusterSpec())
+
+			if tc.errorPath != "" || tc.errorDetail != "" || tc.errorSummary != "" {
+				assert.Len(t, diags, 1)
+				assert.Len(t, diags[0].Paths, 1)
+
+				diag := diags[0]
+
+				assert.Equal(t, tc.errorPath, diag.Paths[0].String())
+				assert.Equal(t, tc.errorSummary, diag.Summary)
+				assert.Equal(t, tc.errorDetail, diag.Detail)
+			} else {
+				assert.ElementsMatch(t, []string{}, diags)
+			}
+		})
+	}
+}
+
+func createBundle(jobs map[string]*resources.Job) *bundle.Bundle {
+	return &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: jobs,
+			},
+		},
+	}
+}
@@ -34,6 +34,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 		JobClusterKeyDefined(),
 		FilesToSync(),
 		ValidateSyncPatterns(),
+		JobTaskClusterSpec(),
 	))
 }
@@ -40,7 +40,7 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Compute config file path the job is defined in, relative to the bundle
 	// root
 	l := b.Config.GetLocation("resources.jobs." + name)
-	relativePath, err := filepath.Rel(b.RootPath, l.File)
+	relativePath, err := filepath.Rel(b.BundleRootPath, l.File)
 	if err != nil {
 		return diag.Errorf("failed to compute relative path for job %s: %v", name, err)
 	}
@@ -9,6 +9,7 @@ import (
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/bundle/metadata"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -55,9 +56,9 @@ func TestComputeMetadataMutator(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "resources.jobs.my-job-1", "a/b/c")
-	bundletest.SetLocation(b, "resources.jobs.my-job-2", "d/e/f")
-	bundletest.SetLocation(b, "resources.pipelines.my-pipeline", "abc")
+	bundletest.SetLocation(b, "resources.jobs.my-job-1", []dyn.Location{{File: "a/b/c"}})
+	bundletest.SetLocation(b, "resources.jobs.my-job-2", []dyn.Location{{File: "d/e/f"}})
+	bundletest.SetLocation(b, "resources.pipelines.my-pipeline", []dyn.Location{{File: "abc"}})

 	expectedMetadata := metadata.Metadata{
 		Version: metadata.Version,
@@ -62,8 +62,8 @@ func testStatePull(t *testing.T, opts statePullOpts) {

 	tmpDir := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath:   tmpDir,
-		BundleRoot: vfs.MustNew(tmpDir),
+		BundleRootPath: tmpDir,
+		BundleRoot:     vfs.MustNew(tmpDir),

 		SyncRootPath: tmpDir,
 		SyncRoot:     vfs.MustNew(tmpDir),
@@ -259,7 +259,7 @@ func TestStatePullNoState(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -447,7 +447,7 @@ func TestStatePullNewerDeploymentStateVersion(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -45,7 +45,7 @@ func TestStatePush(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -27,7 +27,7 @@ func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle {
 	require.NoError(t, err)

 	return &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"sort"

 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
@@ -82,6 +83,10 @@ func BundleToTerraform(config *config.Root) *schema.Root {
 		conv(src, &dst)

 		if src.JobSettings != nil {
+			sort.Slice(src.JobSettings.Tasks, func(i, j int) bool {
+				return src.JobSettings.Tasks[i].TaskKey < src.JobSettings.Tasks[j].TaskKey
+			})
+
 			for _, v := range src.Tasks {
 				var t schema.ResourceJobTask
 				conv(v, &t)
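
The added sort.Slice call gives the generated Terraform tasks a stable order, so a reordered source file does not show up as a spurious plan diff. A standalone sketch of the same idiom; the Task struct here is a stand-in for the SDK's jobs.Task, not the real type:

	package main

	import (
		"fmt"
		"sort"
	)

	// Task is a stand-in for the SDK's jobs.Task; only the key matters here.
	type Task struct{ TaskKey string }

	func main() {
		tasks := []Task{{"c"}, {"a"}, {"b"}}
		// Sorting by task key makes the emitted order deterministic,
		// regardless of the order tasks appear in the bundle config.
		sort.Slice(tasks, func(i, j int) bool {
			return tasks[i].TaskKey < tasks[j].TaskKey
		})
		fmt.Println(tasks) // [{a} {b} {c}]
	}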
@@ -33,7 +33,7 @@ func TestInitEnvironmentVariables(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -60,7 +60,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -88,7 +88,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -114,7 +114,7 @@ func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -144,7 +144,7 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -174,7 +174,7 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -202,7 +202,7 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) {

 func TestSetProxyEnvVars(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -250,7 +250,7 @@ func TestSetProxyEnvVars(t *testing.T) {

 func TestSetUserAgentExtraEnvVar(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Experimental: &config.Experimental{
 				PyDABs: config.PyDABs{
@@ -333,7 +333,7 @@ func TestFindExecPathFromEnvironmentWithWrongVersion(t *testing.T) {
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -357,7 +357,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndNoBinary(t *testing.T)
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -380,7 +380,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) {
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -17,7 +17,7 @@ func TestLoadWithNoState(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -32,7 +32,7 @@ func mockStateFilerForPull(t *testing.T, contents map[string]any, merr error) fi

 func statePullTestBundle(t *testing.T) *bundle.Bundle {
 	return &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -29,7 +29,7 @@ func mockStateFilerForPush(t *testing.T, fn func(body io.Reader)) filer.Filer {

 func statePushTestBundle(t *testing.T) *bundle.Bundle {
 	return &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -3,6 +3,7 @@ package tfdyn

 import (
 	"context"
 	"fmt"
+	"sort"

 	"github.com/databricks/cli/bundle/internal/tf/schema"
 	"github.com/databricks/cli/libs/dyn"
@@ -19,8 +20,38 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
 		log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary)
 	}

+	// Sort the tasks of each job in the bundle by task key. Sorting
+	// the task keys ensures that the diff computed by terraform is correct and avoids
+	// recreates. For more details see the NOTE at
+	// https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job#example-usage
+	// and https://github.com/databricks/terraform-provider-databricks/issues/4011
+	// and https://github.com/databricks/cli/pull/1776
+	vout := vin
+	var err error
+	tasks, ok := vin.Get("tasks").AsSequence()
+	if ok {
+		sort.Slice(tasks, func(i, j int) bool {
+			// We sort the tasks by their task key. Tasks without task keys are ordered
+			// before tasks with task keys. We do not error for those tasks
+			// since presence of a task_key is validated for in the Jobs backend.
+			tk1, ok := tasks[i].Get("task_key").AsString()
+			if !ok {
+				return true
+			}
+			tk2, ok := tasks[j].Get("task_key").AsString()
+			if !ok {
+				return false
+			}
+			return tk1 < tk2
+		})
+		vout, err = dyn.Set(vin, "tasks", dyn.V(tasks))
+		if err != nil {
+			return dyn.InvalidValue, err
+		}
+	}
+
 	// Modify top-level keys.
-	vout, err := renameKeys(vin, map[string]string{
+	vout, err = renameKeys(vout, map[string]string{
 		"tasks":        "task",
 		"job_clusters": "job_cluster",
 		"parameters":   "parameter",
@@ -42,8 +42,8 @@ func TestConvertJob(t *testing.T) {
 		},
 		Tasks: []jobs.Task{
 			{
-				TaskKey:       "task_key",
-				JobClusterKey: "job_cluster_key",
+				TaskKey:       "task_key_b",
+				JobClusterKey: "job_cluster_key_b",
 				Libraries: []compute.Library{
 					{
 						Pypi: &compute.PythonPyPiLibrary{
@@ -55,6 +55,17 @@ func TestConvertJob(t *testing.T) {
 					},
 				},
 			},
+			{
+				TaskKey:       "task_key_a",
+				JobClusterKey: "job_cluster_key_a",
+			},
+			{
+				TaskKey:       "task_key_c",
+				JobClusterKey: "job_cluster_key_c",
+			},
+			{
+				Description: "missing task key 😱",
+			},
 		},
 	},
 	Permissions: []resources.Permission{
@@ -100,8 +111,15 @@ func TestConvertJob(t *testing.T) {
 		},
 		"task": []any{
 			map[string]any{
-				"task_key":        "task_key",
-				"job_cluster_key": "job_cluster_key",
+				"description": "missing task key 😱",
+			},
+			map[string]any{
+				"task_key":        "task_key_a",
+				"job_cluster_key": "job_cluster_key_a",
+			},
+			map[string]any{
+				"task_key":        "task_key_b",
+				"job_cluster_key": "job_cluster_key_b",
 				"library": []any{
 					map[string]any{
 						"pypi": map[string]any{
@@ -113,6 +131,10 @@ func TestConvertJob(t *testing.T) {
 					},
 				},
 			},
+			map[string]any{
+				"task_key":        "task_key_c",
+				"job_cluster_key": "job_cluster_key_c",
+			},
 		},
 	}, out.Job["my_job"])
@@ -13,7 +13,7 @@ import (

 func TestParseResourcesStateWithNoFile(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -31,7 +31,7 @@ func TestParseResourcesStateWithNoFile(t *testing.T) {
 func TestParseResourcesStateWithExistingStateFile(t *testing.T) {
 	ctx := context.Background()
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -8,15 +8,13 @@ import (
 // SetLocation sets the location of all values in the bundle to the given path.
 // This is useful for testing where we need to associate configuration
 // with the path it is loaded from.
-func SetLocation(b *bundle.Bundle, prefix string, filePath string) {
+func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) {
 	start := dyn.MustPathFromString(prefix)
 	b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
 		return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
 			// If the path has the given prefix, set the location.
 			if p.HasPrefix(start) {
-				return v.WithLocations([]dyn.Location{{
-					File: filePath,
-				}}), nil
+				return v.WithLocations(locations), nil
 			}

 			// The path is not nested under the given prefix.
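
Call sites migrate from passing a bare file path to passing a []dyn.Location slice, which also leaves room for more than one location (line and column fields can stay zero). A sketch of the new call shape, wrapped as a hypothetical test so it stands alone; the empty bundle is illustrative:

	package example_test

	import (
		"path/filepath"
		"testing"

		"github.com/databricks/cli/bundle"
		"github.com/databricks/cli/bundle/internal/bundletest"
		"github.com/databricks/cli/libs/dyn"
	)

	func TestSetLocationNewSignature(t *testing.T) {
		b := &bundle.Bundle{}
		dir := t.TempDir()

		// Old (no longer compiles):
		//   bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
		// New: a slice of dyn.Location values.
		bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
	}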
@@ -10,6 +10,7 @@ import (
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/internal/testutil"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/stretchr/testify/require"
@@ -61,7 +62,7 @@ func TestGlobReferencesExpandedForTaskLibraries(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
 	require.Empty(t, diags)
@@ -146,7 +147,7 @@ func TestGlobReferencesExpandedForForeachTaskLibraries(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
 	require.Empty(t, diags)
@@ -221,7 +222,7 @@ func TestGlobReferencesExpandedForEnvironmentsDeps(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
 	require.Empty(t, diags)
@@ -15,8 +15,8 @@ import (
 	"github.com/databricks/cli/bundle/deploy/terraform"
 	"github.com/databricks/cli/bundle/libraries"
 	"github.com/databricks/cli/bundle/permissions"
-	"github.com/databricks/cli/bundle/python"
 	"github.com/databricks/cli/bundle/scripts"
+	"github.com/databricks/cli/bundle/trampoline"
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/sync"
 	terraformlib "github.com/databricks/cli/libs/terraform"
@@ -157,7 +157,7 @@ func Deploy(outputHandler sync.OutputHandler) bundle.Mutator {
 		artifacts.CleanUp(),
 		libraries.ExpandGlobReferences(),
 		libraries.Upload(),
-		python.TransformWheelTask(),
+		trampoline.TransformWheelTask(),
 		files.Upload(outputHandler),
 		deploy.StateUpdate(),
 		deploy.StatePush(),
@@ -9,8 +9,8 @@ import (
 	"github.com/databricks/cli/bundle/deploy/metadata"
 	"github.com/databricks/cli/bundle/deploy/terraform"
 	"github.com/databricks/cli/bundle/permissions"
-	"github.com/databricks/cli/bundle/python"
 	"github.com/databricks/cli/bundle/scripts"
+	"github.com/databricks/cli/bundle/trampoline"
 )

 // The initialize phase fills in defaults and connects to the workspace.
@@ -66,7 +66,7 @@ func Initialize() bundle.Mutator {
 		mutator.ConfigureWSFS(),

 		mutator.TranslatePaths(),
-		python.WrapperWarning(),
+		trampoline.WrapperWarning(),
 		permissions.ApplyBundlePermissions(),
 		permissions.FilterCurrentUser(),
 		metadata.AnnotateJobs(),
@@ -148,7 +148,7 @@ func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics)

 	// Make location relative to bundle root
 	if d.Locations[i].File != "" {
-		out, err := filepath.Rel(b.RootPath, d.Locations[i].File)
+		out, err := filepath.Rel(b.BundleRootPath, d.Locations[i].File)
 		// if we can't relativize the path, just use path as-is
 		if err == nil {
 			d.Locations[i].File = out
@@ -59,6 +59,127 @@
 	"cli": {
 		"bundle": {
 			"config": {
+				"resources.Cluster": {
+					"anyOf": [
+						{
+							"type": "object",
+							"properties": {
+								"apply_policy_default_values": {
+									"description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied.",
+									"$ref": "#/$defs/bool"
+								},
+								"autoscale": {
+									"description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AutoScale"
+								},
+								"autotermination_minutes": {
+									"description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination.",
+									"$ref": "#/$defs/int"
+								},
+								"aws_attributes": {
+									"description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.",
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes"
+								},
+								"azure_attributes": {
+									"description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.",
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes"
+								},
+								"cluster_log_conf": {
+									"description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterLogConf"
+								},
+								"cluster_name": {
+									"description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n",
+									"$ref": "#/$defs/string"
+								},
+								"custom_tags": {
+									"description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
+									"$ref": "#/$defs/map/string"
+								},
+								"data_security_mode": {
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode"
+								},
+								"docker_image": {
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.DockerImage"
+								},
+								"driver_instance_pool_id": {
+									"description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.",
+									"$ref": "#/$defs/string"
+								},
+								"driver_node_type_id": {
+									"description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n",
+									"$ref": "#/$defs/string"
+								},
+								"enable_elastic_disk": {
+									"description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details.",
+									"$ref": "#/$defs/bool"
+								},
+								"enable_local_disk_encryption": {
+									"description": "Whether to enable LUKS on cluster VMs' local disks",
+									"$ref": "#/$defs/bool"
+								},
+								"gcp_attributes": {
+									"description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.",
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes"
+								},
+								"init_scripts": {
+									"description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.",
+									"$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/compute.InitScriptInfo"
+								},
+								"instance_pool_id": {
+									"description": "The optional ID of the instance pool to which the cluster belongs.",
+									"$ref": "#/$defs/string"
+								},
+								"node_type_id": {
+									"description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n",
+									"$ref": "#/$defs/string"
+								},
+								"num_workers": {
+									"description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned.",
+									"$ref": "#/$defs/int"
+								},
+								"permissions": {
+									"$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
+								},
+								"policy_id": {
+									"description": "The ID of the cluster policy used to create the cluster if applicable.",
+									"$ref": "#/$defs/string"
+								},
+								"runtime_engine": {
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.RuntimeEngine"
+								},
+								"single_user_name": {
+									"description": "Single user name if data_security_mode is `SINGLE_USER`",
+									"$ref": "#/$defs/string"
+								},
+								"spark_conf": {
+									"description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n",
+									"$ref": "#/$defs/map/string"
+								},
+								"spark_env_vars": {
+									"description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`",
+									"$ref": "#/$defs/map/string"
+								},
+								"spark_version": {
+									"description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n",
+									"$ref": "#/$defs/string"
+								},
+								"ssh_public_keys": {
+									"description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.",
+									"$ref": "#/$defs/slice/string"
+								},
+								"workload_type": {
+									"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType"
+								}
+							},
+							"additionalProperties": false
+						},
+						{
+							"type": "string",
+							"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+						}
+					]
+				},
 				"resources.Grant": {
 					"anyOf": [
 						{
@@ -109,7 +230,7 @@
 					"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobEmailNotifications"
 				},
 				"environments": {
-					"description": "A list of task execution environment specifications that can be referenced by tasks of this job.",
+					"description": "A list of task execution environment specifications that can be referenced by serverless tasks of this job.\nAn environment is required to be present for serverless tasks.\nFor serverless notebook tasks, the environment is accessible in the notebook environment panel.\nFor other serverless tasks, the task environment is required to be specified using environment_key in the task settings.",
 					"$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment"
 				},
 				"format": {
@@ -293,7 +414,7 @@
 					"$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
 				},
 				"rate_limits": {
-					"description": "Rate limits to be applied to the serving endpoint. NOTE: only external and foundation model endpoints are supported as of now.",
+					"description": "Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits.",
 					"$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/serving.RateLimit"
 				},
 				"route_optimized": {
@@ -747,6 +868,9 @@
 				{
 					"type": "object",
 					"properties": {
+						"cluster_id": {
+							"$ref": "#/$defs/string"
+						},
 						"compute_id": {
 							"$ref": "#/$defs/string"
 						},
@@ -923,6 +1047,9 @@
 				{
 					"type": "object",
 					"properties": {
+						"clusters": {
+							"$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster"
+						},
 						"experiments": {
 							"$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment"
 						},
@@ -990,6 +1117,9 @@
 				"bundle": {
 					"$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle"
 				},
+				"cluster_id": {
+					"$ref": "#/$defs/string"
+				},
 				"compute_id": {
 					"$ref": "#/$defs/string"
 				},
@@ -2028,7 +2158,7 @@
 			},
 			"compute.RuntimeEngine": {
 				"type": "string",
-				"description": "Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime\nengine is inferred from spark_version.",
+				"description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that contain `-photon-`.\nRemove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the spark_version\ncontains -photon-, in which case Photon will be used.\n",
 				"enum": [
 					"NULL",
 					"STANDARD",
@@ -2610,7 +2740,7 @@
 			"anyOf": [
 				{
 					"type": "object",
-					"description": "Write-only setting, available only in Create/Update/Reset and Submit calls. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nOnly `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.",
+					"description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nExactly one of `user_name`, `service_principal_name`, `group_name` should be specified. If not, an error is thrown.",
 					"properties": {
 						"service_principal_name": {
 							"description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.",
@@ -4904,6 +5034,20 @@
 	"cli": {
 		"bundle": {
 			"config": {
+				"resources.Cluster": {
+					"anyOf": [
+						{
+							"type": "object",
+							"additionalProperties": {
+								"$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Cluster"
+							}
+						},
+						{
+							"type": "string",
+							"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+						}
+					]
+				},
 				"resources.Job": {
 					"anyOf": [
 						{
@@ -30,7 +30,7 @@ func (m *script) Name() string {
 }

 func (m *script) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	executor, err := exec.NewCommandExecutor(b.RootPath)
+	executor, err := exec.NewCommandExecutor(b.BundleRootPath)
 	if err != nil {
 		return diag.FromErr(err)
 	}
@@ -23,7 +23,7 @@ func TestExecutesHook(t *testing.T) {
 		},
 	}

-	executor, err := exec.NewCommandExecutor(b.RootPath)
+	executor, err := exec.NewCommandExecutor(b.BundleRootPath)
 	require.NoError(t, err)
 	_, out, err := executeHook(context.Background(), executor, b, config.ScriptPreBuild)
 	require.NoError(t, err)
@@ -1,4 +1,4 @@
-package python
+package trampoline

 import (
 	"context"
@@ -17,8 +17,8 @@ func TestNoTransformByDefault(t *testing.T) {
 	tmpDir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath:     filepath.Join(tmpDir, "parent", "my_bundle"),
-		SyncRootPath: filepath.Join(tmpDir, "parent"),
+		BundleRootPath: filepath.Join(tmpDir, "parent", "my_bundle"),
+		SyncRootPath:   filepath.Join(tmpDir, "parent"),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "development",
@@ -66,8 +66,8 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) {
 	tmpDir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath:     filepath.Join(tmpDir, "parent", "my_bundle"),
-		SyncRootPath: filepath.Join(tmpDir, "parent"),
+		BundleRootPath: filepath.Join(tmpDir, "parent", "my_bundle"),
+		SyncRootPath:   filepath.Join(tmpDir, "parent"),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "development",
@@ -1,4 +1,4 @@
-package python
+package trampoline

 import (
 	"context"
@@ -1,4 +1,4 @@
-package python
+package trampoline

 import (
 	"context"
@@ -1,4 +1,4 @@
-package python
+package trampoline

 import (
 	"context"
@@ -69,7 +69,7 @@ func TransformWheelTask() bundle.Mutator {
 			res := b.Config.Experimental != nil && b.Config.Experimental.PythonWheelWrapper
 			return res, nil
 		},
-		mutator.NewTrampoline(
+		NewTrampoline(
 			"python_wheel",
 			&pythonTrampoline{},
 			NOTEBOOK_TEMPLATE,
@@ -94,9 +94,9 @@ func (t *pythonTrampoline) CleanUp(task *jobs.Task) error {
 	return nil
 }

-func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey {
+func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []TaskWithJobKey {
 	r := b.Config.Resources
-	result := make([]mutator.TaskWithJobKey, 0)
+	result := make([]TaskWithJobKey, 0)
 	for k := range b.Config.Resources.Jobs {
 		tasks := r.Jobs[k].JobSettings.Tasks
 		for i := range tasks {
@@ -110,7 +110,7 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey {
 				continue
 			}

-			result = append(result, mutator.TaskWithJobKey{
+			result = append(result, TaskWithJobKey{
 				JobKey: k,
 				Task:   task,
 			})
@@ -1,4 +1,4 @@
-package python
+package trampoline

 import (
 	"context"
@@ -115,7 +115,7 @@ func TestTransformFiltersWheelTasksOnly(t *testing.T) {
 func TestNoPanicWithNoPythonWheelTasks(t *testing.T) {
 	tmpDir := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "development",
@@ -1,4 +1,4 @@
-package mutator
+package trampoline

 import (
 	"context"
@@ -23,6 +23,7 @@ type TrampolineFunctions interface {
 	GetTasks(b *bundle.Bundle) []TaskWithJobKey
 	CleanUp(task *jobs.Task) error
 }

 type trampoline struct {
 	name      string
 	functions TrampolineFunctions
@@ -1,4 +1,4 @@
-package mutator
+package trampoline

 import (
 	"context"
@@ -56,8 +56,8 @@ func TestGenerateTrampoline(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath:     filepath.Join(tmpDir, "parent", "my_bundle"),
-		SyncRootPath: filepath.Join(tmpDir, "parent"),
+		BundleRootPath: filepath.Join(tmpDir, "parent", "my_bundle"),
+		SyncRootPath:   filepath.Join(tmpDir, "parent"),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/Workspace/files",
@@ -0,0 +1,215 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package disable_legacy_features
+
+import (
+	"fmt"
+
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/databricks-sdk-go/service/settings"
+	"github.com/spf13/cobra"
+)
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var cmdOverrides []func(*cobra.Command)
+
+func New() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "disable-legacy-features",
+		Short: `Disable legacy features for new Databricks workspaces.`,
+		Long: `Disable legacy features for new Databricks workspaces.
+
+  For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2.
+  Hive Metastore will not be provisioned. 3. Disables the use of ‘No-isolation
+  clusters’. 4. Disables Databricks Runtime versions prior to 13.3LTS.`,
+
+		// This service is being previewed; hide from help output.
+		Hidden: true,
+	}
+
+	// Add methods
+	cmd.AddCommand(newDelete())
+	cmd.AddCommand(newGet())
+	cmd.AddCommand(newUpdate())
+
+	// Apply optional overrides to this command.
+	for _, fn := range cmdOverrides {
+		fn(cmd)
+	}
+
+	return cmd
+}
+
+// start delete command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var deleteOverrides []func(
+	*cobra.Command,
+	*settings.DeleteDisableLegacyFeaturesRequest,
+)
+
+func newDelete() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var deleteReq settings.DeleteDisableLegacyFeaturesRequest
+
+	// TODO: short flags
+
+	cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`)
+
+	cmd.Use = "delete"
+	cmd.Short = `Delete the disable legacy features setting.`
+	cmd.Long = `Delete the disable legacy features setting.
+
+  Deletes the disable legacy features setting.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(0)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustAccountClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+
+		response, err := a.Settings.DisableLegacyFeatures().Delete(ctx, deleteReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range deleteOverrides {
+		fn(cmd, &deleteReq)
+	}
+
+	return cmd
+}
+
+// start get command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var getOverrides []func(
+	*cobra.Command,
+	*settings.GetDisableLegacyFeaturesRequest,
+)
+
+func newGet() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var getReq settings.GetDisableLegacyFeaturesRequest
+
+	// TODO: short flags
+
+	cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`)
+
+	cmd.Use = "get"
+	cmd.Short = `Get the disable legacy features setting.`
+	cmd.Long = `Get the disable legacy features setting.
+
+  Gets the value of the disable legacy features setting.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(0)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustAccountClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+
+		response, err := a.Settings.DisableLegacyFeatures().Get(ctx, getReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range getOverrides {
+		fn(cmd, &getReq)
+	}
+
+	return cmd
+}
+
+// start update command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var updateOverrides []func(
+	*cobra.Command,
+	*settings.UpdateDisableLegacyFeaturesRequest,
+)
+
+func newUpdate() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var updateReq settings.UpdateDisableLegacyFeaturesRequest
+	var updateJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Use = "update"
+	cmd.Short = `Update the disable legacy features setting.`
+	cmd.Long = `Update the disable legacy features setting.
+
+  Updates the value of the disable legacy features setting.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.PreRunE = root.MustAccountClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = updateJson.Unmarshal(&updateReq)
+			if err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}
+
+		response, err := a.Settings.DisableLegacyFeatures().Update(ctx, updateReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range updateOverrides {
+		fn(cmd, &updateReq)
+	}
+
+	return cmd
+}
+
+// end service DisableLegacyFeatures
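The `cmdOverrides`, `deleteOverrides`, `getOverrides`, and `updateOverrides` slices are the only extension points in the generated file above. A hypothetical `override.go` in the same directory (the file name and the example tweak are illustrative; only the mechanism comes from the generated code) would hook in like this:

```go
package disable_legacy_features

import "github.com/spf13/cobra"

func init() {
	// Each function appended here runs against the command built by New(),
	// so curated behavior is layered on without editing generated code.
	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
		cmd.Hidden = false // e.g. surface the command once the preview ends
	})
}
```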
@@ -6,6 +6,7 @@ import (
 	"github.com/spf13/cobra"
 
 	csp_enablement_account "github.com/databricks/cli/cmd/account/csp-enablement-account"
+	disable_legacy_features "github.com/databricks/cli/cmd/account/disable-legacy-features"
 	esm_enablement_account "github.com/databricks/cli/cmd/account/esm-enablement-account"
 	personal_compute "github.com/databricks/cli/cmd/account/personal-compute"
 )

@@ -27,6 +28,7 @@ func New() *cobra.Command {
 
 	// Add subservices
 	cmd.AddCommand(csp_enablement_account.New())
+	cmd.AddCommand(disable_legacy_features.New())
 	cmd.AddCommand(esm_enablement_account.New())
 	cmd.AddCommand(personal_compute.New())
 
@@ -24,7 +24,7 @@ func TestGeneratePipelineCommand(t *testing.T) {
 
 	root := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath: root,
+		BundleRootPath: root,
 	}
 
 	m := mocks.NewMockWorkspaceClient(t)

@@ -122,7 +122,7 @@ func TestGenerateJobCommand(t *testing.T) {
 
 	root := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath: root,
+		BundleRootPath: root,
 	}
 
 	m := mocks.NewMockWorkspaceClient(t)
@@ -17,10 +17,10 @@ import (
 func TestSyncOptionsFromBundle(t *testing.T) {
 	tempDir := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath:     tempDir,
-		BundleRoot:   vfs.MustNew(tempDir),
-		SyncRootPath: tempDir,
-		SyncRoot:     vfs.MustNew(tempDir),
+		BundleRootPath: tempDir,
+		BundleRoot:     vfs.MustNew(tempDir),
+		SyncRootPath:   tempDir,
+		SyncRoot:       vfs.MustNew(tempDir),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -75,8 +75,8 @@ func newCreate() *cobra.Command {
 	var createSkipWait bool
 	var createTimeout time.Duration
 
-	cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach IDLE state`)
-	cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach IDLE state`)
+	cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach ACTIVE state`)
+	cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach ACTIVE state`)
 	// TODO: short flags
 	cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 

@@ -130,13 +130,13 @@ func newCreate() *cobra.Command {
 		}
 		spinner := cmdio.Spinner(ctx)
 		info, err := wait.OnProgress(func(i *apps.App) {
-			if i.Status == nil {
+			if i.ComputeStatus == nil {
 				return
 			}
-			status := i.Status.State
+			status := i.ComputeStatus.State
 			statusMessage := fmt.Sprintf("current status: %s", status)
-			if i.Status != nil {
-				statusMessage = i.Status.Message
+			if i.ComputeStatus != nil {
+				statusMessage = i.ComputeStatus.Message
 			}
 			spinner <- statusMessage
 		}).GetWithTimeout(createTimeout)

@@ -198,11 +198,11 @@ func newDelete() *cobra.Command {
 
 		deleteReq.Name = args[0]
 
-		err = w.Apps.Delete(ctx, deleteReq)
+		response, err := w.Apps.Delete(ctx, deleteReq)
 		if err != nil {
 			return err
 		}
-		return nil
+		return cmdio.Render(ctx, response)
 	}
 
 	// Disable completions since they are not applicable.

@@ -240,35 +240,23 @@ func newDeploy() *cobra.Command {
 	// TODO: short flags
 	cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-	cmd.Flags().StringVar(&deployReq.DeploymentId, "deployment-id", deployReq.DeploymentId, `The unique id of the deployment.`)
-	cmd.Flags().Var(&deployReq.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`)
-	cmd.Flags().StringVar(&deployReq.SourceCodePath, "source-code-path", deployReq.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`)
-
-	cmd.Use = "deploy APP_NAME SOURCE_CODE_PATH"
+	cmd.Use = "deploy APP_NAME"
 	cmd.Short = `Create an app deployment.`
 	cmd.Long = `Create an app deployment.
 
   Creates an app deployment for the app with the supplied name.
 
   Arguments:
-    APP_NAME: The name of the app.
-    SOURCE_CODE_PATH: The workspace file system path of the source code used to create the app
-      deployment. This is different from
-      deployment_artifacts.source_code_path, which is the path used by the
-      deployed app. The former refers to the original source code location of
-      the app in the workspace during deployment creation, whereas the latter
-      provides a system generated stable snapshotted source code path used by
-      the deployment.`
+    APP_NAME: The name of the app.`
 
 	cmd.Annotations = make(map[string]string)
 
 	cmd.Args = func(cmd *cobra.Command, args []string) error {
 		if cmd.Flags().Changed("json") {
 			err := root.ExactArgs(1)(cmd, args)
 			if err != nil {
 				return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'source_code_path' in your JSON input")
 			}
 			return nil
 		}
-		check := root.ExactArgs(2)
+		check := root.ExactArgs(1)
 		return check(cmd, args)
 	}
 

@@ -284,9 +272,6 @@ func newDeploy() *cobra.Command {
 			}
 		}
 		deployReq.AppName = args[0]
-		if !cmd.Flags().Changed("json") {
-			deployReq.SourceCodePath = args[1]
-		}
 
 		wait, err := w.Apps.Deploy(ctx, deployReq)
 		if err != nil {

@@ -759,8 +744,8 @@ func newStart() *cobra.Command {
 	var startSkipWait bool
 	var startTimeout time.Duration
 
-	cmd.Flags().BoolVar(&startSkipWait, "no-wait", startSkipWait, `do not wait to reach SUCCEEDED state`)
-	cmd.Flags().DurationVar(&startTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`)
+	cmd.Flags().BoolVar(&startSkipWait, "no-wait", startSkipWait, `do not wait to reach ACTIVE state`)
+	cmd.Flags().DurationVar(&startTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach ACTIVE state`)
 	// TODO: short flags
 
 	cmd.Use = "start NAME"

@@ -794,14 +779,14 @@ func newStart() *cobra.Command {
 			return cmdio.Render(ctx, wait.Response)
 		}
 		spinner := cmdio.Spinner(ctx)
-		info, err := wait.OnProgress(func(i *apps.AppDeployment) {
-			if i.Status == nil {
+		info, err := wait.OnProgress(func(i *apps.App) {
+			if i.ComputeStatus == nil {
 				return
 			}
-			status := i.Status.State
+			status := i.ComputeStatus.State
 			statusMessage := fmt.Sprintf("current status: %s", status)
-			if i.Status != nil {
-				statusMessage = i.Status.Message
+			if i.ComputeStatus != nil {
+				statusMessage = i.ComputeStatus.Message
 			}
 			spinner <- statusMessage
 		}).GetWithTimeout(startTimeout)

@@ -838,6 +823,11 @@ func newStop() *cobra.Command {
 
 	var stopReq apps.StopAppRequest
 
+	var stopSkipWait bool
+	var stopTimeout time.Duration
+
+	cmd.Flags().BoolVar(&stopSkipWait, "no-wait", stopSkipWait, `do not wait to reach STOPPED state`)
+	cmd.Flags().DurationVar(&stopTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach STOPPED state`)
 	// TODO: short flags
 
 	cmd.Use = "stop NAME"

@@ -863,11 +853,30 @@ func newStop() *cobra.Command {
 
 		stopReq.Name = args[0]
 
-		err = w.Apps.Stop(ctx, stopReq)
+		wait, err := w.Apps.Stop(ctx, stopReq)
 		if err != nil {
 			return err
 		}
-		return nil
+		if stopSkipWait {
+			return cmdio.Render(ctx, wait.Response)
+		}
+		spinner := cmdio.Spinner(ctx)
+		info, err := wait.OnProgress(func(i *apps.App) {
+			if i.ComputeStatus == nil {
+				return
+			}
+			status := i.ComputeStatus.State
+			statusMessage := fmt.Sprintf("current status: %s", status)
+			if i.ComputeStatus != nil {
+				statusMessage = i.ComputeStatus.Message
+			}
+			spinner <- statusMessage
+		}).GetWithTimeout(stopTimeout)
+		close(spinner)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, info)
 	}
 
 	// Disable completions since they are not applicable.
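The create, start, and stop hunks above all converge on the same long-running-operation shape: the SDK call returns a wait handle rather than the final resource, and the command either renders `wait.Response` immediately under `--no-wait` or polls until a terminal state. A condensed sketch of that pattern as it appears in the new stop handler (variable names abbreviated from the diff, not a verbatim excerpt):

```go
wait, err := w.Apps.Stop(ctx, stopReq) // returns a wait handle, not the final App
if err != nil {
	return err
}
if stopSkipWait {
	return cmdio.Render(ctx, wait.Response) // --no-wait: render and exit
}
spinner := cmdio.Spinner(ctx)
info, err := wait.OnProgress(func(app *apps.App) {
	if app.ComputeStatus == nil {
		return // no status reported yet; keep polling
	}
	// ComputeStatus replaces the App.Status field used before this change.
	spinner <- app.ComputeStatus.Message
}).GetWithTimeout(stopTimeout) // blocks until STOPPED or --timeout elapses
close(spinner)
if err != nil {
	return err
}
return cmdio.Render(ctx, info)
```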
@@ -217,7 +217,7 @@ func newCreate() *cobra.Command {
 	cmd.Flags().StringVar(&createReq.NodeTypeId, "node-type-id", createReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
 	cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`)
 	cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`)
-	cmd.Flags().Var(&createReq.RuntimeEngine, "runtime-engine", `Decides which runtime engine to be use, e.g. Supported values: [NULL, PHOTON, STANDARD]`)
+	cmd.Flags().Var(&createReq.RuntimeEngine, "runtime-engine", `Determines the cluster's runtime engine, either standard or Photon. Supported values: [NULL, PHOTON, STANDARD]`)
 	cmd.Flags().StringVar(&createReq.SingleUserName, "single-user-name", createReq.SingleUserName, `Single user name if data_security_mode is SINGLE_USER.`)
 	// TODO: map via StringToStringVar: spark_conf
 	// TODO: map via StringToStringVar: spark_env_vars

@@ -236,6 +236,12 @@ func newCreate() *cobra.Command {
   If Databricks acquires at least 85% of the requested on-demand nodes, cluster
   creation will succeed. Otherwise the cluster will terminate with an
   informative error message.
+
+  Rather than authoring the cluster's JSON definition from scratch, Databricks
+  recommends filling out the [create compute UI] and then copying the generated
+  JSON definition from the UI.
+
+  [create compute UI]: https://docs.databricks.com/compute/configure.html
 
   Arguments:
     SPARK_VERSION: The Spark version of the cluster, e.g. 3.3.x-scala2.11. A list of

@@ -463,7 +469,7 @@ func newEdit() *cobra.Command {
 	cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`)
 	cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`)
 	cmd.Flags().StringVar(&editReq.PolicyId, "policy-id", editReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`)
-	cmd.Flags().Var(&editReq.RuntimeEngine, "runtime-engine", `Decides which runtime engine to be use, e.g. Supported values: [NULL, PHOTON, STANDARD]`)
+	cmd.Flags().Var(&editReq.RuntimeEngine, "runtime-engine", `Determines the cluster's runtime engine, either standard or Photon. Supported values: [NULL, PHOTON, STANDARD]`)
 	cmd.Flags().StringVar(&editReq.SingleUserName, "single-user-name", editReq.SingleUserName, `Single user name if data_security_mode is SINGLE_USER.`)
 	// TODO: map via StringToStringVar: spark_conf
 	// TODO: map via StringToStringVar: spark_env_vars
@@ -76,6 +76,7 @@ import (
 	system_schemas "github.com/databricks/cli/cmd/workspace/system-schemas"
 	table_constraints "github.com/databricks/cli/cmd/workspace/table-constraints"
 	tables "github.com/databricks/cli/cmd/workspace/tables"
+	temporary_table_credentials "github.com/databricks/cli/cmd/workspace/temporary-table-credentials"
 	token_management "github.com/databricks/cli/cmd/workspace/token-management"
 	tokens "github.com/databricks/cli/cmd/workspace/tokens"
 	users "github.com/databricks/cli/cmd/workspace/users"

@@ -165,6 +166,7 @@ func All() []*cobra.Command {
 	out = append(out, system_schemas.New())
 	out = append(out, table_constraints.New())
 	out = append(out, tables.New())
+	out = append(out, temporary_table_credentials.New())
 	out = append(out, token_management.New())
 	out = append(out, tokens.New())
 	out = append(out, users.New())
@@ -0,0 +1,217 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package disable_legacy_access
+
+import (
+	"fmt"
+
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/databricks-sdk-go/service/settings"
+	"github.com/spf13/cobra"
+)
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var cmdOverrides []func(*cobra.Command)
+
+func New() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "disable-legacy-access",
+		Short: `'Disabling legacy access' has the following impacts: 1.`,
+		Long: `'Disabling legacy access' has the following impacts:
+
+  1. Disables direct access to the Hive Metastore. However, you can still access
+  Hive Metastore through HMS Federation. 2. Disables Fallback Mode (docs link)
+  on any External Location access from the workspace. 3. Alters DBFS path access
+  to use External Location permissions in place of legacy credentials. 4.
+  Enforces Unity Catalog access on all path based access.`,
+
+		// This service is being previewed; hide from help output.
+		Hidden: true,
+	}
+
+	// Add methods
+	cmd.AddCommand(newDelete())
+	cmd.AddCommand(newGet())
+	cmd.AddCommand(newUpdate())
+
+	// Apply optional overrides to this command.
+	for _, fn := range cmdOverrides {
+		fn(cmd)
+	}
+
+	return cmd
+}
+
+// start delete command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var deleteOverrides []func(
+	*cobra.Command,
+	*settings.DeleteDisableLegacyAccessRequest,
+)
+
+func newDelete() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var deleteReq settings.DeleteDisableLegacyAccessRequest
+
+	// TODO: short flags
+
+	cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`)
+
+	cmd.Use = "delete"
+	cmd.Short = `Delete Legacy Access Disablement Status.`
+	cmd.Long = `Delete Legacy Access Disablement Status.
+
+  Deletes legacy access disablement status.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(0)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		response, err := w.Settings.DisableLegacyAccess().Delete(ctx, deleteReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range deleteOverrides {
+		fn(cmd, &deleteReq)
+	}
+
+	return cmd
+}
+
+// start get command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var getOverrides []func(
+	*cobra.Command,
+	*settings.GetDisableLegacyAccessRequest,
+)
+
+func newGet() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var getReq settings.GetDisableLegacyAccessRequest
+
+	// TODO: short flags
+
+	cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`)
+
+	cmd.Use = "get"
+	cmd.Short = `Retrieve Legacy Access Disablement Status.`
+	cmd.Long = `Retrieve Legacy Access Disablement Status.
+
+  Retrieves legacy access disablement Status.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(0)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		response, err := w.Settings.DisableLegacyAccess().Get(ctx, getReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range getOverrides {
+		fn(cmd, &getReq)
+	}
+
+	return cmd
+}
+
+// start update command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var updateOverrides []func(
+	*cobra.Command,
+	*settings.UpdateDisableLegacyAccessRequest,
+)
+
+func newUpdate() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var updateReq settings.UpdateDisableLegacyAccessRequest
+	var updateJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Use = "update"
+	cmd.Short = `Update Legacy Access Disablement Status.`
+	cmd.Long = `Update Legacy Access Disablement Status.
+
+  Updates legacy access disablement status.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = updateJson.Unmarshal(&updateReq)
+			if err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}
+
+		response, err := w.Settings.DisableLegacyAccess().Update(ctx, updateReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range updateOverrides {
+		fn(cmd, &updateReq)
+	}
+
+	return cmd
+}
+
+// end service DisableLegacyAccess
@@ -935,6 +935,7 @@ func newUpdate() *cobra.Command {
 	cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
 	cmd.Flags().BoolVar(&updateReq.AllowDuplicateNames, "allow-duplicate-names", updateReq.AllowDuplicateNames, `If false, deployment will fail if name has changed and conflicts the name of another pipeline.`)
+	cmd.Flags().StringVar(&updateReq.BudgetPolicyId, "budget-policy-id", updateReq.BudgetPolicyId, `Budget policy of this pipeline.`)
 	cmd.Flags().StringVar(&updateReq.Catalog, "catalog", updateReq.Catalog, `A catalog in Unity Catalog to publish data from this pipeline to.`)
 	cmd.Flags().StringVar(&updateReq.Channel, "channel", updateReq.Channel, `DLT Release Channel that specifies which version to use.`)
 	// TODO: array: clusters
@@ -53,6 +53,7 @@ func New() *cobra.Command {
 	cmd.AddCommand(newLogs())
 	cmd.AddCommand(newPatch())
 	cmd.AddCommand(newPut())
+	cmd.AddCommand(newPutAiGateway())
 	cmd.AddCommand(newQuery())
 	cmd.AddCommand(newSetPermissions())
 	cmd.AddCommand(newUpdateConfig())

@@ -151,6 +152,7 @@ func newCreate() *cobra.Command {
 	// TODO: short flags
 	cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
+	// TODO: complex arg: ai_gateway
 	// TODO: array: rate_limits
 	cmd.Flags().BoolVar(&createReq.RouteOptimized, "route-optimized", createReq.RouteOptimized, `Enable route optimization for the serving endpoint.`)
 	// TODO: array: tags

@@ -754,8 +756,9 @@ func newPut() *cobra.Command {
 	cmd.Short = `Update rate limits of a serving endpoint.`
 	cmd.Long = `Update rate limits of a serving endpoint.
 
-  Used to update the rate limits of a serving endpoint. NOTE: only external and
-  foundation model endpoints are supported as of now.
+  Used to update the rate limits of a serving endpoint. NOTE: Only foundation
+  model endpoints are currently supported. For external models, use AI Gateway
+  to manage rate limits.
 
   Arguments:
     NAME: The name of the serving endpoint whose rate limits are being updated. This

@@ -800,6 +803,79 @@ func newPut() *cobra.Command {
 	return cmd
 }
 
+// start put-ai-gateway command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var putAiGatewayOverrides []func(
+	*cobra.Command,
+	*serving.PutAiGatewayRequest,
+)
+
+func newPutAiGateway() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var putAiGatewayReq serving.PutAiGatewayRequest
+	var putAiGatewayJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&putAiGatewayJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	// TODO: complex arg: guardrails
+	// TODO: complex arg: inference_table_config
+	// TODO: array: rate_limits
+	// TODO: complex arg: usage_tracking_config
+
+	cmd.Use = "put-ai-gateway NAME"
+	cmd.Short = `Update AI Gateway of a serving endpoint.`
+	cmd.Long = `Update AI Gateway of a serving endpoint.
+
+  Used to update the AI Gateway of a serving endpoint. NOTE: Only external model
+  endpoints are currently supported.
+
+  Arguments:
+    NAME: The name of the serving endpoint whose AI Gateway is being updated. This
+      field is required.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(1)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = putAiGatewayJson.Unmarshal(&putAiGatewayReq)
+			if err != nil {
+				return err
+			}
+		}
+		putAiGatewayReq.Name = args[0]
+
+		response, err := w.ServingEndpoints.PutAiGateway(ctx, putAiGatewayReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range putAiGatewayOverrides {
+		fn(cmd, &putAiGatewayReq)
+	}
+
+	return cmd
+}
+
 // start query command
 
 // Slice with functions to override default command behavior.
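Beyond the CLI entry point, the new `put-ai-gateway` verb is a thin wrapper over the SDK call visible above. A hedged sketch of driving it from Go directly, using only identifiers that appear in the generated command (the endpoint name is a placeholder; the complex gateway fields flagged as TODO above would be supplied through the request struct or the command's `--json` flag):

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/serving"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Only Name is set here; guardrails, inference_table_config, rate_limits,
	// and usage_tracking_config are the complex arguments still marked TODO
	// in the generated flags above.
	req := serving.PutAiGatewayRequest{Name: "my-endpoint"} // placeholder name
	resp, err := w.ServingEndpoints.PutAiGateway(ctx, req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("updated AI gateway: %+v\n", resp)
}
```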
@@ -8,6 +8,7 @@ import (
 	automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update"
 	compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile"
 	default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace"
+	disable_legacy_access "github.com/databricks/cli/cmd/workspace/disable-legacy-access"
 	enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring"
 	restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins"
 )

@@ -31,6 +32,7 @@ func New() *cobra.Command {
 	cmd.AddCommand(automatic_cluster_update.New())
 	cmd.AddCommand(compliance_security_profile.New())
 	cmd.AddCommand(default_namespace.New())
+	cmd.AddCommand(disable_legacy_access.New())
 	cmd.AddCommand(enhanced_security_monitoring.New())
 	cmd.AddCommand(restrict_workspace_admins.New())
 
@@ -220,6 +220,7 @@ func newGet() *cobra.Command {
 
 	cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include tables in the response for which the principal can only access selective metadata for.`)
 	cmd.Flags().BoolVar(&getReq.IncludeDeltaMetadata, "include-delta-metadata", getReq.IncludeDeltaMetadata, `Whether delta metadata should be included in the response.`)
+	cmd.Flags().BoolVar(&getReq.IncludeManifestCapabilities, "include-manifest-capabilities", getReq.IncludeManifestCapabilities, `Whether to include a manifest containing capabilities the table has.`)
 
 	cmd.Use = "get FULL_NAME"
 	cmd.Short = `Get a table.`

@@ -299,6 +300,7 @@ func newList() *cobra.Command {
 
 	cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include tables in the response for which the principal can only access selective metadata for.`)
 	cmd.Flags().BoolVar(&listReq.IncludeDeltaMetadata, "include-delta-metadata", listReq.IncludeDeltaMetadata, `Whether delta metadata should be included in the response.`)
+	cmd.Flags().BoolVar(&listReq.IncludeManifestCapabilities, "include-manifest-capabilities", listReq.IncludeManifestCapabilities, `Whether to include a manifest containing capabilities the table has.`)
 	cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of tables to return.`)
 	cmd.Flags().BoolVar(&listReq.OmitColumns, "omit-columns", listReq.OmitColumns, `Whether to omit the columns of the table from the response or not.`)
 	cmd.Flags().BoolVar(&listReq.OmitProperties, "omit-properties", listReq.OmitProperties, `Whether to omit the properties of the table from the response or not.`)

@@ -366,6 +368,7 @@ func newListSummaries() *cobra.Command {
 
 	// TODO: short flags
 
+	cmd.Flags().BoolVar(&listSummariesReq.IncludeManifestCapabilities, "include-manifest-capabilities", listSummariesReq.IncludeManifestCapabilities, `Whether to include a manifest containing capabilities the table has.`)
 	cmd.Flags().IntVar(&listSummariesReq.MaxResults, "max-results", listSummariesReq.MaxResults, `Maximum number of summaries for tables to return.`)
 	cmd.Flags().StringVar(&listSummariesReq.PageToken, "page-token", listSummariesReq.PageToken, `Opaque pagination token to go to next page based on previous query.`)
 	cmd.Flags().StringVar(&listSummariesReq.SchemaNamePattern, "schema-name-pattern", listSummariesReq.SchemaNamePattern, `A sql LIKE pattern (% and _) for schema names.`)
cmd/workspace/temporary-table-credentials/temporary-table-credentials.go (new executable file, generated, +122 lines)
@@ -0,0 +1,122 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package temporary_table_credentials
+
+import (
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/databricks-sdk-go/service/catalog"
+	"github.com/spf13/cobra"
+)
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var cmdOverrides []func(*cobra.Command)
+
+func New() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "temporary-table-credentials",
+		Short: `Temporary Table Credentials refer to short-lived, downscoped credentials used to access cloud storage locations where table data is stored in Databricks.`,
+		Long: `Temporary Table Credentials refer to short-lived, downscoped credentials used
+  to access cloud storage locations where table data is stored in Databricks.
+  These credentials are employed to provide secure and time-limited access to
+  data in cloud environments such as AWS, Azure, and Google Cloud. Each cloud
+  provider has its own type of credentials: AWS uses temporary session tokens
+  via AWS Security Token Service (STS), Azure utilizes Shared Access Signatures
+  (SAS) for its data storage services, and Google Cloud supports temporary
+  credentials through OAuth 2.0. Temporary table credentials ensure that data
+  access is limited in scope and duration, reducing the risk of unauthorized
+  access or misuse. To use the temporary table credentials API, a metastore
+  admin needs to enable the external_access_enabled flag (off by default) at the
+  metastore level, and user needs to be granted the EXTERNAL USE SCHEMA
+  permission at the schema level by catalog admin. Note that EXTERNAL USE SCHEMA
+  is a schema level permission that can only be granted by catalog admin
+  explicitly and is not included in schema ownership or ALL PRIVILEGES on the
+  schema for security reason.`,
+		GroupID: "catalog",
+		Annotations: map[string]string{
+			"package": "catalog",
+		},
+	}
+
+	// Add methods
+	cmd.AddCommand(newGenerateTemporaryTableCredentials())
+
+	// Apply optional overrides to this command.
+	for _, fn := range cmdOverrides {
+		fn(cmd)
+	}
+
+	return cmd
+}
+
+// start generate-temporary-table-credentials command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var generateTemporaryTableCredentialsOverrides []func(
+	*cobra.Command,
+	*catalog.GenerateTemporaryTableCredentialRequest,
+)
+
+func newGenerateTemporaryTableCredentials() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var generateTemporaryTableCredentialsReq catalog.GenerateTemporaryTableCredentialRequest
+	var generateTemporaryTableCredentialsJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&generateTemporaryTableCredentialsJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Flags().Var(&generateTemporaryTableCredentialsReq.Operation, "operation", `The operation performed against the table data, either READ or READ_WRITE. Supported values: [READ, READ_WRITE]`)
+	cmd.Flags().StringVar(&generateTemporaryTableCredentialsReq.TableId, "table-id", generateTemporaryTableCredentialsReq.TableId, `UUID of the table to read or write.`)
+
+	cmd.Use = "generate-temporary-table-credentials"
+	cmd.Short = `Generate a temporary table credential.`
+	cmd.Long = `Generate a temporary table credential.
+
+  Get a short-lived credential for directly accessing the table data on cloud
+  storage. The metastore must have external_access_enabled flag set to true
+  (default false). The caller must have EXTERNAL_USE_SCHEMA privilege on the
+  parent schema and this privilege can only be granted by catalog owners.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(0)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = generateTemporaryTableCredentialsJson.Unmarshal(&generateTemporaryTableCredentialsReq)
+			if err != nil {
+				return err
+			}
+		}
+
+		response, err := w.TemporaryTableCredentials.GenerateTemporaryTableCredentials(ctx, generateTemporaryTableCredentialsReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range generateTemporaryTableCredentialsOverrides {
+		fn(cmd, &generateTemporaryTableCredentialsReq)
+	}
+
+	return cmd
+}
+
+// end service TemporaryTableCredentials
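The equivalent SDK call for the new command, sketched with only the identifiers visible in the generated file above (the table UUID is a placeholder). Since the generated command binds `Operation` with `cmd.Flags().Var`, the field evidently implements the flag `Value` interface, so the sketch assumes it can also be set from its wire string:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	req := catalog.GenerateTemporaryTableCredentialRequest{
		TableId: "00000000-0000-0000-0000-000000000000", // placeholder table UUID
	}
	// Set the enum from its wire value, mirroring --operation READ on the CLI.
	if err := req.Operation.Set("READ"); err != nil {
		panic(err)
	}

	resp, err := w.TemporaryTableCredentials.GenerateTemporaryTableCredentials(ctx, req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("temporary credential: %+v\n", resp)
}
```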
go.mod (11 lines changed)
@@ -1,16 +1,18 @@
 module github.com/databricks/cli
 
-go 1.22
+go 1.22.0
+
+toolchain go1.22.7
 
 require (
 	github.com/Masterminds/semver/v3 v3.3.0 // MIT
 	github.com/briandowns/spinner v1.23.1 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.46.0 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.47.0 // Apache 2.0
 	github.com/fatih/color v1.17.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
 	github.com/hashicorp/go-version v1.7.0 // MPL 2.0
-	github.com/hashicorp/hc-install v0.7.0 // MPL 2.0
+	github.com/hashicorp/hc-install v0.9.0 // MPL 2.0
 	github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0
 	github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0
 	github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause

@@ -22,7 +24,7 @@ require (
 	github.com/spf13/pflag v1.0.5 // BSD-3-Clause
 	github.com/stretchr/testify v1.9.0 // MIT
 	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
-	golang.org/x/mod v0.20.0
+	golang.org/x/mod v0.21.0
 	golang.org/x/oauth2 v0.23.0
 	golang.org/x/sync v0.8.0
 	golang.org/x/term v0.24.0

@@ -49,6 +51,7 @@ require (
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.46.0 h1:D0TxmtSVAOsdnfzH4OGtAmcq+8TyA7Z6fA6JEYhupeY=
-github.com/databricks/databricks-sdk-go v0.46.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
+github.com/databricks/databricks-sdk-go v0.47.0 h1:eE7dN9axviL8+s10jnQAayOYDaR+Mfu7E9COGjO4lrQ=
+github.com/databricks/databricks-sdk-go v0.47.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

@@ -99,10 +99,14 @@ github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw
 github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
 github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
 github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk=
-github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA=
+github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6eLhghE=
+github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg=
 github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ=
 github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg=
 github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec=

@@ -180,8 +184,8 @@ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
-golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -36,8 +36,8 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) {
 	wsDir := internal.TemporaryWorkspaceDir(t, w)
 
 	b := &bundle.Bundle{
-		RootPath:     dir,
-		SyncRootPath: dir,
+		BundleRootPath: dir,
+		SyncRootPath:   dir,
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -101,8 +101,8 @@ func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) {
 	wsDir := internal.TemporaryWorkspaceDir(t, w)
 
 	b := &bundle.Bundle{
-		RootPath:     dir,
-		SyncRootPath: dir,
+		BundleRootPath: dir,
+		SyncRootPath:   dir,
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -171,8 +171,8 @@ func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) {
 	touchEmptyFile(t, whlPath)
 
 	b := &bundle.Bundle{
-		RootPath:     dir,
-		SyncRootPath: dir,
+		BundleRootPath: dir,
+		SyncRootPath:   dir,
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -121,7 +121,7 @@ You can find that job by opening your workspace and clicking on **Workflows**.
 
 You can also deploy to your production target directly from the command-line.
 The warehouse, catalog, and schema for that target are configured in databricks.yml.
-When deploying to this target, note that the default job at resources/{{.project_name}}_job.yml
+When deploying to this target, note that the default job at resources/{{.project_name}}.job.yml
 has a schedule set that runs every day. The schedule is paused when deploying in development mode
 (see https://docs.databricks.com/dev-tools/bundles/deployment-modes.html).
 
|
@ -18,7 +18,7 @@ This file only template directives; it is skipped for the actual output.
|
|||
|
||||
{{if $notDLT}}
|
||||
{{skip "{{.project_name}}/src/dlt_pipeline.ipynb"}}
|
||||
{{skip "{{.project_name}}/resources/{{.project_name}}_pipeline.yml"}}
|
||||
{{skip "{{.project_name}}/resources/{{.project_name}}.pipeline.yml"}}
|
||||
{{end}}
|
||||
|
||||
{{if $notNotebook}}
|
||||
|
@ -26,7 +26,7 @@ This file only template directives; it is skipped for the actual output.
|
|||
{{end}}
|
||||
|
||||
{{if (and $notDLT $notNotebook $notPython)}}
|
||||
{{skip "{{.project_name}}/resources/{{.project_name}}_job.yml"}}
|
||||
{{skip "{{.project_name}}/resources/{{.project_name}}.job.yml"}}
|
||||
{{else}}
|
||||
{{skip "{{.project_name}}/resources/.gitkeep"}}
|
||||
{{end}}
|
||||
|
|
|
@ -29,7 +29,7 @@ The '{{.project_name}}' project was generated by using the default-python templa
|
|||
```
|
||||
|
||||
Note that the default job from the template has a schedule that runs every day
|
||||
(defined in resources/{{.project_name}}_job.yml). The schedule
|
||||
(defined in resources/{{.project_name}}.job.yml). The schedule
|
||||
is paused when deploying in development mode (see
|
||||
https://docs.databricks.com/dev-tools/bundles/deployment-modes.html).
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ resources:
|
|||
- task_key: notebook_task
|
||||
{{- end}}
|
||||
pipeline_task:
|
||||
{{- /* TODO: we should find a way that doesn't use magics for the below, like ./{{project_name}}_pipeline.yml */}}
|
||||
{{- /* TODO: we should find a way that doesn't use magics for the below, like ./{{project_name}}.pipeline.yml */}}
|
||||
pipeline_id: ${resources.pipelines.{{.project_name}}_pipeline.id}
|
||||
{{end -}}
|
||||
{{- if (eq .include_python "yes") }}
|
|
@@ -14,7 +14,7 @@
    "source": [
     "# DLT pipeline\n",
     "\n",
-    "This Delta Live Tables (DLT) definition is executed using a pipeline defined in resources/{{.project_name}}_pipeline.yml."
+    "This Delta Live Tables (DLT) definition is executed using a pipeline defined in resources/{{.project_name}}.pipeline.yml."
    ]
   },
   {
|
@ -14,7 +14,7 @@
|
|||
"source": [
|
||||
"# Default notebook\n",
|
||||
"\n",
|
||||
"This default notebook is executed using Databricks Workflows as defined in resources/{{.project_name}}_job.yml."
|
||||
"This default notebook is executed using Databricks Workflows as defined in resources/{{.project_name}}.job.yml."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml)
|
||||
-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql.job.yml)
|
||||
|
||||
USE CATALOG {{"{{"}}catalog{{"}}"}};
|
||||
USE IDENTIFIER({{"{{"}}schema{{"}}"}});
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml)
|
||||
-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql.job.yml)
|
||||
--
|
||||
-- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/
|
||||
-- See also https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-streaming-table.html
|
||||
|
|