mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'databricks/main' into faster-dev-mode-kill-kill
This commit is contained in:
commit d37b54e981
@@ -1 +1 @@
-3821dc51952c5cf1c276dd84967da011b191e64a
+93763b0d7ae908520c229c786fff28b8fd623261
@@ -89,3 +89,29 @@ jobs:
       run: |
         # Exit with status code 1 if there are differences (i.e. unformatted files)
         git diff --exit-code
+
+  validate-bundle-schema:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.21.x
+
+      # Github repo: https://github.com/ajv-validator/ajv-cli
+      - name: Install ajv-cli
+        run: npm install -g ajv-cli@5.0.0
+
+      # Assert that the generated bundle schema is a valid JSON schema by using
+      # ajv-cli to validate it against a sample configuration file.
+      # By default the ajv-cli runs in strict mode which will fail if the schema
+      # itself is not valid. Strict mode is more strict than the JSON schema
+      # specification. See for details: https://ajv.js.org/options.html#strict-mode-options
+      - name: Validate bundle schema
+        run: |
+          go run main.go bundle schema > schema.json
+          ajv -s schema.json -d ./bundle/tests/basic/databricks.yml
CHANGELOG.md (45 changes)
@@ -1,5 +1,50 @@
 # Version changelog
+
+## 0.216.0
+
+CLI:
+* Propagate correct `User-Agent` for CLI during OAuth flow ([#1264](https://github.com/databricks/cli/pull/1264)).
+* Add usage string when command fails with incorrect arguments ([#1276](https://github.com/databricks/cli/pull/1276)).
+
+Bundles:
+* Include `dyn.Path` as argument to the visit callback function ([#1260](https://github.com/databricks/cli/pull/1260)).
+* Inline logic to set a value in `dyn.SetByPath` ([#1261](https://github.com/databricks/cli/pull/1261)).
+* Add assertions for the `dyn.Path` argument to the visit callback ([#1265](https://github.com/databricks/cli/pull/1265)).
+* Add `dyn.MapByPattern` to map a function to values with matching paths ([#1266](https://github.com/databricks/cli/pull/1266)).
+* Filter current user from resource permissions ([#1262](https://github.com/databricks/cli/pull/1262)).
+* Retain location annotation when expanding globs for pipeline libraries ([#1274](https://github.com/databricks/cli/pull/1274)).
+* Added deployment state for bundles ([#1267](https://github.com/databricks/cli/pull/1267)).
+* Do CheckRunningResource only after terraform.Write ([#1292](https://github.com/databricks/cli/pull/1292)).
+* Rewrite relative paths using `dyn.Location` of the underlying value ([#1273](https://github.com/databricks/cli/pull/1273)).
+* Push deployment state right after files upload ([#1293](https://github.com/databricks/cli/pull/1293)).
+* Make `Append` function to `dyn.Path` return independent slice ([#1295](https://github.com/databricks/cli/pull/1295)).
+* Move bundle tests into bundle/tests ([#1299](https://github.com/databricks/cli/pull/1299)).
+* Upgrade Terraform provider to 1.38.0 ([#1308](https://github.com/databricks/cli/pull/1308)).
+
+Internal:
+* Add integration test for mlops-stacks initialization ([#1155](https://github.com/databricks/cli/pull/1155)).
+* Update actions/setup-python to v5 ([#1290](https://github.com/databricks/cli/pull/1290)).
+* Update codecov/codecov-action to v4 ([#1291](https://github.com/databricks/cli/pull/1291)).
+
+API Changes:
+* Changed `databricks catalogs list` command.
+* Changed `databricks online-tables create` command.
+* Changed `databricks lakeview publish` command.
+* Added `databricks lakeview create` command.
+* Added `databricks lakeview get` command.
+* Added `databricks lakeview get-published` command.
+* Added `databricks lakeview trash` command.
+* Added `databricks lakeview update` command.
+* Moved settings related commands to `databricks settings` and `databricks account settings`.
+
+OpenAPI commit 93763b0d7ae908520c229c786fff28b8fd623261 (2024-03-20)
+
+Dependency updates:
+* Bump golang.org/x/oauth2 from 0.17.0 to 0.18.0 ([#1270](https://github.com/databricks/cli/pull/1270)).
+* Bump golang.org/x/mod from 0.15.0 to 0.16.0 ([#1271](https://github.com/databricks/cli/pull/1271)).
+* Update Go SDK to v0.35.0 ([#1300](https://github.com/databricks/cli/pull/1300)).
+* Update Go SDK to v0.36.0 ([#1304](https://github.com/databricks/cli/pull/1304)).
+
 ## 0.215.0
 
 CLI:
@@ -7,6 +7,7 @@ import (
 	"slices"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"golang.org/x/exp/maps"
 )
 
@@ -21,7 +22,7 @@ func (m *all) Name() string {
 	return fmt.Sprintf("artifacts.%sAll", m.name)
 }
 
-func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *all) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	var out []bundle.Mutator
 
 	// Iterate with stable ordering.
@@ -31,7 +32,7 @@ func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error {
 	for _, name := range keys {
 		m, err := m.fn(name)
 		if err != nil {
-			return err
+			return diag.FromErr(err)
 		}
 		if m != nil {
 			out = append(out, m)
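The change above sets the pattern for the rest of this commit: every mutator's Apply now returns diag.Diagnostics instead of error, with diag.FromErr wrapping propagated errors and diag.Errorf replacing fmt.Errorf. Below is a minimal sketch of a mutator written against the interface exactly as it appears in this diff; the noWorkspaceHost mutator itself is hypothetical and only illustrates the contract.

package example

import (
	"context"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

// noWorkspaceHost is a hypothetical mutator used only to illustrate the
// Apply contract introduced in this commit.
type noWorkspaceHost struct{}

func NoWorkspaceHost() bundle.Mutator {
	return &noWorkspaceHost{}
}

func (m *noWorkspaceHost) Name() string {
	return "example.NoWorkspaceHost"
}

// Apply returns diag.Diagnostics rather than error: nil still means success,
// plain errors are wrapped with diag.FromErr, and formatted messages use diag.Errorf.
func (m *noWorkspaceHost) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if b.Config.Workspace.Host == "" {
		return diag.Errorf("workspace host is not configured")
	}
	return nil
}

Callers consume the result the same way the tests in this diff do: diags := bundle.Apply(ctx, b, NoWorkspaceHost()), followed by a check of diags.Error() or diags.HasError().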
@@ -14,6 +14,7 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/libraries"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/filer"
 	"github.com/databricks/cli/libs/log"
 )
@@ -57,17 +58,17 @@ func (m *basicBuild) Name() string {
 	return fmt.Sprintf("artifacts.Build(%s)", m.name)
 }
 
-func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name))
 
 	out, err := artifact.Build(ctx)
 	if err != nil {
-		return fmt.Errorf("build for %s failed, error: %w, output: %s", m.name, err, out)
+		return diag.Errorf("build for %s failed, error: %v, output: %s", m.name, err, out)
 	}
 	log.Infof(ctx, "Build succeeded")
 
@@ -87,29 +88,29 @@ func (m *basicUpload) Name() string {
 	return fmt.Sprintf("artifacts.Upload(%s)", m.name)
 }
 
-func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	if len(artifact.Files) == 0 {
-		return fmt.Errorf("artifact source is not configured: %s", m.name)
+		return diag.Errorf("artifact source is not configured: %s", m.name)
 	}
 
 	uploadPath, err := getUploadBasePath(b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 
 	client, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 
 	err = uploadArtifact(ctx, b, artifact, uploadPath, client)
 	if err != nil {
-		return fmt.Errorf("upload for %s failed, error: %w", m.name, err)
+		return diag.Errorf("upload for %s failed, error: %v", m.name, err)
 	}
 
 	return nil
@@ -5,6 +5,7 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/artifacts/whl"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 )
 
@@ -19,7 +20,7 @@ func (m *autodetect) Name() string {
 	return "artifacts.DetectPackages"
 }
 
-func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// If artifacts section explicitly defined, do not try to auto detect packages
 	if b.Config.Artifacts != nil {
 		log.Debugf(ctx, "artifacts block is defined, skipping auto-detecting")
@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )
 
 func BuildAll() bundle.Mutator {
@@ -27,10 +28,10 @@ func (m *build) Name() string {
 	return fmt.Sprintf("artifacts.Build(%s)", m.name)
 }
 
-func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	// Skip building if build command is not specified or infered
@@ -38,14 +39,14 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
 		// If no build command was specified or infered and there is no
 		// artifact output files specified, artifact is misconfigured
 		if len(artifact.Files) == 0 {
-			return fmt.Errorf("misconfigured artifact: please specify 'build' or 'files' property")
+			return diag.Errorf("misconfigured artifact: please specify 'build' or 'files' property")
 		}
 		return nil
 	}
 
 	// If artifact path is not provided, use bundle root dir
 	if artifact.Path == "" {
-		artifact.Path = b.Config.Path
+		artifact.Path = b.RootPath
 	}
 
 	if !filepath.IsAbs(artifact.Path) {
@@ -7,6 +7,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/artifacts/whl"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
 )
 
 var inferMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{
@@ -41,10 +42,10 @@ func (m *infer) Name() string {
 	return fmt.Sprintf("artifacts.Infer(%s)", m.name)
 }
 
-func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	// only try to infer command if it's not already defined
@@ -7,6 +7,7 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go/service/workspace"
 )
 
@@ -33,14 +34,14 @@ func (m *upload) Name() string {
 	return fmt.Sprintf("artifacts.Upload(%s)", m.name)
 }
 
-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	if len(artifact.Files) == 0 {
-		return fmt.Errorf("artifact source is not configured: %s", m.name)
+		return diag.Errorf("artifact source is not configured: %s", m.name)
 	}
 
 	// Check if source paths are absolute, if not, make them absolute
@@ -57,11 +58,11 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
 	for _, f := range artifact.Files {
 		matches, err := filepath.Glob(f.Source)
 		if err != nil {
-			return fmt.Errorf("unable to find files for %s: %w", f.Source, err)
+			return diag.Errorf("unable to find files for %s: %v", f.Source, err)
 		}
 
 		if len(matches) == 0 {
-			return fmt.Errorf("no files found for %s", f.Source)
+			return diag.Errorf("no files found for %s", f.Source)
 		}
 
 		for _, match := range matches {
@@ -81,10 +82,10 @@ func (m *cleanUp) Name() string {
 	return "artifacts.CleanUp"
 }
 
-func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	uploadPath, err := getUploadBasePath(b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 
 	b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{
@@ -94,7 +95,7 @@ func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) error {
 
 	err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, uploadPath)
 	if err != nil {
-		return fmt.Errorf("unable to create directory for %s: %w", uploadPath, err)
+		return diag.Errorf("unable to create directory for %s: %v", uploadPath, err)
 	}
 
 	return nil
@@ -9,13 +9,14 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/testfile"
 	"github.com/stretchr/testify/require"
 )
 
 type noop struct{}
 
-func (n *noop) Apply(context.Context, *bundle.Bundle) error {
+func (n *noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics {
 	return nil
 }
 
@@ -35,8 +36,8 @@ func TestExpandGlobFilesSource(t *testing.T) {
 	t2.Close(t)
 
 	b := &bundle.Bundle{
+		RootPath: rootPath,
 		Config: config.Root{
-			Path: rootPath,
 			Artifacts: map[string]*config.Artifact{
 				"test": {
 					Type: "custom",
@@ -57,8 +58,8 @@ func TestExpandGlobFilesSource(t *testing.T) {
 		return &noop{}
 	}
 
-	err = bundle.Apply(context.Background(), b, u)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, u)
+	require.NoError(t, diags.Error())
 
 	require.Equal(t, 2, len(b.Config.Artifacts["test"].Files))
 	require.Equal(t, filepath.Join(rootPath, "test", "myjar1.jar"), b.Config.Artifacts["test"].Files[0].Source)
@@ -71,8 +72,8 @@ func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) {
 	require.NoError(t, err)
 
 	b := &bundle.Bundle{
+		RootPath: rootPath,
 		Config: config.Root{
-			Path: rootPath,
 			Artifacts: map[string]*config.Artifact{
 				"test": {
 					Type: "custom",
@@ -93,6 +94,6 @@ func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) {
 		return &noop{}
 	}
 
-	err = bundle.Apply(context.Background(), b, u)
-	require.ErrorContains(t, err, "no files found for")
+	diags := bundle.Apply(context.Background(), b, u)
+	require.ErrorContains(t, diags.Error(), "no files found for")
 }
@@ -11,6 +11,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 )
 
@@ -25,7 +26,7 @@ func (m *detectPkg) Name() string {
 	return "artifacts.whl.AutoDetect"
 }
 
-func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
 	if len(wheelTasks) == 0 {
 		log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect")
@@ -34,23 +35,23 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error {
 	log.Infof(ctx, "Detecting Python wheel project...")
 
 	// checking if there is setup.py in the bundle root
-	setupPy := filepath.Join(b.Config.Path, "setup.py")
+	setupPy := filepath.Join(b.RootPath, "setup.py")
 	_, err := os.Stat(setupPy)
 	if err != nil {
 		log.Infof(ctx, "No Python wheel project found at bundle root folder")
 		return nil
 	}
 
-	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.Config.Path))
+	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.RootPath))
 	module := extractModuleName(setupPy)
 
 	if b.Config.Artifacts == nil {
 		b.Config.Artifacts = make(map[string]*config.Artifact)
 	}
 
-	pkgPath, err := filepath.Abs(b.Config.Path)
+	pkgPath, err := filepath.Abs(b.RootPath)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	b.Config.Artifacts[module] = &config.Artifact{
 		Path: pkgPath,
@@ -9,6 +9,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/cli/libs/python"
 )
@@ -27,10 +28,10 @@ func (m *build) Name() string {
 	return fmt.Sprintf("artifacts.whl.Build(%s)", m.name)
 }
 
-func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name))
@@ -43,13 +44,13 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
 
 	out, err := artifact.Build(ctx)
 	if err != nil {
-		return fmt.Errorf("build failed %s, error: %w, output: %s", m.name, err, out)
+		return diag.Errorf("build failed %s, error: %v, output: %s", m.name, err, out)
 	}
 	log.Infof(ctx, "Build succeeded")
 
 	wheels := python.FindFilesWithSuffixInPath(distPath, ".whl")
 	if len(wheels) == 0 {
-		return fmt.Errorf("cannot find built wheel in %s for package %s", dir, m.name)
+		return diag.Errorf("cannot find built wheel in %s for package %s", dir, m.name)
 	}
 	for _, wheel := range wheels {
 		artifact.Files = append(artifact.Files, config.ArtifactFile{
@@ -7,6 +7,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 )
 
@@ -20,7 +21,7 @@ func (m *fromLibraries) Name() string {
 	return "artifacts.whl.DefineArtifactsFromLibraries"
 }
 
-func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if len(b.Config.Artifacts) != 0 {
 		log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined")
 		return nil
@@ -29,7 +30,7 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
 	tasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
 	for _, task := range tasks {
 		for _, lib := range task.Libraries {
-			matches, err := filepath.Glob(filepath.Join(b.Config.Path, lib.Whl))
+			matches, err := filepath.Glob(filepath.Join(b.RootPath, lib.Whl))
 			// File referenced from libraries section does not exists, skipping
 			if err != nil {
 				continue
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/python"
 )
 
@@ -12,11 +13,11 @@ type infer struct {
 	name string
 }
 
-func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact := b.Config.Artifacts[m.name]
 	py, err := python.DetectExecutable(ctx)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 
 	// Note: using --build-number (build tag) flag does not help with re-installing
@@ -30,6 +30,10 @@ import (
 const internalFolder = ".internal"
 
 type Bundle struct {
+	// RootPath contains the directory path to the root of the bundle.
+	// It is set when we instantiate a new bundle instance.
+	RootPath string
+
 	Config config.Root
 
 	// Metadata about the bundle deployment. This is the interface Databricks services
@@ -63,33 +67,14 @@ type Bundle struct {
 }
 
 func Load(ctx context.Context, path string) (*Bundle, error) {
-	b := &Bundle{}
-	stat, err := os.Stat(path)
-	if err != nil {
-		return nil, err
+	b := &Bundle{
+		RootPath: filepath.Clean(path),
 	}
 	configFile, err := config.FileNames.FindInPath(path)
 	if err != nil {
-		_, hasRootEnv := env.Root(ctx)
-		_, hasIncludesEnv := env.Includes(ctx)
-		if hasRootEnv && hasIncludesEnv && stat.IsDir() {
-			log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path)
-			b.Config = config.Root{
-				Path: path,
-				Bundle: config.Bundle{
-					Name: filepath.Base(path),
-				},
-			}
-			return b, nil
-		}
 		return nil, err
 	}
-	log.Debugf(ctx, "Loading bundle configuration from: %s", configFile)
-	root, err := config.Load(configFile)
-	if err != nil {
-		return nil, err
-	}
-	b.Config = *root
+	log.Debugf(ctx, "Found bundle root at %s (file %s)", b.RootPath, configFile)
 	return b, nil
 }
 
@@ -158,7 +143,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error)
 	if !exists || cacheDirName == "" {
 		cacheDirName = filepath.Join(
 			// Anchor at bundle root directory.
-			b.Config.Path,
+			b.RootPath,
 			// Static cache directory.
 			".databricks",
 			"bundle",
@@ -210,7 +195,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
+	internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
 	if err != nil {
 		return nil, err
 	}
@@ -218,7 +203,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
 }
 
 func (b *Bundle) GitRepository() (*git.Repository, error) {
-	rootPath, err := folders.FindDirWithLeaf(b.Config.Path, ".git")
+	rootPath, err := folders.FindDirWithLeaf(b.RootPath, ".git")
 	if err != nil {
 		return nil, fmt.Errorf("unable to locate repository root: %w", err)
 	}
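The hunks above introduce Bundle.RootPath and stop stashing the root directory in b.Config.Path: Load now records filepath.Clean(path) on the bundle itself, and every call site that anchored file operations at b.Config.Path switches to b.RootPath. A small illustrative sketch of the resulting call pattern follows; the resolveInBundle helper is hypothetical and only shows how a path is anchored at the bundle root after this change.

package example

import (
	"context"
	"path/filepath"

	"github.com/databricks/cli/bundle"
)

// resolveInBundle loads the bundle rooted at dir and anchors a relative
// path at its root, using the RootPath field added in this commit.
func resolveInBundle(ctx context.Context, dir, rel string) (string, error) {
	b, err := bundle.Load(ctx, dir)
	if err != nil {
		return "", err
	}
	// RootPath is set by Load to filepath.Clean(dir).
	return filepath.Join(b.RootPath, rel), nil
}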
@@ -20,8 +20,8 @@ func TestLoadNotExists(t *testing.T) {
 
 func TestLoadExists(t *testing.T) {
 	b, err := Load(context.Background(), "./tests/basic")
-	require.Nil(t, err)
-	assert.Equal(t, "basic", b.Config.Bundle.Name)
+	assert.NoError(t, err)
+	assert.NotNil(t, b)
 }
 
 func TestBundleCacheDir(t *testing.T) {
@@ -77,7 +77,7 @@ func TestBundleMustLoadSuccess(t *testing.T) {
 	t.Setenv(env.RootVariable, "./tests/basic")
 	b, err := MustLoad(context.Background())
 	require.NoError(t, err)
-	assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path))
+	assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
 }
 
 func TestBundleMustLoadFailureWithEnv(t *testing.T) {
@@ -96,7 +96,7 @@ func TestBundleTryLoadSuccess(t *testing.T) {
 	t.Setenv(env.RootVariable, "./tests/basic")
 	b, err := TryLoad(context.Background())
 	require.NoError(t, err)
-	assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path))
+	assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
 }
 
 func TestBundleTryLoadFailureWithEnv(t *testing.T) {
@@ -42,5 +42,5 @@ type Bundle struct {
 	ComputeID string `json:"compute_id,omitempty"`
 
 	// Deployment section specifies deployment related configuration for bundle
-	Deployment Deployment `json:"deployment"`
+	Deployment Deployment `json:"deployment,omitempty"`
 }
@@ -0,0 +1,36 @@
+package loader
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
+)
+
+type entryPoint struct{}
+
+// EntryPoint loads the entry point configuration.
+func EntryPoint() bundle.Mutator {
+	return &entryPoint{}
+}
+
+func (m *entryPoint) Name() string {
+	return "EntryPoint"
+}
+
+func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
+	path, err := config.FileNames.FindInPath(b.RootPath)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	this, diags := config.Load(path)
+	if diags.HasError() {
+		return diags
+	}
+	err = b.Config.Merge(this)
+	if err != nil {
+		diags = diags.Extend(diag.FromErr(err))
+	}
+	return diags
+}
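The EntryPoint mutator above, together with ProcessRootIncludes further down, moves configuration loading into the new bundle/config/loader package. The tests in this diff apply each loader mutator individually with bundle.Apply and inspect the returned diagnostics; a sketch of that usage pattern, assuming only the identifiers visible in this commit, is:

package example

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/loader"
)

// loadBundleConfig applies the loader mutators in order and stops at the
// first phase whose diagnostics contain an error.
func loadBundleConfig(ctx context.Context, b *bundle.Bundle) error {
	mutators := []bundle.Mutator{
		loader.EntryPoint(),          // load the root configuration file found in b.RootPath
		loader.ProcessRootIncludes(), // expand 'include' globs and merge the matched files
	}
	for _, m := range mutators {
		if diags := bundle.Apply(ctx, b, m); diags.HasError() {
			return fmt.Errorf("loading bundle configuration: %w", diags.Error())
		}
	}
	return nil
}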
@@ -0,0 +1,26 @@
+package loader_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config/loader"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEntryPointNoRootPath(t *testing.T) {
+	b := &bundle.Bundle{}
+	diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
+	require.Error(t, diags.Error())
+}
+
+func TestEntryPoint(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "testdata",
+	}
+	diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, "loader_test", b.Config.Bundle.Name)
+}
@@ -1,4 +1,4 @@
-package mutator
+package loader
 
 import (
 	"context"
@@ -6,6 +6,7 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
 )
 
 type processInclude struct {
@@ -25,10 +26,14 @@ func (m *processInclude) Name() string {
 	return fmt.Sprintf("ProcessInclude(%s)", m.relPath)
 }
 
-func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) error {
-	this, err := config.Load(m.fullPath)
-	if err != nil {
-		return err
+func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
+	this, diags := config.Load(m.fullPath)
+	if diags.HasError() {
+		return diags
 	}
-	return b.Config.Merge(this)
+	err := b.Config.Merge(this)
+	if err != nil {
+		diags = diags.Extend(diag.FromErr(err))
+	}
+	return diags
 }
@@ -1,38 +1,35 @@
-package mutator_test
+package loader_test
 
 import (
 	"context"
-	"fmt"
-	"os"
 	"path/filepath"
 	"testing"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/config/loader"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestProcessInclude(t *testing.T) {
 	b := &bundle.Bundle{
+		RootPath: "testdata",
 		Config: config.Root{
-			Path: t.TempDir(),
 			Workspace: config.Workspace{
 				Host: "foo",
 			},
 		},
 	}
 
-	relPath := "./file.yml"
-	fullPath := filepath.Join(b.Config.Path, relPath)
-	f, err := os.Create(fullPath)
-	require.NoError(t, err)
-	fmt.Fprint(f, "workspace:\n host: bar\n")
-	f.Close()
+	m := loader.ProcessInclude(filepath.Join(b.RootPath, "host.yml"), "host.yml")
+	assert.Equal(t, "ProcessInclude(host.yml)", m.Name())
 
+	// Assert the host value prior to applying the mutator
 	assert.Equal(t, "foo", b.Config.Workspace.Host)
-	err = bundle.Apply(context.Background(), b, mutator.ProcessInclude(fullPath, relPath))
-	require.NoError(t, err)
+
+	// Apply the mutator and assert that the host value has been updated
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "bar", b.Config.Workspace.Host)
 }
@@ -1,27 +1,16 @@
-package mutator
+package loader
 
 import (
 	"context"
-	"fmt"
-	"os"
 	"path/filepath"
 	"slices"
 	"strings"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/env"
+	"github.com/databricks/cli/libs/diag"
 )
 
-// Get extra include paths from environment variable
-func getExtraIncludePaths(ctx context.Context) []string {
-	value, exists := env.Includes(ctx)
-	if !exists {
-		return nil
-	}
-	return strings.Split(value, string(os.PathListSeparator))
-}
-
 type processRootIncludes struct{}
 
 // ProcessRootIncludes expands the patterns in the configuration's include list
@@ -34,7 +23,7 @@ func (m *processRootIncludes) Name() string {
 	return "ProcessRootIncludes"
 }
 
-func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	var out []bundle.Mutator
 
 	// Map with files we've already seen to avoid loading them twice.
@@ -48,45 +37,33 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error
 	// This is stored in the bundle configuration for observability.
 	var files []string
 
-	// Converts extra include paths from environment variable to relative paths
-	for _, extraIncludePath := range getExtraIncludePaths(ctx) {
-		if filepath.IsAbs(extraIncludePath) {
-			rel, err := filepath.Rel(b.Config.Path, extraIncludePath)
-			if err != nil {
-				return fmt.Errorf("unable to include file '%s': %w", extraIncludePath, err)
-			}
-			extraIncludePath = rel
-		}
-		b.Config.Include = append(b.Config.Include, extraIncludePath)
-	}
-
 	// For each glob, find all files to load.
 	// Ordering of the list of globs is maintained in the output.
 	// For matches that appear in multiple globs, only the first is kept.
 	for _, entry := range b.Config.Include {
 		// Include paths must be relative.
 		if filepath.IsAbs(entry) {
-			return fmt.Errorf("%s: includes must be relative paths", entry)
+			return diag.Errorf("%s: includes must be relative paths", entry)
 		}
 
 		// Anchor includes to the bundle root path.
-		matches, err := filepath.Glob(filepath.Join(b.Config.Path, entry))
+		matches, err := filepath.Glob(filepath.Join(b.RootPath, entry))
 		if err != nil {
-			return err
+			return diag.FromErr(err)
 		}
 
 		// If the entry is not a glob pattern and no matches found,
 		// return an error because the file defined is not found
 		if len(matches) == 0 && !strings.ContainsAny(entry, "*?[") {
-			return fmt.Errorf("%s defined in 'include' section does not match any files", entry)
+			return diag.Errorf("%s defined in 'include' section does not match any files", entry)
 		}
 
 		// Filter matches to ones we haven't seen yet.
 		var includes []string
 		for _, match := range matches {
-			rel, err := filepath.Rel(b.Config.Path, match)
+			rel, err := filepath.Rel(b.RootPath, match)
 			if err != nil {
-				return err
+				return diag.FromErr(err)
 			}
 			if _, ok := seen[rel]; ok {
 				continue
@@ -99,7 +76,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error
 		slices.Sort(includes)
 		files = append(files, includes...)
 		for _, include := range includes {
-			out = append(out, ProcessInclude(filepath.Join(b.Config.Path, include), include))
+			out = append(out, ProcessInclude(filepath.Join(b.RootPath, include), include))
 		}
 	}
 
@@ -0,0 +1,113 @@
+package loader_test
+
+import (
+	"context"
+	"runtime"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/loader"
+	"github.com/databricks/cli/internal/testutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestProcessRootIncludesEmpty(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: ".",
+	}
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
+}
+
+func TestProcessRootIncludesAbs(t *testing.T) {
+	// remove this once equivalent tests for windows have been set up
+	// or this test has been fixed for windows
+	// date: 28 Nov 2022
+	if runtime.GOOS == "windows" {
+		t.Skip("skipping temperorilty to make windows unit tests green")
+	}
+
+	b := &bundle.Bundle{
+		RootPath: ".",
+		Config: config.Root{
+			Include: []string{
+				"/tmp/*.yml",
+			},
+		},
+	}
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.True(t, diags.HasError())
+	assert.ErrorContains(t, diags.Error(), "must be relative paths")
+}
+
+func TestProcessRootIncludesSingleGlob(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
+		Config: config.Root{
+			Include: []string{
+				"*.yml",
+			},
+		},
+	}
+
+	testutil.Touch(t, b.RootPath, "databricks.yml")
+	testutil.Touch(t, b.RootPath, "a.yml")
+	testutil.Touch(t, b.RootPath, "b.yml")
+
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include)
+}
+
+func TestProcessRootIncludesMultiGlob(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
+		Config: config.Root{
+			Include: []string{
+				"a*.yml",
+				"b*.yml",
+			},
+		},
+	}
+
+	testutil.Touch(t, b.RootPath, "a1.yml")
+	testutil.Touch(t, b.RootPath, "b1.yml")
+
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include)
+}
+
+func TestProcessRootIncludesRemoveDups(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
+		Config: config.Root{
+			Include: []string{
+				"*.yml",
+				"*.yml",
+			},
+		},
+	}
+
+	testutil.Touch(t, b.RootPath, "a.yml")
+
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, []string{"a.yml"}, b.Config.Include)
+}
+
+func TestProcessRootIncludesNotExists(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
+		Config: config.Root{
+			Include: []string{
+				"notexist.yml",
+			},
+		},
+	}
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.True(t, diags.HasError())
+	assert.ErrorContains(t, diags.Error(), "notexist.yml defined in 'include' section does not match any files")
+}
@@ -0,0 +1,2 @@
+bundle:
+  name: loader_test
@@ -0,0 +1,2 @@
+workspace:
+  host: bar
@@ -6,6 +6,7 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
 )
 
 type defineDefaultTarget struct {
@@ -24,7 +25,7 @@ func (m *defineDefaultTarget) Name() string {
 	return fmt.Sprintf("DefineDefaultTarget(%s)", m.name)
 }
 
-func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) error {
+func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Nothing to do if the configuration has at least 1 target.
 	if len(b.Config.Targets) > 0 {
 		return nil
@@ -13,8 +13,9 @@ import (
 
 func TestDefaultTarget(t *testing.T) {
 	b := &bundle.Bundle{}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
+	require.NoError(t, diags.Error())
+
 	env, ok := b.Config.Targets["default"]
 	assert.True(t, ok)
 	assert.Equal(t, &config.Target{}, env)
@@ -28,8 +29,9 @@ func TestDefaultTargetAlreadySpecified(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
+	require.NoError(t, diags.Error())
+
 	_, ok := b.Config.Targets["default"]
 	assert.False(t, ok)
 }
@@ -2,10 +2,10 @@ package mutator
 
 import (
 	"context"
-	"fmt"
 	"path"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )
 
 type defineDefaultWorkspacePaths struct{}
@@ -19,10 +19,10 @@ func (m *defineDefaultWorkspacePaths) Name() string {
 	return "DefaultWorkspacePaths"
 }
 
-func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	root := b.Config.Workspace.RootPath
 	if root == "" {
-		return fmt.Errorf("unable to define default workspace paths: workspace root not defined")
+		return diag.Errorf("unable to define default workspace paths: workspace root not defined")
 	}
 
 	if b.Config.Workspace.FilePath == "" {
@@ -19,8 +19,8 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "/files", b.Config.Workspace.FilePath)
 	assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath)
 	assert.Equal(t, "/state", b.Config.Workspace.StatePath)
@@ -37,8 +37,8 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath)
 	assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath)
 	assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath)
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )
 
 type defineDefaultWorkspaceRoot struct{}
@@ -18,17 +19,17 @@ func (m *defineDefaultWorkspaceRoot) Name() string {
 	return "DefineDefaultWorkspaceRoot"
 }
 
-func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if b.Config.Workspace.RootPath != "" {
 		return nil
 	}
 
 	if b.Config.Bundle.Name == "" {
-		return fmt.Errorf("unable to define default workspace root: bundle name not defined")
+		return diag.Errorf("unable to define default workspace root: bundle name not defined")
 	}
 
 	if b.Config.Bundle.Target == "" {
-		return fmt.Errorf("unable to define default workspace root: bundle target not selected")
+		return diag.Errorf("unable to define default workspace root: bundle target not selected")
 	}
 
 	b.Config.Workspace.RootPath = fmt.Sprintf(
@@ -20,7 +20,8 @@ func TestDefaultWorkspaceRoot(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot())
+	require.NoError(t, diags.Error())
+
 	assert.Equal(t, "~/.bundle/name/environment", b.Config.Workspace.RootPath)
 }
@ -5,6 +5,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/dyn"
|
"github.com/databricks/cli/libs/dyn"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -18,7 +19,7 @@ func (m *environmentsToTargets) Name() string {
|
||||||
return "EnvironmentsToTargets"
|
return "EnvironmentsToTargets"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
// Short circuit if the "environments" key is not set.
|
// Short circuit if the "environments" key is not set.
|
||||||
// This is the common case.
|
// This is the common case.
|
||||||
if b.Config.Environments == nil {
|
if b.Config.Environments == nil {
|
||||||
|
@ -26,7 +27,7 @@ func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) err
|
||||||
}
|
}
|
||||||
|
|
||||||
// The "environments" key is set; validate and rewrite it to "targets".
|
// The "environments" key is set; validate and rewrite it to "targets".
|
||||||
return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||||
environments := v.Get("environments")
|
environments := v.Get("environments")
|
||||||
targets := v.Get("targets")
|
targets := v.Get("targets")
|
||||||
|
|
||||||
|
@ -60,4 +61,6 @@ func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) err
|
||||||
|
|
||||||
return v, nil
|
return v, nil
|
||||||
})
|
})
|
||||||
|
|
||||||
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,6 +8,7 @@ import (
|
||||||
"github.com/databricks/cli/bundle/config"
|
"github.com/databricks/cli/bundle/config"
|
||||||
"github.com/databricks/cli/bundle/config/mutator"
|
"github.com/databricks/cli/bundle/config/mutator"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestEnvironmentsToTargetsWithBothDefined(t *testing.T) {
|
func TestEnvironmentsToTargetsWithBothDefined(t *testing.T) {
|
||||||
|
@ -26,8 +27,8 @@ func TestEnvironmentsToTargetsWithBothDefined(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
|
diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
|
||||||
assert.ErrorContains(t, err, `both 'environments' and 'targets' are specified;`)
|
assert.ErrorContains(t, diags.Error(), `both 'environments' and 'targets' are specified;`)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) {
|
func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) {
|
||||||
|
@ -41,8 +42,8 @@ func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
|
diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
|
||||||
assert.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Len(t, b.Config.Environments, 0)
|
assert.Len(t, b.Config.Environments, 0)
|
||||||
assert.Len(t, b.Config.Targets, 1)
|
assert.Len(t, b.Config.Targets, 1)
|
||||||
}
|
}
|
||||||
|
@ -58,8 +59,8 @@ func TestEnvironmentsToTargetsWithTargetsDefined(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
|
diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
|
||||||
assert.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Len(t, b.Config.Environments, 0)
|
assert.Len(t, b.Config.Environments, 0)
|
||||||
assert.Len(t, b.Config.Targets, 1)
|
assert.Len(t, b.Config.Targets, 1)
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,6 +7,7 @@ import (
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
"github.com/databricks/cli/bundle/libraries"
|
"github.com/databricks/cli/bundle/libraries"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/dyn"
|
"github.com/databricks/cli/libs/dyn"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -92,8 +93,8 @@ func (m *expandPipelineGlobPaths) expandSequence(p dyn.Path, v dyn.Value) (dyn.V
|
||||||
return dyn.NewValue(vs, v.Location()), nil
|
return dyn.NewValue(vs, v.Location()), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) error {
|
func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||||
p := dyn.NewPattern(
|
p := dyn.NewPattern(
|
||||||
dyn.Key("resources"),
|
dyn.Key("resources"),
|
||||||
dyn.Key("pipelines"),
|
dyn.Key("pipelines"),
|
||||||
|
@ -104,6 +105,8 @@ func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) err
|
||||||
// Visit each pipeline's "libraries" field and expand any glob patterns.
|
// Visit each pipeline's "libraries" field and expand any glob patterns.
|
||||||
return dyn.MapByPattern(v, p, m.expandSequence)
|
return dyn.MapByPattern(v, p, m.expandSequence)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*expandPipelineGlobPaths) Name() string {
|
func (*expandPipelineGlobPaths) Name() string {
|
||||||
|
|
|
@ -41,8 +41,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
|
||||||
touchEmptyFile(t, filepath.Join(dir, "skip/test7.py"))
|
touchEmptyFile(t, filepath.Join(dir, "skip/test7.py"))
|
||||||
|
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
|
RootPath: dir,
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Path: dir,
|
|
||||||
Resources: config.Resources{
|
Resources: config.Resources{
|
||||||
Pipelines: map[string]*resources.Pipeline{
|
Pipelines: map[string]*resources.Pipeline{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
|
@ -109,8 +109,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
|
||||||
bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml"))
|
bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml"))
|
||||||
|
|
||||||
m := ExpandPipelineGlobPaths()
|
m := ExpandPipelineGlobPaths()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
|
|
||||||
libraries := b.Config.Resources.Pipelines["pipeline"].Libraries
|
libraries := b.Config.Resources.Pipelines["pipeline"].Libraries
|
||||||
require.Len(t, libraries, 13)
|
require.Len(t, libraries, 13)
|
||||||
|
|
|
@ -7,6 +7,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
)
|
)
|
||||||
|
|
||||||
type expandWorkspaceRoot struct{}
|
type expandWorkspaceRoot struct{}
|
||||||
|
@ -20,15 +21,15 @@ func (m *expandWorkspaceRoot) Name() string {
|
||||||
return "ExpandWorkspaceRoot"
|
return "ExpandWorkspaceRoot"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
root := b.Config.Workspace.RootPath
|
root := b.Config.Workspace.RootPath
|
||||||
if root == "" {
|
if root == "" {
|
||||||
return fmt.Errorf("unable to expand workspace root: workspace root not defined")
|
return diag.Errorf("unable to expand workspace root: workspace root not defined")
|
||||||
}
|
}
|
||||||
|
|
||||||
currentUser := b.Config.Workspace.CurrentUser
|
currentUser := b.Config.Workspace.CurrentUser
|
||||||
if currentUser == nil || currentUser.UserName == "" {
|
if currentUser == nil || currentUser.UserName == "" {
|
||||||
return fmt.Errorf("unable to expand workspace root: current user not set")
|
return diag.Errorf("unable to expand workspace root: current user not set")
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.HasPrefix(root, "~/") {
|
if strings.HasPrefix(root, "~/") {
|
||||||
|
|
|
@ -25,8 +25,8 @@ func TestExpandWorkspaceRoot(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
|
diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
|
assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -43,8 +43,8 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
|
diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Equal(t, "/Users/charly@doe.com/foo", b.Config.Workspace.RootPath)
|
assert.Equal(t, "/Users/charly@doe.com/foo", b.Config.Workspace.RootPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -60,8 +60,8 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
|
diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
|
||||||
require.Error(t, err)
|
require.True(t, diags.HasError())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) {
|
func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) {
|
||||||
|
@ -72,6 +72,6 @@ func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
|
diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
|
||||||
require.Error(t, err)
|
require.True(t, diags.HasError())
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ifMutator struct {
|
type ifMutator struct {
|
||||||
|
@ -22,7 +23,7 @@ func If(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
if m.condition(b) {
|
if m.condition(b) {
|
||||||
return bundle.Apply(ctx, b, m.onTrueMutator)
|
return bundle.Apply(ctx, b, m.onTrueMutator)
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
"github.com/databricks/cli/bundle/config/variable"
|
"github.com/databricks/cli/bundle/config/variable"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
)
|
)
|
||||||
|
|
||||||
type initializeVariables struct{}
|
type initializeVariables struct{}
|
||||||
|
@ -18,7 +19,7 @@ func (m *initializeVariables) Name() string {
|
||||||
return "InitializeVariables"
|
return "InitializeVariables"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
vars := b.Config.Variables
|
vars := b.Config.Variables
|
||||||
for k, v := range vars {
|
for k, v := range vars {
|
||||||
if v == nil {
|
if v == nil {
|
||||||
|
|
|
@ -23,8 +23,8 @@ func TestInitializeVariables(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
|
diags := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.NotNil(t, b.Config.Variables["foo"])
|
assert.NotNil(t, b.Config.Variables["foo"])
|
||||||
assert.NotNil(t, b.Config.Variables["bar"])
|
assert.NotNil(t, b.Config.Variables["bar"])
|
||||||
assert.Equal(t, "This is a description", b.Config.Variables["bar"].Description)
|
assert.Equal(t, "This is a description", b.Config.Variables["bar"].Description)
|
||||||
|
@ -36,7 +36,7 @@ func TestInitializeVariablesWithoutVariables(t *testing.T) {
|
||||||
Variables: nil,
|
Variables: nil,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
|
diags := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Nil(t, b.Config.Variables)
|
assert.Nil(t, b.Config.Variables)
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
)
|
)
|
||||||
|
|
||||||
type initializeWorkspaceClient struct{}
|
type initializeWorkspaceClient struct{}
|
||||||
|
@ -19,7 +20,7 @@ func (m *initializeWorkspaceClient) Name() string {
|
||||||
// Apply initializes the workspace client for the bundle. We do this here so
|
// Apply initializes the workspace client for the bundle. We do this here so
|
||||||
// downstream calls to b.WorkspaceClient() do not panic if there's an error in the
|
// downstream calls to b.WorkspaceClient() do not panic if there's an error in the
|
||||||
// auth configuration.
|
// auth configuration.
|
||||||
func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) error {
|
func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
_, err := b.InitializeWorkspaceClient()
|
_, err := b.InitializeWorkspaceClient()
|
||||||
return err
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/git"
|
"github.com/databricks/cli/libs/git"
|
||||||
"github.com/databricks/cli/libs/log"
|
"github.com/databricks/cli/libs/log"
|
||||||
)
|
)
|
||||||
|
@ -19,11 +20,11 @@ func (m *loadGitDetails) Name() string {
|
||||||
return "LoadGitDetails"
|
return "LoadGitDetails"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
// Load relevant git repository
|
// Load relevant git repository
|
||||||
repo, err := git.NewRepository(b.Config.Path)
|
repo, err := git.NewRepository(b.RootPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read branch name of current checkout
|
// Read branch name of current checkout
|
||||||
|
@ -55,14 +56,14 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compute relative path of the bundle root from the Git repo root.
|
// Compute relative path of the bundle root from the Git repo root.
|
||||||
absBundlePath, err := filepath.Abs(b.Config.Path)
|
absBundlePath, err := filepath.Abs(b.RootPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
// repo.Root() returns the absolute path of the repo
|
// repo.Root() returns the absolute path of the repo
|
||||||
relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath)
|
relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath)
|
b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath)
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/dyn"
|
"github.com/databricks/cli/libs/dyn"
|
||||||
"github.com/databricks/cli/libs/dyn/merge"
|
"github.com/databricks/cli/libs/dyn/merge"
|
||||||
)
|
)
|
||||||
|
@ -29,8 +30,8 @@ func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||||
if v == dyn.NilValue {
|
if v == dyn.NilValue {
|
||||||
return v, nil
|
return v, nil
|
||||||
}
|
}
|
||||||
|
@ -39,4 +40,6 @@ func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) error {
|
||||||
return dyn.Map(job, "job_clusters", merge.ElementsByKey("job_cluster_key", m.jobClusterKey))
|
return dyn.Map(job, "job_clusters", merge.ElementsByKey("job_cluster_key", m.jobClusterKey))
|
||||||
}))
|
}))
|
||||||
})
|
})
|
||||||
|
|
||||||
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -50,8 +50,8 @@ func TestMergeJobClusters(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.MergeJobClusters())
|
diags := bundle.Apply(context.Background(), b, mutator.MergeJobClusters())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, diags.Error())
|
||||||
|
|
||||||
j := b.Config.Resources.Jobs["foo"]
|
j := b.Config.Resources.Jobs["foo"]
|
||||||
|
|
||||||
|
@ -99,7 +99,7 @@ func TestMergeJobClustersWithNilKey(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.MergeJobClusters())
|
diags := bundle.Apply(context.Background(), b, mutator.MergeJobClusters())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, diags.Error())
|
||||||
assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1)
|
assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1)
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/dyn"
|
"github.com/databricks/cli/libs/dyn"
|
||||||
"github.com/databricks/cli/libs/dyn/merge"
|
"github.com/databricks/cli/libs/dyn/merge"
|
||||||
)
|
)
|
||||||
|
@ -29,8 +30,8 @@ func (m *mergeJobTasks) taskKeyString(v dyn.Value) string {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||||
if v == dyn.NilValue {
|
if v == dyn.NilValue {
|
||||||
return v, nil
|
return v, nil
|
||||||
}
|
}
|
||||||
|
@ -39,4 +40,6 @@ func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) error {
|
||||||
return dyn.Map(job, "tasks", merge.ElementsByKey("task_key", m.taskKeyString))
|
return dyn.Map(job, "tasks", merge.ElementsByKey("task_key", m.taskKeyString))
|
||||||
}))
|
}))
|
||||||
})
|
})
|
||||||
|
|
||||||
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -58,8 +58,8 @@ func TestMergeJobTasks(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.MergeJobTasks())
|
diags := bundle.Apply(context.Background(), b, mutator.MergeJobTasks())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, diags.Error())
|
||||||
|
|
||||||
j := b.Config.Resources.Jobs["foo"]
|
j := b.Config.Resources.Jobs["foo"]
|
||||||
|
|
||||||
|
@ -111,7 +111,7 @@ func TestMergeJobTasksWithNilKey(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.MergeJobTasks())
|
diags := bundle.Apply(context.Background(), b, mutator.MergeJobTasks())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, diags.Error())
|
||||||
assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 1)
|
assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 1)
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/dyn"
|
"github.com/databricks/cli/libs/dyn"
|
||||||
"github.com/databricks/cli/libs/dyn/merge"
|
"github.com/databricks/cli/libs/dyn/merge"
|
||||||
)
|
)
|
||||||
|
@ -32,8 +33,8 @@ func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||||
if v == dyn.NilValue {
|
if v == dyn.NilValue {
|
||||||
return v, nil
|
return v, nil
|
||||||
}
|
}
|
||||||
|
@ -42,4 +43,6 @@ func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) err
|
||||||
return dyn.Map(pipeline, "clusters", merge.ElementsByKey("label", m.clusterLabel))
|
return dyn.Map(pipeline, "clusters", merge.ElementsByKey("label", m.clusterLabel))
|
||||||
}))
|
}))
|
||||||
})
|
})
|
||||||
|
|
||||||
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -42,8 +42,8 @@ func TestMergePipelineClusters(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
|
diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, diags.Error())
|
||||||
|
|
||||||
p := b.Config.Resources.Pipelines["foo"]
|
p := b.Config.Resources.Pipelines["foo"]
|
||||||
|
|
||||||
|
@ -86,8 +86,8 @@ func TestMergePipelineClustersCaseInsensitive(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
|
diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, diags.Error())
|
||||||
|
|
||||||
p := b.Config.Resources.Pipelines["foo"]
|
p := b.Config.Resources.Pipelines["foo"]
|
||||||
assert.Len(t, p.Clusters, 1)
|
assert.Len(t, p.Clusters, 1)
|
||||||
|
@ -107,8 +107,8 @@ func TestMergePipelineClustersNilPipelines(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
|
diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, diags.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMergePipelineClustersEmptyPipelines(t *testing.T) {
|
func TestMergePipelineClustersEmptyPipelines(t *testing.T) {
|
||||||
|
@ -120,6 +120,6 @@ func TestMergePipelineClustersEmptyPipelines(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
|
diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, diags.Error())
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,13 +3,17 @@ package mutator
|
||||||
import (
|
import (
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
"github.com/databricks/cli/bundle/config"
|
"github.com/databricks/cli/bundle/config"
|
||||||
|
"github.com/databricks/cli/bundle/config/loader"
|
||||||
"github.com/databricks/cli/bundle/scripts"
|
"github.com/databricks/cli/bundle/scripts"
|
||||||
)
|
)
|
||||||
|
|
||||||
func DefaultMutators() []bundle.Mutator {
|
func DefaultMutators() []bundle.Mutator {
|
||||||
return []bundle.Mutator{
|
return []bundle.Mutator{
|
||||||
|
loader.EntryPoint(),
|
||||||
|
loader.ProcessRootIncludes(),
|
||||||
|
|
||||||
|
// Execute preinit script after loading all configuration files.
|
||||||
scripts.Execute(config.ScriptPreInit),
|
scripts.Execute(config.ScriptPreInit),
|
||||||
ProcessRootIncludes(),
|
|
||||||
EnvironmentsToTargets(),
|
EnvironmentsToTargets(),
|
||||||
InitializeVariables(),
|
InitializeVariables(),
|
||||||
DefineDefaultTarget(),
|
DefineDefaultTarget(),
|
||||||
|
|
|
@ -4,11 +4,12 @@ import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
)
|
)
|
||||||
|
|
||||||
type noop struct{}
|
type noop struct{}
|
||||||
|
|
||||||
func (*noop) Apply(context.Context, *bundle.Bundle) error {
|
func (*noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -2,11 +2,11 @@ package mutator
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
"github.com/databricks/cli/bundle/config"
|
"github.com/databricks/cli/bundle/config"
|
||||||
"github.com/databricks/cli/bundle/config/resources"
|
"github.com/databricks/cli/bundle/config/resources"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/env"
|
"github.com/databricks/cli/libs/env"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -32,10 +32,10 @@ func overrideJobCompute(j *resources.Job, compute string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
if b.Config.Bundle.Mode != config.Development {
|
if b.Config.Bundle.Mode != config.Development {
|
||||||
if b.Config.Bundle.ComputeID != "" {
|
if b.Config.Bundle.ComputeID != "" {
|
||||||
return fmt.Errorf("cannot override compute for an target that does not use 'mode: development'")
|
return diag.Errorf("cannot override compute for an target that does not use 'mode: development'")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -49,8 +49,8 @@ func TestOverrideDevelopment(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
|
assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
|
||||||
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
|
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
|
||||||
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
|
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
|
||||||
|
@ -85,8 +85,8 @@ func TestOverrideDevelopmentEnv(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
|
assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -110,8 +110,8 @@ func TestOverridePipelineTask(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
|
assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -140,8 +140,8 @@ func TestOverrideProduction(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.Error(t, err)
|
require.True(t, diags.HasError())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestOverrideProductionEnv(t *testing.T) {
|
func TestOverrideProductionEnv(t *testing.T) {
|
||||||
|
@ -167,6 +167,6 @@ func TestOverrideProductionEnv(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
"github.com/databricks/cli/bundle/config"
|
"github.com/databricks/cli/bundle/config"
|
||||||
"github.com/databricks/cli/libs/auth"
|
"github.com/databricks/cli/libs/auth"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/tags"
|
"github.com/databricks/cli/libs/tags"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -20,7 +21,7 @@ func (m *populateCurrentUser) Name() string {
|
||||||
return "PopulateCurrentUser"
|
return "PopulateCurrentUser"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
if b.Config.Workspace.CurrentUser != nil {
|
if b.Config.Workspace.CurrentUser != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -28,7 +29,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error
|
||||||
w := b.WorkspaceClient()
|
w := b.WorkspaceClient()
|
||||||
me, err := w.CurrentUser.Me(ctx)
|
me, err := w.CurrentUser.Me(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
b.Config.Workspace.CurrentUser = &config.User{
|
b.Config.Workspace.CurrentUser = &config.User{
|
||||||
|
|
|
@ -1,161 +0,0 @@
|
||||||
package mutator_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
|
||||||
"github.com/databricks/cli/bundle/config"
|
|
||||||
"github.com/databricks/cli/bundle/config/mutator"
|
|
||||||
"github.com/databricks/cli/bundle/env"
|
|
||||||
"github.com/databricks/cli/internal/testutil"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestProcessRootIncludesEmpty(t *testing.T) {
|
|
||||||
b := &bundle.Bundle{
|
|
||||||
Config: config.Root{
|
|
||||||
Path: ".",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessRootIncludesAbs(t *testing.T) {
|
|
||||||
// remove this once equivalent tests for windows have been set up
|
|
||||||
// or this test has been fixed for windows
|
|
||||||
// date: 28 Nov 2022
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
t.Skip("skipping temperorilty to make windows unit tests green")
|
|
||||||
}
|
|
||||||
|
|
||||||
b := &bundle.Bundle{
|
|
||||||
Config: config.Root{
|
|
||||||
Path: ".",
|
|
||||||
Include: []string{
|
|
||||||
"/tmp/*.yml",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
|
|
||||||
require.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "must be relative paths")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessRootIncludesSingleGlob(t *testing.T) {
|
|
||||||
b := &bundle.Bundle{
|
|
||||||
Config: config.Root{
|
|
||||||
Path: t.TempDir(),
|
|
||||||
Include: []string{
|
|
||||||
"*.yml",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
testutil.Touch(t, b.Config.Path, "databricks.yml")
|
|
||||||
testutil.Touch(t, b.Config.Path, "a.yml")
|
|
||||||
testutil.Touch(t, b.Config.Path, "b.yml")
|
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessRootIncludesMultiGlob(t *testing.T) {
|
|
||||||
b := &bundle.Bundle{
|
|
||||||
Config: config.Root{
|
|
||||||
Path: t.TempDir(),
|
|
||||||
Include: []string{
|
|
||||||
"a*.yml",
|
|
||||||
"b*.yml",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
testutil.Touch(t, b.Config.Path, "a1.yml")
|
|
||||||
testutil.Touch(t, b.Config.Path, "b1.yml")
|
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessRootIncludesRemoveDups(t *testing.T) {
|
|
||||||
b := &bundle.Bundle{
|
|
||||||
Config: config.Root{
|
|
||||||
Path: t.TempDir(),
|
|
||||||
Include: []string{
|
|
||||||
"*.yml",
|
|
||||||
"*.yml",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
testutil.Touch(t, b.Config.Path, "a.yml")
|
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, []string{"a.yml"}, b.Config.Include)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessRootIncludesNotExists(t *testing.T) {
|
|
||||||
b := &bundle.Bundle{
|
|
||||||
Config: config.Root{
|
|
||||||
Path: t.TempDir(),
|
|
||||||
Include: []string{
|
|
||||||
"notexist.yml",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
|
|
||||||
require.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "notexist.yml defined in 'include' section does not match any files")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) {
|
|
||||||
rootPath := t.TempDir()
|
|
||||||
testYamlName := "extra_include_path.yml"
|
|
||||||
testutil.Touch(t, rootPath, testYamlName)
|
|
||||||
t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName))
|
|
||||||
|
|
||||||
b := &bundle.Bundle{
|
|
||||||
Config: config.Root{
|
|
||||||
Path: rootPath,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Contains(t, b.Config.Include, testYamlName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) {
|
|
||||||
rootPath := t.TempDir()
|
|
||||||
testYamlName := "extra_include_path.yml"
|
|
||||||
testutil.Touch(t, rootPath, testYamlName)
|
|
||||||
t.Setenv(env.IncludesVariable, strings.Join(
|
|
||||||
[]string{
|
|
||||||
path.Join(rootPath, testYamlName),
|
|
||||||
path.Join(rootPath, testYamlName),
|
|
||||||
},
|
|
||||||
string(os.PathListSeparator),
|
|
||||||
))
|
|
||||||
|
|
||||||
b := &bundle.Bundle{
|
|
||||||
Config: config.Root{
|
|
||||||
Path: rootPath,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, []string{testYamlName}, b.Config.Include)
|
|
||||||
}
|
|
|
@ -2,13 +2,13 @@ package mutator
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
"github.com/databricks/cli/bundle/config"
|
"github.com/databricks/cli/bundle/config"
|
||||||
"github.com/databricks/cli/libs/auth"
|
"github.com/databricks/cli/libs/auth"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/dyn"
|
"github.com/databricks/cli/libs/dyn"
|
||||||
"github.com/databricks/cli/libs/log"
|
"github.com/databricks/cli/libs/log"
|
||||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||||
|
@ -30,7 +30,7 @@ func (m *processTargetMode) Name() string {
|
||||||
// Mark all resources as being for 'development' purposes, i.e.
|
// Mark all resources as being for 'development' purposes, i.e.
|
||||||
// changing their name, adding tags, and (in the future)
|
// changing their name, adding tags, and (in the future)
|
||||||
// marking them as 'hidden' in the UI.
|
// marking them as 'hidden' in the UI.
|
||||||
func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) error {
|
func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
r := b.Config.Resources
|
r := b.Config.Resources
|
||||||
|
|
||||||
shortName := b.Config.Workspace.CurrentUser.ShortName
|
shortName := b.Config.Workspace.CurrentUser.ShortName
|
||||||
|
@ -40,7 +40,7 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) error {
|
||||||
log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true")
|
log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true")
|
||||||
err := disableDeploymentLock(b)
|
err := disableDeploymentLock(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,9 +117,9 @@ func disableDeploymentLock(b *bundle.Bundle) error {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateDevelopmentMode(b *bundle.Bundle) error {
|
func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
|
||||||
if path := findNonUserPath(b); path != "" {
|
if path := findNonUserPath(b); path != "" {
|
||||||
return fmt.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
|
return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -142,7 +142,7 @@ func findNonUserPath(b *bundle.Bundle) string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) error {
|
func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) diag.Diagnostics {
|
||||||
if b.Config.Bundle.Git.Inferred {
|
if b.Config.Bundle.Git.Inferred {
|
||||||
env := b.Config.Bundle.Target
|
env := b.Config.Bundle.Target
|
||||||
log.Warnf(ctx, "target with 'mode: production' should specify an explicit 'targets.%s.git' configuration", env)
|
log.Warnf(ctx, "target with 'mode: production' should specify an explicit 'targets.%s.git' configuration", env)
|
||||||
|
@ -151,12 +151,12 @@ func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUs
|
||||||
r := b.Config.Resources
|
r := b.Config.Resources
|
||||||
for i := range r.Pipelines {
|
for i := range r.Pipelines {
|
||||||
if r.Pipelines[i].Development {
|
if r.Pipelines[i].Development {
|
||||||
return fmt.Errorf("target with 'mode: production' cannot include a pipeline with 'development: true'")
|
return diag.Errorf("target with 'mode: production' cannot include a pipeline with 'development: true'")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !isPrincipalUsed && !isRunAsSet(r) {
|
if !isPrincipalUsed && !isRunAsSet(r) {
|
||||||
return fmt.Errorf("'run_as' must be set for all jobs when using 'mode: production'")
|
return diag.Errorf("'run_as' must be set for all jobs when using 'mode: production'")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -173,21 +173,21 @@ func isRunAsSet(r config.Resources) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
switch b.Config.Bundle.Mode {
|
switch b.Config.Bundle.Mode {
|
||||||
case config.Development:
|
case config.Development:
|
||||||
err := validateDevelopmentMode(b)
|
diags := validateDevelopmentMode(b)
|
||||||
if err != nil {
|
if diags != nil {
|
||||||
return err
|
return diags
|
||||||
}
|
}
|
||||||
return transformDevelopmentMode(b)
|
return transformDevelopmentMode(ctx, b)
|
||||||
case config.Production:
|
case config.Production:
|
||||||
isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
|
isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
|
||||||
return validateProductionMode(ctx, b, isPrincipal)
|
return validateProductionMode(ctx, b, isPrincipal)
|
||||||
case "":
|
case "":
|
||||||
// No action
|
// No action
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode)
|
return diag.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -110,8 +110,8 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
|
||||||
b := mockBundle(config.Development)
|
b := mockBundle(config.Development)
|
||||||
|
|
||||||
m := ProcessTargetMode()
|
m := ProcessTargetMode()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
|
|
||||||
// Job 1
|
// Job 1
|
||||||
assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name)
|
assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name)
|
||||||
|
@ -154,8 +154,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
||||||
err := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
|
|
||||||
// Assert that tag normalization took place.
|
// Assert that tag normalization took place.
|
||||||
assert.Equal(t, "Hello world__", b.Config.Resources.Jobs["job1"].Tags["dev"])
|
assert.Equal(t, "Hello world__", b.Config.Resources.Jobs["job1"].Tags["dev"])
|
||||||
|
@ -168,8 +168,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
||||||
err := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
|
|
||||||
// Assert that tag normalization took place (Azure allows more characters than AWS).
|
// Assert that tag normalization took place (Azure allows more characters than AWS).
|
||||||
assert.Equal(t, "Héllö wörld?!", b.Config.Resources.Jobs["job1"].Tags["dev"])
|
assert.Equal(t, "Héllö wörld?!", b.Config.Resources.Jobs["job1"].Tags["dev"])
|
||||||
|
@ -182,8 +182,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
||||||
err := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
|
|
||||||
// Assert that tag normalization took place.
|
// Assert that tag normalization took place.
|
||||||
assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"])
|
assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"])
|
||||||
|
@ -193,8 +193,8 @@ func TestProcessTargetModeDefault(t *testing.T) {
|
||||||
b := mockBundle("")
|
b := mockBundle("")
|
||||||
|
|
||||||
m := ProcessTargetMode()
|
m := ProcessTargetMode()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
|
assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
|
||||||
assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
|
assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
|
||||||
assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
|
assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
|
||||||
|
@ -205,15 +205,15 @@ func TestProcessTargetModeDefault(t *testing.T) {
|
||||||
func TestProcessTargetModeProduction(t *testing.T) {
|
func TestProcessTargetModeProduction(t *testing.T) {
|
||||||
b := mockBundle(config.Production)
|
b := mockBundle(config.Production)
|
||||||
|
|
||||||
err := validateProductionMode(context.Background(), b, false)
|
diags := validateProductionMode(context.Background(), b, false)
|
||||||
require.ErrorContains(t, err, "run_as")
|
require.ErrorContains(t, diags.Error(), "run_as")
|
||||||
|
|
||||||
b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
|
b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
|
||||||
b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts"
|
b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts"
|
||||||
b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files"
|
b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files"
|
||||||
|
|
||||||
err = validateProductionMode(context.Background(), b, false)
|
diags = validateProductionMode(context.Background(), b, false)
|
||||||
require.ErrorContains(t, err, "production")
|
require.ErrorContains(t, diags.Error(), "production")
|
||||||
|
|
||||||
permissions := []resources.Permission{
|
permissions := []resources.Permission{
|
||||||
{
|
{
|
||||||
|
@ -232,8 +232,8 @@ func TestProcessTargetModeProduction(t *testing.T) {
|
||||||
b.Config.Resources.Models["model1"].Permissions = permissions
|
b.Config.Resources.Models["model1"].Permissions = permissions
|
||||||
b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions
|
b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions
|
||||||
|
|
||||||
err = validateProductionMode(context.Background(), b, false)
|
diags = validateProductionMode(context.Background(), b, false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
|
|
||||||
assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
|
assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
|
||||||
assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
|
assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
|
||||||
|
@ -246,12 +246,12 @@ func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {
|
||||||
b := mockBundle(config.Production)
|
b := mockBundle(config.Production)
|
||||||
|
|
||||||
// Our target has all kinds of problems when not using service principals ...
|
// Our target has all kinds of problems when not using service principals ...
|
||||||
err := validateProductionMode(context.Background(), b, false)
|
diags := validateProductionMode(context.Background(), b, false)
|
||||||
require.Error(t, err)
|
require.Error(t, diags.Error())
|
||||||
|
|
||||||
// ... but we're much less strict when a principal is used
|
// ... but we're much less strict when a principal is used
|
||||||
err = validateProductionMode(context.Background(), b, true)
|
diags = validateProductionMode(context.Background(), b, true)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make sure that we have test coverage for all resource types
|
// Make sure that we have test coverage for all resource types
|
||||||
|
@ -277,8 +277,8 @@ func TestAllResourcesRenamed(t *testing.T) {
|
||||||
b := mockBundle(config.Development)
|
b := mockBundle(config.Development)
|
||||||
|
|
||||||
m := ProcessTargetMode()
|
m := ProcessTargetMode()
|
||||||
err := bundle.Apply(context.Background(), b, m)
|
diags := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
|
|
||||||
resources := reflect.ValueOf(b.Config.Resources)
|
resources := reflect.ValueOf(b.Config.Resources)
|
||||||
for i := 0; i < resources.NumField(); i++ {
|
for i := 0; i < resources.NumField(); i++ {
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
"github.com/databricks/cli/libs/log"
|
"github.com/databricks/cli/libs/log"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
@ -15,7 +16,7 @@ func ResolveResourceReferences() bundle.Mutator {
|
||||||
return &resolveResourceReferences{}
|
return &resolveResourceReferences{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) error {
|
func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||||
errs, errCtx := errgroup.WithContext(ctx)
|
errs, errCtx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
for k := range b.Config.Variables {
|
for k := range b.Config.Variables {
|
||||||
|
@ -40,7 +41,7 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
return errs.Wait()
|
return diag.FromErr(errs.Wait())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*resolveResourceReferences) Name() string {
|
func (*resolveResourceReferences) Name() string {
|
||||||
|
|
|
@ -50,8 +50,8 @@ func TestResolveClusterReference(t *testing.T) {
|
||||||
ClusterId: "9876-5432-xywz",
|
ClusterId: "9876-5432-xywz",
|
||||||
}, nil)
|
}, nil)
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, ResolveResourceReferences())
|
diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
require.Equal(t, "1234-5678-abcd", *b.Config.Variables["my-cluster-id-1"].Value)
|
require.Equal(t, "1234-5678-abcd", *b.Config.Variables["my-cluster-id-1"].Value)
|
||||||
require.Equal(t, "9876-5432-xywz", *b.Config.Variables["my-cluster-id-2"].Value)
|
require.Equal(t, "9876-5432-xywz", *b.Config.Variables["my-cluster-id-2"].Value)
|
||||||
}
|
}
|
||||||
|
@ -79,8 +79,8 @@ func TestResolveNonExistentClusterReference(t *testing.T) {
|
||||||
clusterApi := m.GetMockClustersAPI()
|
clusterApi := m.GetMockClustersAPI()
|
||||||
clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef))
|
clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef))
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, ResolveResourceReferences())
|
diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
|
||||||
require.ErrorContains(t, err, "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist")
|
require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNoLookupIfVariableIsSet(t *testing.T) {
|
func TestNoLookupIfVariableIsSet(t *testing.T) {
|
||||||
|
@ -102,8 +102,8 @@ func TestNoLookupIfVariableIsSet(t *testing.T) {
|
||||||
|
|
||||||
b.Config.Variables["my-cluster-id"].Set("random value")
|
b.Config.Variables["my-cluster-id"].Set("random value")
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, ResolveResourceReferences())
|
diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
require.Equal(t, "random value", *b.Config.Variables["my-cluster-id"].Value)
|
require.Equal(t, "random value", *b.Config.Variables["my-cluster-id"].Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -129,7 +129,7 @@ func TestResolveServicePrincipal(t *testing.T) {
|
||||||
ApplicationId: "app-1234",
|
ApplicationId: "app-1234",
|
||||||
}, nil)
|
}, nil)
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, ResolveResourceReferences())
|
diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
|
||||||
require.NoError(t, err)
|
require.NoError(t, diags.Error())
|
||||||
require.Equal(t, "app-1234", *b.Config.Variables["my-sp"].Value)
|
require.Equal(t, "app-1234", *b.Config.Variables["my-sp"].Value)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -4,6 +4,7 @@ import (
 	"context"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/cli/libs/dyn/convert"
 	"github.com/databricks/cli/libs/dyn/dynvar"
@@ -26,7 +27,7 @@ func (m *resolveVariableReferences) Validate(ctx context.Context, b *bundle.Bund
 	return nil
 }

-func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	prefixes := make([]dyn.Path, len(m.prefixes))
 	for i, prefix := range m.prefixes {
 		prefixes[i] = dyn.MustPathFromString(prefix)
@@ -36,7 +37,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
 	// We rewrite it here to make the resolution logic simpler.
 	varPath := dyn.NewPath(dyn.Key("var"))

-	return b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
+	err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
 		// Synthesize a copy of the root that has all fields that are present in the type
 		// but not set in the dynamic value set to their corresponding empty value.
 		// This enables users to interpolate variable references to fields that haven't
@@ -92,4 +93,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
 		}
 		return root, nil
 	})
+
+	return diag.FromErr(err)
 }
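The signature change above is the core of this refactor: mutators now return `diag.Diagnostics`, and plain errors from existing helpers are wrapped with `diag.FromErr`. A hedged sketch of a mutator under the new contract; the mutator name, its helper, and the assumption that `bundle.Mutator` is the Name/Apply pair seen throughout these files are mine, not code from this commit.

```go
package mutator

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

// exampleMutator is a hypothetical mutator used only to illustrate the new signature.
type exampleMutator struct{}

func (m *exampleMutator) Name() string {
	return "ExampleMutator"
}

func (m *exampleMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	// New validation failures can be reported directly as diagnostics.
	if b == nil {
		return diag.Errorf("nil bundle")
	}

	// Plain errors returned by existing helpers are wrapped with diag.FromErr.
	if err := doSomething(b); err != nil {
		return diag.FromErr(err)
	}
	return nil
}

// doSomething is a placeholder helper returning a regular error.
func doSomething(b *bundle.Bundle) error {
	if b == nil {
		return fmt.Errorf("nil bundle")
	}
	return nil
}
```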
@@ -8,6 +8,7 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
@@ -29,14 +30,14 @@ func TestResolveVariableReferences(t *testing.T) {
 	}

 	// Apply with an invalid prefix. This should not change the workspace root path.
-	err := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist"))
+	diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist"))
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	require.Equal(t, "${bundle.name}/bar", b.Config.Workspace.RootPath)
 	require.Equal(t, "${workspace.root_path}/baz", b.Config.Workspace.FilePath)

 	// Apply with a valid prefix. This should change the workspace root path.
-	err = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace"))
+	diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace"))
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	require.Equal(t, "example/bar", b.Config.Workspace.RootPath)
 	require.Equal(t, "example/bar/baz", b.Config.Workspace.FilePath)
 }
@@ -63,8 +64,8 @@ func TestResolveVariableReferencesToBundleVariables(t *testing.T) {
 	}

 	// Apply with a valid prefix. This should change the workspace root path.
-	err := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "variables"))
+	diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "variables"))
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	require.Equal(t, "example/bar", b.Config.Workspace.RootPath)
 }

@@ -92,15 +93,15 @@ func TestResolveVariableReferencesToEmptyFields(t *testing.T) {
 	}

 	// Apply for the bundle prefix.
-	err := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle"))
+	diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle"))
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())

 	// The job settings should have been interpolated to an empty string.
 	require.Equal(t, "", b.Config.Resources.Jobs["job1"].JobSettings.Tags["git_branch"])
 }

 func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) {
-	var err error
+	var diags diag.Diagnostics

 	b := &bundle.Bundle{
 		Config: config.Root{
@@ -142,20 +143,21 @@ func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) {
 	ctx := context.Background()

 	// Initialize the variables.
-	err = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error {
+	diags = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-		return b.Config.InitializeVariables([]string{
+		err := b.Config.InitializeVariables([]string{
 			"no_alert_for_canceled_runs=true",
 			"no_alert_for_skipped_runs=true",
 			"min_workers=1",
 			"max_workers=2",
 			"spot_bid_max_price=0.5",
 		})
+		return diag.FromErr(err)
 	})
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())

 	// Assign the variables to the dynamic configuration.
-	err = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error {
+	diags = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-		return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
 			var p dyn.Path
 			var err error

@@ -180,12 +182,13 @@ func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) {

 			return v, nil
 		})
+		return diag.FromErr(err)
 	})
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())

 	// Apply for the variable prefix. This should resolve the variables to their values.
-	err = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables"))
+	diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables"))
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns)
 	assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns)
 	assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MinWorkers)
@@ -6,6 +6,7 @@ import (

 	"github.com/databricks/cli/bundle"

+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
 )

@@ -41,18 +42,20 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
 	}
 }

-func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
 		return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
-			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.Config.Path)))
+			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
 			if err != nil {
 				return dyn.NilValue, err
 			}
-			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.Config.Path)))
+			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
 			if err != nil {
 				return dyn.NilValue, err
 			}
 			return v, nil
 		})
 	})
+
+	return diag.FromErr(err)
 }
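The rewrite above walks the dynamic configuration with `dyn.Map` and applies `makeRelativeTo`, now anchored at `b.RootPath` instead of `b.Config.Path`, to every element of `sync.include` and `sync.exclude` via `dyn.Foreach`. Below is a hedged, standalone sketch of the same traversal pattern; the input literal, the "rewritten/" transformation, and the assumption that `dyn.V` wraps nested maps and slices of `dyn.Value` as the surrounding tests suggest are all illustrative.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// Illustrative input; the "include" key mirrors the sync block handled above.
	v := dyn.V(map[string]dyn.Value{
		"include": dyn.V([]dyn.Value{
			dyn.V("foo"),
			dyn.V("a/bar"),
		}),
	})

	// dyn.Map visits the value at the given key; dyn.Foreach applies the callback
	// to every element, which is how makeRelativeTo(b.RootPath) is applied above.
	out, err := dyn.Map(v, "include", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
		return dyn.V("rewritten/" + v.MustString()), nil
	}))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", out)
}
```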
@@ -14,8 +14,8 @@ import (

 func TestRewriteSyncPathsRelative(t *testing.T) {
 	b := &bundle.Bundle{
+		RootPath: ".",
 		Config: config.Root{
-			Path: ".",
 			Sync: config.Sync{
 				Include: []string{
 					"foo",
@@ -34,8 +34,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
 	bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")
 	bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml")

-	err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
+	diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
-	assert.NoError(t, err)
+	assert.NoError(t, diags.Error())

 	assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
 	assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
@@ -45,8 +45,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) {

 func TestRewriteSyncPathsAbsolute(t *testing.T) {
 	b := &bundle.Bundle{
+		RootPath: "/tmp/dir",
 		Config: config.Root{
-			Path: "/tmp/dir",
 			Sync: config.Sync{
 				Include: []string{
 					"foo",
@@ -65,8 +65,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
 	bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")
 	bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml")

-	err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
+	diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
-	assert.NoError(t, err)
+	assert.NoError(t, diags.Error())

 	assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
 	assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
@@ -77,19 +77,17 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
 func TestRewriteSyncPathsErrorPaths(t *testing.T) {
 	t.Run("no sync block", func(t *testing.T) {
 		b := &bundle.Bundle{
-			Config: config.Root{
-				Path: ".",
-			},
+			RootPath: ".",
 		}

-		err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
+		diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
-		assert.NoError(t, err)
+		assert.NoError(t, diags.Error())
 	})

 	t.Run("empty include/exclude blocks", func(t *testing.T) {
 		b := &bundle.Bundle{
+			RootPath: ".",
 			Config: config.Root{
-				Path: ".",
 				Sync: config.Sync{
 					Include: []string{},
 					Exclude: []string{},
@@ -97,7 +95,7 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) {
 			},
 		}

-		err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
+		diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
-		assert.NoError(t, err)
+		assert.NoError(t, diags.Error())
 	})
 }
@@ -2,19 +2,24 @@ package mutator

 import (
 	"context"
-	"slices"
+	"fmt"

 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 )

 type setRunAs struct {
 }

-// SetRunAs mutator is used to go over defined resources such as Jobs and DLT Pipelines
-// And set correct execution identity ("run_as" for a job or "is_owner" permission for DLT)
-// if top-level "run-as" section is defined in the configuration.
+// This mutator does two things:
+//
+// 1. Sets the run_as field for jobs to the value of the run_as field in the bundle.
+//
+// 2. Validates that the bundle run_as configuration is valid in the context of the bundle.
+// If the run_as user is different from the current deployment user, DABs only
+// supports a subset of resources.
 func SetRunAs() bundle.Mutator {
 	return &setRunAs{}
 }
@@ -23,12 +28,94 @@ func (m *setRunAs) Name() string {
 	return "SetRunAs"
 }

-func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error {
+type errUnsupportedResourceTypeForRunAs struct {
+	resourceType     string
+	resourceLocation dyn.Location
+	currentUser      string
+	runAsUser        string
+}
+
+// TODO(6 March 2024): Link the docs page describing run_as semantics in the error below
+// once the page is ready.
+func (e errUnsupportedResourceTypeForRunAs) Error() string {
+	return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser)
+}
+
+type errBothSpAndUserSpecified struct {
+	spName   string
+	spLoc    dyn.Location
+	userName string
+	userLoc  dyn.Location
+}
+
+func (e errBothSpAndUserSpecified) Error() string {
+	return fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name %q is specified at %s. A user_name %q is defined at %s", e.spName, e.spLoc, e.userName, e.userLoc)
+}
+
+func validateRunAs(b *bundle.Bundle) error {
+	runAs := b.Config.RunAs
+
+	// Error if neither service_principal_name nor user_name are specified
+	if runAs.ServicePrincipalName == "" && runAs.UserName == "" {
+		return fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as"))
+	}
+
+	// Error if both service_principal_name and user_name are specified
+	if runAs.UserName != "" && runAs.ServicePrincipalName != "" {
+		return errBothSpAndUserSpecified{
+			spName:   runAs.ServicePrincipalName,
+			userName: runAs.UserName,
+			spLoc:    b.Config.GetLocation("run_as.service_principal_name"),
+			userLoc:  b.Config.GetLocation("run_as.user_name"),
+		}
+	}
+
+	identity := runAs.ServicePrincipalName
+	if identity == "" {
+		identity = runAs.UserName
+	}
+
+	// All resources are supported if the run_as identity is the same as the current deployment identity.
+	if identity == b.Config.Workspace.CurrentUser.UserName {
+		return nil
+	}
+
+	// DLT pipelines do not support run_as in the API.
+	if len(b.Config.Resources.Pipelines) > 0 {
+		return errUnsupportedResourceTypeForRunAs{
+			resourceType:     "pipelines",
+			resourceLocation: b.Config.GetLocation("resources.pipelines"),
+			currentUser:      b.Config.Workspace.CurrentUser.UserName,
+			runAsUser:        identity,
+		}
+	}
+
+	// Model serving endpoints do not support run_as in the API.
+	if len(b.Config.Resources.ModelServingEndpoints) > 0 {
+		return errUnsupportedResourceTypeForRunAs{
+			resourceType:     "model_serving_endpoints",
+			resourceLocation: b.Config.GetLocation("resources.model_serving_endpoints"),
+			currentUser:      b.Config.Workspace.CurrentUser.UserName,
+			runAsUser:        identity,
+		}
+	}
+
+	return nil
+}
+
+func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
+	// Mutator is a no-op if run_as is not specified in the bundle
 	runAs := b.Config.RunAs
 	if runAs == nil {
 		return nil
 	}

+	// Assert the run_as configuration is valid in the context of the bundle
+	if err := validateRunAs(b); err != nil {
+		return diag.FromErr(err)
+	}
+
+	// Set run_as for jobs
 	for i := range b.Config.Resources.Jobs {
 		job := b.Config.Resources.Jobs[i]
 		if job.RunAs != nil {
@@ -40,26 +127,5 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error {
 		}
 	}

-	me := b.Config.Workspace.CurrentUser.UserName
-	// If user deploying the bundle and the one defined in run_as are the same
-	// Do not add IS_OWNER permission. Current user is implied to be an owner in this case.
-	// Otherwise, it will fail due to this bug https://github.com/databricks/terraform-provider-databricks/issues/2407
-	if runAs.UserName == me || runAs.ServicePrincipalName == me {
-		return nil
-	}
-
-	for i := range b.Config.Resources.Pipelines {
-		pipeline := b.Config.Resources.Pipelines[i]
-		pipeline.Permissions = slices.DeleteFunc(pipeline.Permissions, func(p resources.Permission) bool {
-			return (runAs.ServicePrincipalName != "" && p.ServicePrincipalName == runAs.ServicePrincipalName) ||
-				(runAs.UserName != "" && p.UserName == runAs.UserName)
-		})
-		pipeline.Permissions = append(pipeline.Permissions, resources.Permission{
-			Level:                "IS_OWNER",
-			ServicePrincipalName: runAs.ServicePrincipalName,
-			UserName:             runAs.UserName,
-		})
-	}
-
 	return nil
 }
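Taken together, the rewritten mutator now validates `run_as` before mutating anything: exactly one of `service_principal_name` or `user_name` must be set, and when the run_as identity differs from the deploying user, resource types without run_as support (pipelines, model serving endpoints) are rejected. A hedged sketch of how that surfaces through `bundle.Apply`; the identities, the pipeline key, and the test name are illustrative values, not part of this commit.

```go
package mutator

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/require"
)

func TestRunAsRejectsPipelinesSketch(t *testing.T) {
	// Assumption: run_as ("bob") differs from the deploying user ("alice") and
	// the bundle defines a pipeline, which validateRunAs above rejects.
	b := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				CurrentUser: &config.User{User: &iam.User{UserName: "alice"}},
			},
			RunAs: &jobs.JobRunAs{UserName: "bob"},
			Resources: config.Resources{
				Pipelines: map[string]*resources.Pipeline{"pl": {}},
			},
		},
	}

	diags := bundle.Apply(context.Background(), b, SetRunAs())
	// The message comes from errUnsupportedResourceTypeForRunAs above.
	require.ErrorContains(t, diags.Error(), "pipelines are not supported when the current deployment user is different")
}
```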
@@ -0,0 +1,188 @@
+package mutator
+
+import (
+	"context"
+	"slices"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/convert"
+	"github.com/databricks/databricks-sdk-go/service/iam"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func allResourceTypes(t *testing.T) []string {
+	// Compute supported resource types based on the `Resources{}` struct.
+	r := config.Resources{}
+	rv, err := convert.FromTyped(r, dyn.NilValue)
+	require.NoError(t, err)
+	normalized, _ := convert.Normalize(r, rv, convert.IncludeMissingFields)
+	resourceTypes := []string{}
+	for _, k := range normalized.MustMap().Keys() {
+		resourceTypes = append(resourceTypes, k.MustString())
+	}
+	slices.Sort(resourceTypes)
+
+	// Assert the total list of resource supported, as a sanity check that using
+	// the dyn library gives us the correct list of all resources supported. Please
+	// also update this check when adding a new resource
+	require.Equal(t, []string{
+		"experiments",
+		"jobs",
+		"model_serving_endpoints",
+		"models",
+		"pipelines",
+		"registered_models",
+	},
+		resourceTypes,
+	)
+
+	return resourceTypes
+}
+
+func TestRunAsWorksForAllowedResources(t *testing.T) {
+	config := config.Root{
+		Workspace: config.Workspace{
+			CurrentUser: &config.User{
+				User: &iam.User{
+					UserName: "alice",
+				},
+			},
+		},
+		RunAs: &jobs.JobRunAs{
+			UserName: "bob",
+		},
+		Resources: config.Resources{
+			Jobs: map[string]*resources.Job{
+				"job_one": {
+					JobSettings: &jobs.JobSettings{
+						Name: "foo",
+					},
+				},
+				"job_two": {
+					JobSettings: &jobs.JobSettings{
+						Name: "bar",
+					},
+				},
+				"job_three": {
+					JobSettings: &jobs.JobSettings{
+						Name: "baz",
+					},
+				},
+			},
+			Models: map[string]*resources.MlflowModel{
+				"model_one": {},
+			},
+			RegisteredModels: map[string]*resources.RegisteredModel{
+				"registered_model_one": {},
+			},
+			Experiments: map[string]*resources.MlflowExperiment{
+				"experiment_one": {},
+			},
+		},
+	}
+
+	b := &bundle.Bundle{
+		Config: config,
+	}
+
+	diags := bundle.Apply(context.Background(), b, SetRunAs())
+	assert.NoError(t, diags.Error())
+
+	for _, job := range b.Config.Resources.Jobs {
+		assert.Equal(t, "bob", job.RunAs.UserName)
+	}
+}
+
+func TestRunAsErrorForUnsupportedResources(t *testing.T) {
+	// Bundle "run_as" has two modes of operation, each with a different set of
+	// resources that are supported.
+	// Cases:
+	// 1. When the bundle "run_as" identity is same as the current deployment
+	// identity. In this case all resources are supported.
+	// 2. When the bundle "run_as" identity is different from the current
+	// deployment identity. In this case only a subset of resources are
+	// supported. This subset of resources are defined in the allow list below.
+	//
+	// To be a part of the allow list, the resource must satisfy one of the following
+	// two conditions:
+	// 1. The resource supports setting a run_as identity to a different user
+	// from the owner/creator of the resource. For example, jobs.
+	// 2. Run as semantics do not apply to the resource. We do not plan to add
+	// platform side support for `run_as` for these resources. For example,
+	// experiments or registered models.
+	//
+	// Any resource that is not on the allow list cannot be used when the bundle
+	// run_as is different from the current deployment user. "bundle validate" must
+	// return an error if such a resource has been defined, and the run_as identity
+	// is different from the current deployment identity.
+	//
+	// Action Item: If you are adding a new resource to DABs, please check in with
+	// the relevant owning team whether the resource should be on the allow list or (implicitly) on
+	// the deny list. Any resources that could have run_as semantics in the future
+	// should be on the deny list.
+	// For example: Teams for pipelines, model serving endpoints or Lakeview dashboards
+	// are planning to add platform side support for `run_as` for these resources at
+	// some point in the future. These resources are (implicitly) on the deny list, since
+	// they are not on the allow list below.
+	allowList := []string{
+		"jobs",
+		"models",
+		"registered_models",
+		"experiments",
+	}
+
+	base := config.Root{
+		Workspace: config.Workspace{
+			CurrentUser: &config.User{
+				User: &iam.User{
+					UserName: "alice",
+				},
+			},
+		},
+		RunAs: &jobs.JobRunAs{
+			UserName: "bob",
+		},
+	}
+
+	v, err := convert.FromTyped(base, dyn.NilValue)
+	require.NoError(t, err)
+
+	for _, rt := range allResourceTypes(t) {
+		// Skip allowed resources
+		if slices.Contains(allowList, rt) {
+			continue
+		}
+
+		// Add an instance of the resource type that is not on the allow list to
+		// the bundle configuration.
+		nv, err := dyn.SetByPath(v, dyn.NewPath(dyn.Key("resources"), dyn.Key(rt)), dyn.V(map[string]dyn.Value{
+			"foo": dyn.V(map[string]dyn.Value{
+				"path": dyn.V("bar"),
+			}),
+		}))
+		require.NoError(t, err)
+
+		// Get back typed configuration from the newly created invalid bundle configuration.
+		r := &config.Root{}
+		err = convert.ToTyped(r, nv)
+		require.NoError(t, err)
+
+		// Assert this invalid bundle configuration fails validation.
+		b := &bundle.Bundle{
+			Config: *r,
+		}
+		diags := bundle.Apply(context.Background(), b, SetRunAs())
+		assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{
+			resourceType:     rt,
+			resourceLocation: dyn.Location{},
+			currentUser:      "alice",
+			runAsUser:        "bob",
+		}.Error(), "expected run_as with a different identity than the current deployment user to not supported for resources of type: %s", rt)
+	}
+}
@@ -2,10 +2,10 @@ package mutator

 import (
 	"context"
-	"fmt"
 	"strings"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"golang.org/x/exp/maps"
 )

@@ -20,9 +20,9 @@ func (m *selectDefaultTarget) Name() string {
 	return "SelectDefaultTarget"
 }

-func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if len(b.Config.Targets) == 0 {
-		return fmt.Errorf("no targets defined")
+		return diag.Errorf("no targets defined")
 	}

 	// One target means there's only one default.
@@ -41,12 +41,12 @@ func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error

 	// It is invalid to have multiple targets with the `default` flag set.
 	if len(defaults) > 1 {
-		return fmt.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", "))
+		return diag.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", "))
 	}

 	// If no target has the `default` flag set, ask the user to specify one.
 	if len(defaults) == 0 {
-		return fmt.Errorf("please specify target")
+		return diag.Errorf("please specify target")
 	}

 	// One default remaining.
@@ -16,8 +16,8 @@ func TestSelectDefaultTargetNoTargets(t *testing.T) {
 			Targets: map[string]*config.Target{},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
+	diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
-	assert.ErrorContains(t, err, "no targets defined")
+	assert.ErrorContains(t, diags.Error(), "no targets defined")
 }

 func TestSelectDefaultTargetSingleTargets(t *testing.T) {
@@ -28,8 +28,8 @@ func TestSelectDefaultTargetSingleTargets(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
+	diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
-	assert.NoError(t, err)
+	assert.NoError(t, diags.Error())
 	assert.Equal(t, "foo", b.Config.Bundle.Target)
 }

@@ -43,8 +43,8 @@ func TestSelectDefaultTargetNoDefaults(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
+	diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
-	assert.ErrorContains(t, err, "please specify target")
+	assert.ErrorContains(t, diags.Error(), "please specify target")
 }

 func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) {
@@ -56,8 +56,8 @@ func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
+	diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
-	assert.ErrorContains(t, err, "please specify target")
+	assert.ErrorContains(t, diags.Error(), "please specify target")
 }

 func TestSelectDefaultTargetMultipleDefaults(t *testing.T) {
@@ -70,8 +70,8 @@ func TestSelectDefaultTargetMultipleDefaults(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
+	diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
-	assert.ErrorContains(t, err, "multiple targets are marked as default")
+	assert.ErrorContains(t, diags.Error(), "multiple targets are marked as default")
 }

 func TestSelectDefaultTargetSingleDefault(t *testing.T) {
@@ -84,7 +84,7 @@ func TestSelectDefaultTargetSingleDefault(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
+	diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
-	assert.NoError(t, err)
+	assert.NoError(t, diags.Error())
 	assert.Equal(t, "bar", b.Config.Bundle.Target)
 }
@@ -6,6 +6,7 @@ import (
 	"strings"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"golang.org/x/exp/maps"
 )

@@ -24,21 +25,21 @@ func (m *selectTarget) Name() string {
 	return fmt.Sprintf("SelectTarget(%s)", m.name)
 }

-func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) error {
+func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if b.Config.Targets == nil {
-		return fmt.Errorf("no targets defined")
+		return diag.Errorf("no targets defined")
 	}

 	// Get specified target
 	_, ok := b.Config.Targets[m.name]
 	if !ok {
-		return fmt.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", "))
+		return diag.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", "))
 	}

 	// Merge specified target into root configuration structure.
 	err := b.Config.MergeTargetOverrides(m.name)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	// Store specified target in configuration for reference.
@@ -26,8 +26,8 @@ func TestSelectTarget(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.SelectTarget("default"))
+	diags := bundle.Apply(context.Background(), b, mutator.SelectTarget("default"))
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "bar", b.Config.Workspace.Host)
 }

@@ -39,6 +39,6 @@ func TestSelectTargetNotFound(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.SelectTarget("doesnt-exist"))
+	diags := bundle.Apply(context.Background(), b, mutator.SelectTarget("doesnt-exist"))
-	require.Error(t, err, "no targets defined")
+	require.Error(t, diags.Error(), "no targets defined")
 }
@@ -2,10 +2,10 @@ package mutator

 import (
 	"context"
-	"fmt"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/env"
 )

@@ -21,7 +21,7 @@ func (m *setVariables) Name() string {
 	return "SetVariables"
 }

-func setVariable(ctx context.Context, v *variable.Variable, name string) error {
+func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Diagnostics {
 	// case: variable already has value initialized, so skip
 	if v.HasValue() {
 		return nil
@@ -32,7 +32,7 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) error {
 	if val, ok := env.Lookup(ctx, envVarName); ok {
 		err := v.Set(val)
 		if err != nil {
-			return fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %w`, val, name, envVarName, err)
+			return diag.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err)
 		}
 		return nil
 	}
@@ -41,7 +41,7 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) error {
 	if v.HasDefault() {
 		err := v.Set(*v.Default)
 		if err != nil {
-			return fmt.Errorf(`failed to assign default value from config "%s" to variable %s with error: %w`, *v.Default, name, err)
+			return diag.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, *v.Default, name, err)
 		}
 		return nil
 	}
@@ -55,15 +55,16 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) error {
 	// We should have had a value to set for the variable at this point.
 	// TODO: use cmdio to request values for unassigned variables if current
 	// terminal is a tty. Tracked in https://github.com/databricks/cli/issues/379
-	return fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
+	return diag.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
 }

-func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	var diags diag.Diagnostics
 	for name, variable := range b.Config.Variables {
-		err := setVariable(ctx, variable, name)
+		diags = diags.Extend(setVariable(ctx, variable, name))
-		if err != nil {
+		if diags.HasError() {
-			return err
+			return diags
 		}
 	}
-	return nil
+	return diags
 }
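The rewritten `setVariable` keeps the same resolution precedence (an already-assigned value wins, then a `BUNDLE_VAR_<name>` environment variable, then the default), but it now reports failures as diagnostics and `Apply` accumulates them with `diags.Extend`. A small sketch of that accumulation pattern; the `check` helper and its message are stand-ins, not code from this commit.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/diag"
)

// check is a stand-in for per-item validation such as setVariable above.
func check(name string) diag.Diagnostics {
	if name == "" {
		return diag.Errorf("no value assigned to required variable %q", name)
	}
	return nil
}

func main() {
	var diags diag.Diagnostics
	for _, name := range []string{"a", "b", ""} {
		// Extend collects diagnostics from each item instead of returning
		// a plain error on the first failure.
		diags = diags.Extend(check(name))
		if diags.HasError() {
			break
		}
	}
	fmt.Println(diags.HasError()) // true: the empty name produced an error diagnostic
}
```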
@@ -21,8 +21,8 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) {
 	// set value for variable as an environment variable
 	t.Setenv("BUNDLE_VAR_foo", "process-env")

-	err := setVariable(context.Background(), &variable, "foo")
+	diags := setVariable(context.Background(), &variable, "foo")
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, *variable.Value, "process-env")
 }

@@ -33,8 +33,8 @@ func TestSetVariableUsingDefaultValue(t *testing.T) {
 		Default: &defaultVal,
 	}

-	err := setVariable(context.Background(), &variable, "foo")
+	diags := setVariable(context.Background(), &variable, "foo")
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, *variable.Value, "default")
 }

@@ -49,8 +49,8 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) {

 	// since a value is already assigned to the variable, it would not be overridden
 	// by the default value
-	err := setVariable(context.Background(), &variable, "foo")
+	diags := setVariable(context.Background(), &variable, "foo")
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, *variable.Value, "assigned-value")
 }

@@ -68,8 +68,8 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) {

 	// since a value is already assigned to the variable, it would not be overridden
 	// by the value from environment
-	err := setVariable(context.Background(), &variable, "foo")
+	diags := setVariable(context.Background(), &variable, "foo")
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, *variable.Value, "assigned-value")
 }

@@ -79,8 +79,8 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) {
 	}

 	// fails because we could not resolve a value for the variable
-	err := setVariable(context.Background(), &variable, "foo")
+	diags := setVariable(context.Background(), &variable, "foo")
-	assert.ErrorContains(t, err, "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable")
+	assert.ErrorContains(t, diags.Error(), "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable")
 }

 func TestSetVariablesMutator(t *testing.T) {
@@ -108,8 +108,8 @@ func TestSetVariablesMutator(t *testing.T) {

 	t.Setenv("BUNDLE_VAR_b", "env-var-b")

-	err := bundle.Apply(context.Background(), b, SetVariables())
+	diags := bundle.Apply(context.Background(), b, SetVariables())
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "default-a", *b.Config.Variables["a"].Value)
 	assert.Equal(t, "env-var-b", *b.Config.Variables["b"].Value)
 	assert.Equal(t, "assigned-val-c", *b.Config.Variables["c"].Value)
@@ -9,6 +9,7 @@ import (
 	"text/template"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 )

@@ -40,12 +41,12 @@ func (m *trampoline) Name() string {
 	return fmt.Sprintf("trampoline(%s)", m.name)
 }

-func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	tasks := m.functions.GetTasks(b)
 	for _, task := range tasks {
 		err := m.generateNotebookWrapper(ctx, b, task)
 		if err != nil {
-			return err
+			return diag.FromErr(err)
 		}
 	}
 	return nil
@@ -81,7 +82,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund
 		return err
 	}

-	internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
+	internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
 	if err != nil {
 		return err
 	}
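The trampoline mutator renders a notebook wrapper from a Go `text/template` (the test below exercises the template `Hello from {{.MyName}}`) and now computes the internal directory path relative to `b.RootPath`. A minimal standard-library sketch of that rendering step; the data struct and field value are illustrative assumptions, not the mutator's real template data.

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Same shape as the template exercised by the trampoline test below.
	tmpl := template.Must(template.New("trampoline").Parse("Hello from {{.MyName}}\n"))

	// The mutator fills its template with task-specific data before writing the
	// generated wrapper notebook; here the data is a simple illustrative struct.
	data := struct{ MyName string }{MyName: "Databricks"}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```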
@@ -57,8 +57,8 @@ func TestGenerateTrampoline(t *testing.T) {
 	}

 	b := &bundle.Bundle{
+		RootPath: tmpDir,
 		Config: config.Root{
-			Path: tmpDir,
 			Bundle: config.Bundle{
 				Target: "development",
 			},
@@ -80,8 +80,8 @@ func TestGenerateTrampoline(t *testing.T) {

 	funcs := functions{}
 	trampoline := NewTrampoline("test_trampoline", &funcs, "Hello from {{.MyName}}")
-	err := bundle.Apply(ctx, b, trampoline)
+	diags := bundle.Apply(ctx, b, trampoline)
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())

 	dir, err := b.InternalDir(ctx)
 	require.NoError(t, err)
@@ -11,6 +11,7 @@ import (
 	"strings"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/cli/libs/notebook"
 )
@@ -84,7 +85,7 @@ func (m *translatePaths) rewritePath(
 	}

 	// Remote path must be relative to the bundle root.
-	localRelPath, err := filepath.Rel(b.Config.Path, localPath)
+	localRelPath, err := filepath.Rel(b.RootPath, localPath)
 	if err != nil {
 		return err
 	}
@@ -185,10 +186,10 @@ func (m *translatePaths) rewriteRelativeTo(b *bundle.Bundle, p dyn.Path, v dyn.V
 		return dyn.InvalidValue, err
 	}

-func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error {
+func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	m.seen = make(map[string]string)

-	return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
 		var err error
 		for _, fn := range []func(*bundle.Bundle, dyn.Value) (dyn.Value, error){
 			m.applyJobTranslations,
@@ -202,4 +203,6 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error {
 		}
 		return v, nil
 	})
+
+	return diag.FromErr(err)
 }
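`rewritePath` now derives the remote path by taking the local path relative to `b.RootPath`, and paths that escape the bundle root are rejected (the tests below expect the message "is not contained in bundle root"). A standard-library sketch of that relative-path check; the example paths and the printed rejection message are illustrative.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	root := "/tmp/bundle"
	for _, local := range []string{"/tmp/bundle/src/notebook.py", "/tmp/other/notebook.py"} {
		rel, err := filepath.Rel(root, local)
		if err != nil || strings.HasPrefix(rel, "..") {
			// A path outside the root resolves to a "../..." relative path.
			fmt.Printf("%s: not contained in bundle root\n", local)
			continue
		}
		fmt.Printf("%s -> %s\n", local, filepath.ToSlash(rel))
	}
}
```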
@@ -36,8 +36,8 @@ func touchEmptyFile(t *testing.T, path string) {
 func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
 	dir := t.TempDir()
 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
 			},
@@ -78,8 +78,8 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) {

 	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())

 	assert.Equal(
 		t,
@@ -106,8 +106,8 @@ func TestTranslatePaths(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar"))

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
 			},
@@ -201,8 +201,8 @@ func TestTranslatePaths(t *testing.T) {

 	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())

 	// Assert that the path in the tasks now refer to the artifact.
 	assert.Equal(
@@ -273,8 +273,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml"))

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
 			},
@@ -332,8 +332,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
 	bundletest.SetLocation(b, "resources.pipelines", filepath.Join(dir, "pipeline/resource.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	require.NoError(t, err)
+	require.NoError(t, diags.Error())

 	assert.Equal(
 		t,
@@ -367,8 +367,8 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
 			},
@@ -392,16 +392,16 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {

 	bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	assert.ErrorContains(t, err, "is not contained in bundle root")
+	assert.ErrorContains(t, diags.Error(), "is not contained in bundle root")
 }

 func TestJobNotebookDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job": {
@@ -422,16 +422,16 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {

 	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	assert.EqualError(t, err, "notebook ./doesnt_exist.py not found")
+	assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
 }

 func TestJobFileDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job": {
@@ -452,16 +452,16 @@ func TestJobFileDoesNotExistError(t *testing.T) {

 	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	assert.EqualError(t, err, "file ./doesnt_exist.py not found")
+	assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")
 }

 func TestPipelineNotebookDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
 					"pipeline": {
@@ -482,16 +482,16 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {

 	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	assert.EqualError(t, err, "notebook ./doesnt_exist.py not found")
+	assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
 }

 func TestPipelineFileDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
 					"pipeline": {
@@ -512,8 +512,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {

 	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	assert.EqualError(t, err, "file ./doesnt_exist.py not found")
+	assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")
 }

 func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
@@ -521,8 +521,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
 	touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
 			},
@@ -546,8 +546,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {

 	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))

-	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	assert.ErrorContains(t, err, `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`)
+	assert.ErrorContains(t, diags.Error(), `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`)
 }

 func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
@@ -555,8 +555,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "my_file.py"))

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
 			},
@ -580,8 +580,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.ErrorContains(t, err, `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`)
|
assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
|
func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
|
||||||
|
@ -589,8 +589,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
|
||||||
touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
|
touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
|
||||||
|
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
|
RootPath: dir,
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Path: dir,
|
|
||||||
Workspace: config.Workspace{
|
Workspace: config.Workspace{
|
||||||
FilePath: "/bundle",
|
FilePath: "/bundle",
|
||||||
},
|
},
|
||||||
|
@ -614,8 +614,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.ErrorContains(t, err, `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`)
|
assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
|
func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
|
||||||
|
@ -623,8 +623,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
|
||||||
touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
|
touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
|
||||||
|
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
|
RootPath: dir,
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Path: dir,
|
|
||||||
Workspace: config.Workspace{
|
Workspace: config.Workspace{
|
||||||
FilePath: "/bundle",
|
FilePath: "/bundle",
|
||||||
},
|
},
|
||||||
|
@ -648,6 +648,6 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
||||||
|
|
||||||
err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.ErrorContains(t, err, `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`)
|
assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`)
|
||||||
}
|
}
|
||||||
|
|
|
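Every test hunk above makes the same two-line change: bundle.Apply now returns diag.Diagnostics instead of error, and the assertion goes through diags.Error(). A minimal, hypothetical test (not part of this commit) showing why the existing assert.EqualError and assert.ErrorContains calls keep working against diags.Error():

package mutator_test

import (
	"testing"

	"github.com/databricks/cli/libs/diag"
	"github.com/stretchr/testify/assert"
)

func TestDiagnosticsErrorAssertions(t *testing.T) {
	// Empty diagnostics collapse to a nil error.
	var diags diag.Diagnostics
	assert.NoError(t, diags.Error())

	// diag.Errorf records an error diagnostic; Error() exposes it as a plain error,
	// so testify's error assertions work unchanged.
	diags = diag.Errorf("notebook %s not found", "./doesnt_exist.py")
	assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
}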
@@ -2,9 +2,9 @@ package mutator

 import (
 	"context"
-	"fmt"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )

 type validateGitDetails struct{}
@@ -17,13 +17,13 @@ func (m *validateGitDetails) Name() string {
 	return "ValidateGitDetails"
 }

-func (m *validateGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *validateGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if b.Config.Bundle.Git.Branch == "" || b.Config.Bundle.Git.ActualBranch == "" {
 		return nil
 	}

 	if b.Config.Bundle.Git.Branch != b.Config.Bundle.Git.ActualBranch && !b.Config.Bundle.Force {
-		return fmt.Errorf("not on the right Git branch:\n expected according to configuration: %s\n actual: %s\nuse --force to override", b.Config.Bundle.Git.Branch, b.Config.Bundle.Git.ActualBranch)
+		return diag.Errorf("not on the right Git branch:\n expected according to configuration: %s\n actual: %s\nuse --force to override", b.Config.Bundle.Git.Branch, b.Config.Bundle.Git.ActualBranch)
 	}
 	return nil
 }
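The change from fmt.Errorf (with %w) to diag.Errorf (with %v) recurs throughout this commit: diagnostics carry formatted summaries rather than wrapped error chains, and diag.FromErr is used when an existing error should be passed through unchanged. A small sketch of the two helpers, assuming only the libs/diag package:

package main

import (
	"errors"
	"fmt"

	"github.com/databricks/cli/libs/diag"
)

func main() {
	err := errors.New("branch mismatch")

	d1 := diag.FromErr(err)                      // keep the error message as-is
	d2 := diag.Errorf("git validation: %v", err) // format a new summary

	fmt.Println(d1.Error())
	fmt.Println(d2.Error())
}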
@@ -22,9 +22,8 @@ func TestValidateGitDetailsMatchingBranches(t *testing.T) {
 	}

 	m := ValidateGitDetails()
-	err := bundle.Apply(context.Background(), b, m)
-
-	assert.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	assert.NoError(t, diags.Error())
 }

 func TestValidateGitDetailsNonMatchingBranches(t *testing.T) {
@@ -40,10 +39,10 @@ func TestValidateGitDetailsNonMatchingBranches(t *testing.T) {
 	}

 	m := ValidateGitDetails()
-	err := bundle.Apply(context.Background(), b, m)
+	diags := bundle.Apply(context.Background(), b, m)

 	expectedError := "not on the right Git branch:\n expected according to configuration: main\n actual: feature\nuse --force to override"
-	assert.EqualError(t, err, expectedError)
+	assert.EqualError(t, diags.Error(), expectedError)
 }

 func TestValidateGitDetailsNotUsingGit(t *testing.T) {
@@ -59,7 +58,6 @@ func TestValidateGitDetailsNotUsingGit(t *testing.T) {
 	}

 	m := ValidateGitDetails()
-	err := bundle.Apply(context.Background(), b, m)
-
-	assert.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	assert.NoError(t, diags.Error())
 }
@@ -5,7 +5,6 @@ import (
 	"context"
 	"fmt"
 	"os"
-	"path/filepath"
 	"strings"

 	"github.com/databricks/cli/bundle/config/resources"
@@ -21,13 +20,8 @@ import (

 type Root struct {
 	value dyn.Value
-	diags diag.Diagnostics
 	depth int

-	// Path contains the directory path to the root of the bundle.
-	// It is set when loading `databricks.yml`.
-	Path string `json:"-" bundle:"readonly"`
-
 	// Contains user defined variables
 	Variables map[string]*variable.Variable `json:"variables,omitempty"`

@@ -74,44 +68,40 @@ type Root struct {
 }

 // Load loads the bundle configuration file at the specified path.
-func Load(path string) (*Root, error) {
+func Load(path string) (*Root, diag.Diagnostics) {
 	raw, err := os.ReadFile(path)
 	if err != nil {
-		return nil, err
+		return nil, diag.FromErr(err)
 	}

-	r := Root{
-		Path: filepath.Dir(path),
-	}
+	r := Root{}

 	// Load configuration tree from YAML.
 	v, err := yamlloader.LoadYAML(path, bytes.NewBuffer(raw))
 	if err != nil {
-		return nil, fmt.Errorf("failed to load %s: %w", path, err)
+		return nil, diag.Errorf("failed to load %s: %v", path, err)
 	}

 	// Rewrite configuration tree where necessary.
 	v, err = rewriteShorthands(v)
 	if err != nil {
-		return nil, fmt.Errorf("failed to rewrite %s: %w", path, err)
+		return nil, diag.Errorf("failed to rewrite %s: %v", path, err)
 	}

 	// Normalize dynamic configuration tree according to configuration type.
 	v, diags := convert.Normalize(r, v)

-	// Keep track of diagnostics (warnings and errors in the schema).
-	// We delay acting on diagnostics until we have loaded all
-	// configuration files and merged them together.
-	r.diags = diags
-
 	// Convert normalized configuration tree to typed configuration.
 	err = r.updateWithDynamicValue(v)
 	if err != nil {
-		return nil, fmt.Errorf("failed to load %s: %w", path, err)
+		return nil, diag.Errorf("failed to load %s: %v", path, err)
 	}

 	_, err = r.Resources.VerifyUniqueResourceIdentifiers()
-	return &r, err
+	if err != nil {
+		diags = diags.Extend(diag.FromErr(err))
+	}
+	return &r, diags
 }

 func (r *Root) initializeDynamicValue() error {
@@ -133,14 +123,10 @@ func (r *Root) initializeDynamicValue() error {
 func (r *Root) updateWithDynamicValue(nv dyn.Value) error {
 	// Hack: restore state; it may be cleared by [ToTyped] if
 	// the configuration equals nil (happens in tests).
-	diags := r.diags
 	depth := r.depth
-	path := r.Path

 	defer func() {
-		r.diags = diags
 		r.depth = depth
-		r.Path = path
 	}()

 	// Convert normalized configuration tree to typed configuration.
@@ -245,10 +231,6 @@ func (r *Root) MarkMutatorExit(ctx context.Context) error {
 	return nil
 }

-func (r *Root) Diagnostics() diag.Diagnostics {
-	return r.diags
-}
-
 // SetConfigFilePath configures the path that its configuration
 // was loaded from in configuration leafs that require it.
 func (r *Root) ConfigureConfigFilePath() {
@@ -282,9 +264,6 @@ func (r *Root) InitializeVariables(vars []string) error {
 }

 func (r *Root) Merge(other *Root) error {
-	// Merge diagnostics.
-	r.diags = append(r.diags, other.diags...)
-
 	// Check for safe merge, protecting against duplicate resource identifiers
 	err := r.Resources.VerifySafeMerge(&other.Resources)
 	if err != nil {
@@ -469,3 +448,14 @@ func validateVariableOverrides(root, target dyn.Value) (err error) {

 	return nil
 }
+
+// Best effort to get the location of configuration value at the specified path.
+// This function is useful to annotate error messages with the location, because
+// we don't want to fail with a different error message if we cannot retrieve the location.
+func (r *Root) GetLocation(path string) dyn.Location {
+	v, err := dyn.Get(r.value, path)
+	if err != nil {
+		return dyn.Location{}
+	}
+	return v.Location()
+}
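With this change config.Load returns (*Root, diag.Diagnostics) and no longer stores diagnostics (or the bundle path) on the Root struct. A hedged sketch of the new calling convention; the file path below is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/databricks/cli/bundle/config"
)

func main() {
	// Diagnostics may carry warnings as well as errors; Error() is nil
	// unless an error-level diagnostic is present.
	root, diags := config.Load("databricks.yml")
	if err := diags.Error(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(root.Bundle.Name)
}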
@@ -25,24 +25,24 @@ func TestRootMarshalUnmarshal(t *testing.T) {
 }

 func TestRootLoad(t *testing.T) {
-	root, err := Load("../tests/basic/databricks.yml")
-	require.NoError(t, err)
+	root, diags := Load("../tests/basic/databricks.yml")
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "basic", root.Bundle.Name)
 }

 func TestDuplicateIdOnLoadReturnsError(t *testing.T) {
-	_, err := Load("./testdata/duplicate_resource_names_in_root/databricks.yml")
-	assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)")
+	_, diags := Load("./testdata/duplicate_resource_names_in_root/databricks.yml")
+	assert.ErrorContains(t, diags.Error(), "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)")
 }

 func TestDuplicateIdOnMergeReturnsError(t *testing.T) {
-	root, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml")
-	require.NoError(t, err)
+	root, diags := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml")
+	require.NoError(t, diags.Error())

-	other, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml")
-	require.NoError(t, err)
+	other, diags := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml")
+	require.NoError(t, diags.Error())

-	err = root.Merge(other)
+	err := root.Merge(other)
 	assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml, pipeline at ./testdata/duplicate_resource_name_in_subconfiguration/resources.yml)")
 }
@@ -3,7 +3,7 @@ package bundle
 import (
 	"context"

-	"github.com/databricks/cli/libs/errs"
+	"github.com/databricks/cli/libs/diag"
 )

 type DeferredMutator struct {
@@ -22,12 +22,9 @@ func Defer(mutator Mutator, finally Mutator) Mutator {
 	}
 }

-func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) error {
-	mainErr := Apply(ctx, b, d.mutator)
-	errOnFinish := Apply(ctx, b, d.finally)
-	if mainErr != nil || errOnFinish != nil {
-		return errs.FromMany(mainErr, errOnFinish)
-	}
-
-	return nil
+func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics {
+	var diags diag.Diagnostics
+	diags = diags.Extend(Apply(ctx, b, d.mutator))
+	diags = diags.Extend(Apply(ctx, b, d.finally))
+	return diags
 }
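Diagnostics.Extend replaces the old errs.FromMany helper: both the main mutator's and the deferred mutator's output are appended into one value, and nothing is lost when both fail. A standalone sketch of that accumulation:

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/diag"
)

func main() {
	var diags diag.Diagnostics
	diags = diags.Extend(diag.Errorf("mutator error occurred"))
	diags = diags.Extend(diag.Errorf("cleanup error occurred"))

	// Both entries are preserved; each diagnostic carries its own summary.
	for _, d := range diags {
		fmt.Println(d.Summary)
	}
}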
@@ -2,9 +2,9 @@ package bundle

 import (
 	"context"
-	"fmt"
 	"testing"

+	"github.com/databricks/cli/libs/diag"
 	"github.com/stretchr/testify/assert"
 )

@@ -17,9 +17,9 @@ func (t *mutatorWithError) Name() string {
 	return "mutatorWithError"
 }

-func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) error {
+func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) diag.Diagnostics {
 	t.applyCalled++
-	return fmt.Errorf(t.errorMsg)
+	return diag.Errorf(t.errorMsg)
 }

 func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) {
@@ -30,8 +30,8 @@ func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) {
 	deferredMutator := Defer(Seq(m1, m2, m3), cleanup)

 	b := &Bundle{}
-	err := Apply(context.Background(), b, deferredMutator)
-	assert.NoError(t, err)
+	diags := Apply(context.Background(), b, deferredMutator)
+	assert.NoError(t, diags.Error())

 	assert.Equal(t, 1, m1.applyCalled)
 	assert.Equal(t, 1, m2.applyCalled)
@@ -47,8 +47,8 @@ func TestDeferredMutatorWhenFirstFails(t *testing.T) {
 	deferredMutator := Defer(Seq(mErr, m1, m2), cleanup)

 	b := &Bundle{}
-	err := Apply(context.Background(), b, deferredMutator)
-	assert.ErrorContains(t, err, "mutator error occurred")
+	diags := Apply(context.Background(), b, deferredMutator)
+	assert.ErrorContains(t, diags.Error(), "mutator error occurred")

 	assert.Equal(t, 1, mErr.applyCalled)
 	assert.Equal(t, 0, m1.applyCalled)
@@ -64,8 +64,8 @@ func TestDeferredMutatorWhenMiddleOneFails(t *testing.T) {
 	deferredMutator := Defer(Seq(m1, mErr, m2), cleanup)

 	b := &Bundle{}
-	err := Apply(context.Background(), b, deferredMutator)
-	assert.ErrorContains(t, err, "mutator error occurred")
+	diags := Apply(context.Background(), b, deferredMutator)
+	assert.ErrorContains(t, diags.Error(), "mutator error occurred")

 	assert.Equal(t, 1, m1.applyCalled)
 	assert.Equal(t, 1, mErr.applyCalled)
@@ -81,8 +81,8 @@ func TestDeferredMutatorWhenLastOneFails(t *testing.T) {
 	deferredMutator := Defer(Seq(m1, m2, mErr), cleanup)

 	b := &Bundle{}
-	err := Apply(context.Background(), b, deferredMutator)
-	assert.ErrorContains(t, err, "mutator error occurred")
+	diags := Apply(context.Background(), b, deferredMutator)
+	assert.ErrorContains(t, diags.Error(), "mutator error occurred")

 	assert.Equal(t, 1, m1.applyCalled)
 	assert.Equal(t, 1, m2.applyCalled)
@@ -98,8 +98,14 @@ func TestDeferredMutatorCombinesErrorMessages(t *testing.T) {
 	deferredMutator := Defer(Seq(m1, m2, mErr), cleanupErr)

 	b := &Bundle{}
-	err := Apply(context.Background(), b, deferredMutator)
-	assert.ErrorContains(t, err, "mutator error occurred\ncleanup error occurred")
+	diags := Apply(context.Background(), b, deferredMutator)
+
+	var errs []string
+	for _, d := range diags {
+		errs = append(errs, d.Summary)
+	}
+	assert.Contains(t, errs, "mutator error occurred")
+	assert.Contains(t, errs, "cleanup error occurred")

 	assert.Equal(t, 1, m1.applyCalled)
 	assert.Equal(t, 1, m2.applyCalled)
@@ -6,6 +6,7 @@ import (
 	"strconv"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
@@ -30,29 +31,29 @@ func (l *checkRunningResources) Name() string {
 	return "check-running-resources"
 }

-func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if !b.Config.Bundle.Deployment.FailOnActiveRuns {
 		return nil
 	}

 	tf := b.Terraform
 	if tf == nil {
-		return fmt.Errorf("terraform not initialized")
+		return diag.Errorf("terraform not initialized")
 	}

 	err := tf.Init(ctx, tfexec.Upgrade(true))
 	if err != nil {
-		return fmt.Errorf("terraform init: %w", err)
+		return diag.Errorf("terraform init: %v", err)
 	}

 	state, err := b.Terraform.Show(ctx)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	err = checkAnyResourceRunning(ctx, b.WorkspaceClient(), state)
 	if err != nil {
-		return fmt.Errorf("deployment aborted, err: %w", err)
+		return diag.Errorf("deployment aborted, err: %v", err)
 	}

 	return nil
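The mutator above illustrates the general shape after the migration: return nil when there is nothing to report, diag.Errorf for a formatted failure, and diag.FromErr to pass an underlying error through. A hypothetical guard mutator in the same style (the type and package name are invented for illustration):

package deploy

import (
	"context"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

type failOnMissingTarget struct{}

func (m *failOnMissingTarget) Name() string {
	return "check-target-set"
}

func (m *failOnMissingTarget) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	// nil means "no diagnostics"; an error diagnostic aborts the deployment.
	if b.Config.Bundle.Target == "" {
		return diag.Errorf("no deployment target configured")
	}
	return nil
}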
@@ -6,6 +6,7 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go/service/workspace"
 	"github.com/fatih/color"
 )
@@ -16,7 +17,7 @@ func (m *delete) Name() string {
 	return "files.Delete"
 }

-func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Do not delete files if terraform destroy was not consented
 	if !b.Plan.IsEmpty && !b.Plan.ConfirmApply {
 		return nil
@@ -29,7 +30,7 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error {
 	if !b.AutoApprove {
 		proceed, err := cmdio.AskYesOrNo(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?", b.Config.Workspace.RootPath, red("deleted permanently!")))
 		if err != nil {
-			return err
+			return diag.FromErr(err)
 		}
 		if !proceed {
 			return nil
@@ -41,17 +42,17 @@
 		Recursive: true,
 	})
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	// Clean up sync snapshot file
 	sync, err := GetSync(ctx, b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	err = sync.DestroySnapshot(ctx)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	cmdio.LogString(ctx, fmt.Sprintf("Deleted snapshot file at %s", sync.SnapshotPath()))
@@ -28,7 +28,7 @@ func GetSyncOptions(ctx context.Context, b *bundle.Bundle) (*sync.SyncOptions, e
 	}

 	opts := &sync.SyncOptions{
-		LocalPath:  b.Config.Path,
+		LocalPath:  b.RootPath,
 		RemotePath: b.Config.Workspace.FilePath,
 		Include:    includes,
 		Exclude:    b.Config.Sync.Exclude,
@@ -6,6 +6,7 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 )

@@ -15,16 +16,16 @@ func (m *upload) Name() string {
 	return "files.Upload"
 }

-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath))
 	sync, err := GetSync(ctx, b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	err = sync.RunOnce(ctx)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	log.Infof(ctx, "Uploaded bundle files")
@@ -3,9 +3,9 @@ package lock
 import (
 	"context"
 	"errors"
-	"fmt"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/filer"
 	"github.com/databricks/cli/libs/locker"
 	"github.com/databricks/cli/libs/log"
@@ -33,7 +33,7 @@ func (m *acquire) init(b *bundle.Bundle) error {
 	return nil
 }

-func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Return early if locking is disabled.
 	if !b.Config.Bundle.Deployment.Lock.IsEnabled() {
 		log.Infof(ctx, "Skipping; locking is disabled")
@@ -42,7 +42,7 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error {

 	err := m.init(b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	force := b.Config.Bundle.Deployment.Lock.Force
@@ -55,9 +55,9 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error {
 		if errors.As(err, &notExistsError) {
 			// If we get a "doesn't exist" error from the API this indicates
 			// we either don't have permissions or the path is invalid.
-			return fmt.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath)
+			return diag.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath)
 		}
-		return err
+		return diag.FromErr(err)
 	}

 	return nil
@@ -2,9 +2,9 @@ package lock

 import (
 	"context"
-	"fmt"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/locker"
 	"github.com/databricks/cli/libs/log"
 )
@@ -30,7 +30,7 @@ func (m *release) Name() string {
 	return "lock:release"
 }

-func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *release) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Return early if locking is disabled.
 	if !b.Config.Bundle.Deployment.Lock.IsEnabled() {
 		log.Infof(ctx, "Skipping; locking is disabled")
@@ -47,12 +47,12 @@ func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error {
 	log.Infof(ctx, "Releasing deployment lock")
 	switch m.goal {
 	case GoalDeploy:
-		return b.Locker.Unlock(ctx)
+		return diag.FromErr(b.Locker.Unlock(ctx))
 	case GoalBind, GoalUnbind:
-		return b.Locker.Unlock(ctx)
+		return diag.FromErr(b.Locker.Unlock(ctx))
 	case GoalDestroy:
-		return b.Locker.Unlock(ctx, locker.AllowLockFileNotExist)
+		return diag.FromErr(b.Locker.Unlock(ctx, locker.AllowLockFileNotExist))
 	default:
-		return fmt.Errorf("unknown goal for lock release: %s", m.goal)
+		return diag.Errorf("unknown goal for lock release: %s", m.goal)
 	}
 }
@@ -5,6 +5,7 @@ import (
 	"path"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 )

@@ -18,7 +19,7 @@ func (m *annotateJobs) Name() string {
 	return "metadata.AnnotateJobs"
 }

-func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) error {
+func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	for _, job := range b.Config.Resources.Jobs {
 		if job.JobSettings == nil {
 			continue
@@ -9,6 +9,7 @@ import (
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func TestAnnotateJobsMutator(t *testing.T) {
@@ -34,8 +35,8 @@
 		},
 	}

-	err := AnnotateJobs().Apply(context.Background(), b)
-	assert.NoError(t, err)
+	diags := AnnotateJobs().Apply(context.Background(), b)
+	require.NoError(t, diags.Error())

 	assert.Equal(t,
 		&jobs.JobDeployment{
@@ -67,6 +68,6 @@ func TestAnnotateJobsMutatorJobWithoutSettings(t *testing.T) {
 		},
 	}

-	err := AnnotateJobs().Apply(context.Background(), b)
-	assert.NoError(t, err)
+	diags := AnnotateJobs().Apply(context.Background(), b)
+	require.NoError(t, diags.Error())
 }
@@ -2,12 +2,12 @@ package metadata

 import (
 	"context"
-	"fmt"
 	"path/filepath"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/metadata"
+	"github.com/databricks/cli/libs/diag"
 )

 type compute struct{}
@@ -20,7 +20,7 @@ func (m *compute) Name() string {
 	return "metadata.Compute"
 }

-func (m *compute) Apply(_ context.Context, b *bundle.Bundle) error {
+func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	b.Metadata = metadata.Metadata{
 		Version: metadata.Version,
 		Config:  metadata.Config{},
@@ -39,9 +39,9 @@
 	for name, job := range b.Config.Resources.Jobs {
 		// Compute config file path the job is defined in, relative to the bundle
 		// root
-		relativePath, err := filepath.Rel(b.Config.Path, job.ConfigFilePath)
+		relativePath, err := filepath.Rel(b.RootPath, job.ConfigFilePath)
 		if err != nil {
-			return fmt.Errorf("failed to compute relative path for job %s: %w", name, err)
+			return diag.Errorf("failed to compute relative path for job %s: %v", name, err)
 		}
 		// Metadata for the job
 		jobsMetadata[name] = &metadata.Job{
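The only functional change in this hunk besides the return type is that the relative path is now computed against b.RootPath instead of b.Config.Path. The mechanics of filepath.Rel are unchanged; a small standalone example with illustrative paths:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	rootPath := "/home/user/my_bundle"
	configFilePath := "/home/user/my_bundle/resources/job.yml"

	// Yields "resources/job.yml": the job's config file relative to the bundle root.
	rel, err := filepath.Rel(rootPath, configFilePath)
	if err != nil {
		panic(err)
	}
	fmt.Println(rel)
}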
@@ -91,8 +91,8 @@ func TestComputeMetadataMutator(t *testing.T) {
 		},
 	}

-	err := bundle.Apply(context.Background(), b, Compute())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, Compute())
+	require.NoError(t, diags.Error())

 	assert.Equal(t, expectedMetadata, b.Metadata)
 }
@@ -6,6 +6,7 @@ import (
 	"encoding/json"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/filer"
 )

@@ -21,16 +22,16 @@ func (m *upload) Name() string {
 	return "metadata.Upload"
 }

-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	metadata, err := json.MarshalIndent(b.Metadata, "", " ")
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

-	return f.Write(ctx, MetadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists)
+	return diag.FromErr(f.Write(ctx, MetadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists))
 }
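Note that the success path wraps the final f.Write call directly in diag.FromErr. That is safe because diag.FromErr of a nil error yields diagnostics whose Error() is nil, so the one-liner is equivalent to the old "return err". A sketch of that behaviour:

package main

import (
	"errors"
	"fmt"

	"github.com/databricks/cli/libs/diag"
)

func write(fail bool) error {
	if fail {
		return errors.New("write failed")
	}
	return nil
}

func main() {
	fmt.Println(diag.FromErr(write(false)).Error()) // <nil>
	fmt.Println(diag.FromErr(write(true)).Error())  // write failed
}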
@@ -11,6 +11,7 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/deploy/files"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/filer"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/cli/libs/sync"
@@ -20,10 +21,10 @@ type statePull struct {
 	filerFactory FilerFactory
 }

-func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	f, err := s.filerFactory(b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	// Download deployment state file from filer to local cache directory.
@@ -31,7 +32,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error {
 	remote, err := s.remoteState(ctx, f)
 	if err != nil {
 		log.Infof(ctx, "Unable to open remote deployment state file: %s", err)
-		return err
+		return diag.FromErr(err)
 	}
 	if remote == nil {
 		log.Infof(ctx, "Remote deployment state file does not exist")
@@ -40,19 +41,19 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error {

 	statePath, err := getPathToStateFile(ctx, b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0600)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	defer local.Close()

 	data := remote.Bytes()
 	err = validateRemoteStateCompatibility(bytes.NewReader(data))
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	if !isLocalStateStale(local, bytes.NewReader(data)) {
@@ -68,30 +69,30 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error {
 	log.Infof(ctx, "Writing remote deployment state file to local cache directory")
 	_, err = io.Copy(local, bytes.NewReader(data))
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	var state DeploymentState
 	err = json.Unmarshal(data, &state)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	// Create a new snapshot based on the deployment state file.
 	opts, err := files.GetSyncOptions(ctx, b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	log.Infof(ctx, "Creating new snapshot")
-	snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.Config.Path), opts)
+	snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.RootPath), opts)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	// Persist the snapshot to disk.
 	log.Infof(ctx, "Persisting snapshot to disk")
-	return snapshot.Save(ctx)
+	return diag.FromErr(snapshot.Save(ctx))
 }

 func (s *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buffer, error) {
@@ -59,8 +59,8 @@ func testStatePull(t *testing.T, opts statePullOpts) {
 	}}

 	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
 		Config: config.Root{
-			Path: t.TempDir(),
 			Bundle: config.Bundle{
 				Target: "default",
 			},
@@ -77,11 +77,11 @@
 	ctx := context.Background()

 	for _, file := range opts.localFiles {
-		testutil.Touch(t, filepath.Join(b.Config.Path, "bar"), file)
+		testutil.Touch(t, filepath.Join(b.RootPath, "bar"), file)
 	}

 	for _, file := range opts.localNotebooks {
-		testutil.TouchNotebook(t, filepath.Join(b.Config.Path, "bar"), file)
+		testutil.TouchNotebook(t, filepath.Join(b.RootPath, "bar"), file)
 	}

 	if opts.withExistingSnapshot {
@@ -106,8 +106,8 @@
 		require.NoError(t, err)
 	}

-	err := bundle.Apply(ctx, b, s)
-	require.NoError(t, err)
+	diags := bundle.Apply(ctx, b, s)
+	require.NoError(t, diags.Error())

 	// Check that deployment state was written
 	statePath, err := getPathToStateFile(ctx, b)
@@ -251,8 +251,8 @@ func TestStatePullNoState(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
 		Config: config.Root{
-			Path: t.TempDir(),
 			Bundle: config.Bundle{
 				Target: "default",
 			},
@@ -263,8 +263,8 @@
 	}
 	ctx := context.Background()

-	err := bundle.Apply(ctx, b, s)
-	require.NoError(t, err)
+	diags := bundle.Apply(ctx, b, s)
+	require.NoError(t, diags.Error())

 	// Check that deployment state was not written
 	statePath, err := getPathToStateFile(ctx, b)
@@ -439,8 +439,8 @@ func TestStatePullNewerDeploymentStateVersion(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
 		Config: config.Root{
-			Path: t.TempDir(),
 			Bundle: config.Bundle{
 				Target: "default",
 			},
@@ -451,7 +451,7 @@
 	}
 	ctx := context.Background()

-	err := bundle.Apply(ctx, b, s)
-	require.Error(t, err)
-	require.Contains(t, err.Error(), "remote deployment state is incompatible with the current version of the CLI, please upgrade to at least 1.2.3")
+	diags := bundle.Apply(ctx, b, s)
+	require.True(t, diags.HasError())
+	require.ErrorContains(t, diags.Error(), "remote deployment state is incompatible with the current version of the CLI, please upgrade to at least 1.2.3")
 }
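These test fixtures show the other recurring change in this commit: the bundle root directory moved from Config.Path to the top-level RootPath field on bundle.Bundle. A reusable helper in the same spirit (a sketch; real tests usually add resources and workspace settings as well):

package deploy

import (
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
)

func newTestBundle(t *testing.T) *bundle.Bundle {
	return &bundle.Bundle{
		RootPath: t.TempDir(),
		Config: config.Root{
			Bundle: config.Bundle{
				Target: "default",
			},
		},
	}
}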
@@ -5,6 +5,7 @@ import (
 	"os"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/filer"
 	"github.com/databricks/cli/libs/log"
 )
@@ -17,27 +18,27 @@ func (s *statePush) Name() string {
 	return "deploy:state-push"
 }

-func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	f, err := s.filerFactory(b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	statePath, err := getPathToStateFile(ctx, b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	local, err := os.Open(statePath)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	defer local.Close()

 	log.Infof(ctx, "Writing local deployment state file to remote state directory")
 	err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	return nil
@@ -45,8 +45,8 @@ func TestStatePush(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
 		Config: config.Root{
-			Path: t.TempDir(),
 			Bundle: config.Bundle{
 				Target: "default",
 			},
@@ -77,6 +77,6 @@
 	err = os.WriteFile(statePath, data, 0644)
 	require.NoError(t, err)

-	err = bundle.Apply(ctx, b, s)
-	require.NoError(t, err)
+	diags := bundle.Apply(ctx, b, s)
+	require.NoError(t, diags.Error())
 }
@@ -11,6 +11,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/deploy/files"
 	"github.com/databricks/cli/internal/build"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 )

@@ -21,10 +22,10 @@ func (s *stateUpdate) Name() string {
 	return "deploy:state-update"
 }

-func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	state, err := load(ctx, b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	// Increment the state sequence.
@@ -40,41 +41,41 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) error {
 	// Get the current file list.
 	sync, err := files.GetSync(ctx, b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	files, err := sync.GetFileList(ctx)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	// Update the state with the current file list.
 	fl, err := FromSlice(files)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	state.Files = fl

 	statePath, err := getPathToStateFile(ctx, b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	// Write the state back to the file.
 	f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600)
 	if err != nil {
 		log.Infof(ctx, "Unable to open deployment state file: %s", err)
-		return err
+		return diag.FromErr(err)
 	}
 	defer f.Close()

 	data, err := json.Marshal(state)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	_, err = io.Copy(f, bytes.NewReader(data))
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	return nil
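The tail of stateUpdate.Apply serializes the state and rewrites the local state file. A simplified sketch of that write step using os.WriteFile instead of OpenFile and io.Copy; the struct fields and file name here are illustrative, not the CLI's actual DeploymentState schema:

package main

import (
	"encoding/json"
	"os"
)

type deploymentState struct {
	Seq   int64    `json:"seq"`
	Files []string `json:"files"`
}

func main() {
	state := deploymentState{Seq: 1, Files: []string{"test1.py", "test2.py"}}

	data, err := json.Marshal(state)
	if err != nil {
		panic(err)
	}
	// 0600: the state file is private to the deploying user.
	if err := os.WriteFile("deployment-state.json", data, 0600); err != nil {
		panic(err)
	}
}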
@@ -22,8 +22,8 @@ func TestStateUpdate(t *testing.T) {
     s := &stateUpdate{}

     b := &bundle.Bundle{
+        RootPath: t.TempDir(),
         Config: config.Root{
-            Path: t.TempDir(),
             Bundle: config.Bundle{
                 Target: "default",
             },
@@ -39,8 +39,8 @@ func TestStateUpdate(t *testing.T) {
         },
     }

-    testutil.Touch(t, b.Config.Path, "test1.py")
-    testutil.Touch(t, b.Config.Path, "test2.py")
+    testutil.Touch(t, b.RootPath, "test1.py")
+    testutil.Touch(t, b.RootPath, "test2.py")

     m := mocks.NewMockWorkspaceClient(t)
     m.WorkspaceClient.Config = &databrickscfg.Config{
@@ -55,8 +55,8 @@ func TestStateUpdate(t *testing.T) {

     ctx := context.Background()

-    err := bundle.Apply(ctx, b, s)
-    require.NoError(t, err)
+    diags := bundle.Apply(ctx, b, s)
+    require.NoError(t, diags.Error())

     // Check that the state file was updated.
     state, err := load(ctx, b)
@@ -66,8 +66,8 @@ func TestStateUpdate(t *testing.T) {
     require.Len(t, state.Files, 3)
     require.Equal(t, build.GetInfo().Version, state.CliVersion)

-    err = bundle.Apply(ctx, b, s)
-    require.NoError(t, err)
+    diags = bundle.Apply(ctx, b, s)
+    require.NoError(t, diags.Error())

     // Check that the state file was updated again.
     state, err = load(ctx, b)
@@ -82,8 +82,8 @@ func TestStateUpdateWithExistingState(t *testing.T) {
     s := &stateUpdate{}

     b := &bundle.Bundle{
+        RootPath: t.TempDir(),
         Config: config.Root{
-            Path: t.TempDir(),
             Bundle: config.Bundle{
                 Target: "default",
             },
@@ -99,8 +99,8 @@ func TestStateUpdateWithExistingState(t *testing.T) {
         },
     }

-    testutil.Touch(t, b.Config.Path, "test1.py")
-    testutil.Touch(t, b.Config.Path, "test2.py")
+    testutil.Touch(t, b.RootPath, "test1.py")
+    testutil.Touch(t, b.RootPath, "test2.py")

     m := mocks.NewMockWorkspaceClient(t)
     m.WorkspaceClient.Config = &databrickscfg.Config{
@@ -136,8 +136,8 @@ func TestStateUpdateWithExistingState(t *testing.T) {
     err = os.WriteFile(statePath, data, 0644)
     require.NoError(t, err)

-    err = bundle.Apply(ctx, b, s)
-    require.NoError(t, err)
+    diags := bundle.Apply(ctx, b, s)
+    require.NoError(t, diags.Error())

     // Check that the state file was updated.
     state, err = load(ctx, b)
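The test hunks above switch from `err := bundle.Apply(...)` to `diags := bundle.Apply(...)` and assert success through `diags.Error()`. A minimal sketch of that assertion style, assuming only what the diff shows (`bundle.Apply` returning diagnostics, the `RootPath` field, and `Diagnostics.Error()`); the `okMutator` type is invented for the example:

package example_test

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/stretchr/testify/require"
)

// okMutator is a stand-in mutator that succeeds without touching the bundle.
type okMutator struct{}

func (okMutator) Name() string { return "example:ok" }

func (okMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return nil }

func TestApplyReportsNoErrors(t *testing.T) {
    b := &bundle.Bundle{
        RootPath: t.TempDir(),
    }

    diags := bundle.Apply(context.Background(), b, okMutator{})
    // Diagnostics.Error() returns nil when there are no error-severity
    // diagnostics, so the familiar require.NoError assertion still works.
    require.NoError(t, diags.Error())
}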
@@ -2,10 +2,10 @@ package terraform

 import (
     "context"
-    "fmt"

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/libs/cmdio"
+    "github.com/databricks/cli/libs/diag"
     "github.com/databricks/cli/libs/log"
     "github.com/hashicorp/terraform-exec/tfexec"
 )
@@ -16,22 +16,22 @@ func (w *apply) Name() string {
     return "terraform.Apply"
 }

-func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     tf := b.Terraform
     if tf == nil {
-        return fmt.Errorf("terraform not initialized")
+        return diag.Errorf("terraform not initialized")
     }

     cmdio.LogString(ctx, "Deploying resources...")

     err := tf.Init(ctx, tfexec.Upgrade(true))
     if err != nil {
-        return fmt.Errorf("terraform init: %w", err)
+        return diag.Errorf("terraform init: %v", err)
     }

     err = tf.Apply(ctx)
     if err != nil {
-        return fmt.Errorf("terraform apply: %w", err)
+        return diag.Errorf("terraform apply: %v", err)
     }

     log.Infof(ctx, "Resource deployment completed")
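One detail worth noting in the hunk above: `fmt.Errorf("terraform init: %w", err)` becomes `diag.Errorf("terraform init: %v", err)`. The `%v` verb renders the underlying error into the diagnostic's message, so the original error is no longer wrapped for `errors.Is`/`errors.As`; `diag.FromErr` remains the closest equivalent of a bare `return err`. A small hedged sketch of the two helpers as used here:

package main

import (
    "errors"
    "fmt"

    "github.com/databricks/cli/libs/diag"
)

// wrapStep flattens the cause into the diagnostic message, as the
// diag.Errorf calls in the hunk above do.
func wrapStep(err error) diag.Diagnostics {
    return diag.Errorf("terraform init: %v", err)
}

// passThrough keeps the error as-is; it is the direct replacement for
// a bare "return err".
func passThrough(err error) diag.Diagnostics {
    return diag.FromErr(err)
}

func main() {
    cause := errors.New("exit status 1")
    // Expected to print something like:
    //   terraform init: exit status 1
    //   exit status 1
    fmt.Println(wrapStep(cause).Error())
    fmt.Println(passThrough(cause).Error())
}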
@@ -7,6 +7,7 @@ import (

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/libs/cmdio"
+    "github.com/databricks/cli/libs/diag"
     "github.com/fatih/color"
     "github.com/hashicorp/terraform-exec/tfexec"
     tfjson "github.com/hashicorp/terraform-json"
@@ -62,7 +63,7 @@ func (w *destroy) Name() string {
     return "terraform.Destroy"
 }

-func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     // return early if plan is empty
     if b.Plan.IsEmpty {
         cmdio.LogString(ctx, "No resources to destroy in plan. Skipping destroy!")
@@ -71,19 +72,19 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error {

     tf := b.Terraform
     if tf == nil {
-        return fmt.Errorf("terraform not initialized")
+        return diag.Errorf("terraform not initialized")
     }

     // read plan file
     plan, err := tf.ShowPlanFile(ctx, b.Plan.Path)
     if err != nil {
-        return err
+        return diag.FromErr(err)
     }

     // print the resources that will be destroyed
     err = logDestroyPlan(ctx, plan.ResourceChanges)
     if err != nil {
-        return err
+        return diag.FromErr(err)
     }

     // Ask for confirmation, if needed
@@ -91,7 +92,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error {
         red := color.New(color.FgRed).SprintFunc()
         b.Plan.ConfirmApply, err = cmdio.AskYesOrNo(ctx, fmt.Sprintf("\nThis will permanently %s resources! Proceed?", red("destroy")))
         if err != nil {
-            return err
+            return diag.FromErr(err)
         }
     }

@@ -101,7 +102,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error {
     }

     if b.Plan.Path == "" {
-        return fmt.Errorf("no plan found")
+        return diag.Errorf("no plan found")
     }

     cmdio.LogString(ctx, "Starting to destroy resources")
@@ -109,7 +110,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error {
     // Apply terraform according to the computed destroy plan
     err = tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
     if err != nil {
-        return fmt.Errorf("terraform destroy: %w", err)
+        return diag.Errorf("terraform destroy: %v", err)
     }

     cmdio.LogString(ctx, "Successfully destroyed resources!")
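With `terraform.Apply`, `terraform.Destroy`, and the deploy state update all reporting failures through diagnostics, a caller typically checks `Diagnostics.Error()` after each step. The helper below is purely illustrative (it is not part of the CLI) and relies only on the `bundle.Apply` and `Diagnostics.Error()` behaviour visible in this diff:

package example

import (
    "context"

    "github.com/databricks/cli/bundle"
)

// runAll applies mutators in order and stops at the first one that
// produces an error-severity diagnostic.
func runAll(ctx context.Context, b *bundle.Bundle, mutators ...bundle.Mutator) error {
    for _, m := range mutators {
        diags := bundle.Apply(ctx, b, m)
        if err := diags.Error(); err != nil {
            // err carries the summary of the failing diagnostic, e.g.
            // "terraform destroy: ..." from the hunk above.
            return err
        }
    }
    return nil
}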
@@ -10,6 +10,7 @@ import (

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/libs/cmdio"
+    "github.com/databricks/cli/libs/diag"
     "github.com/hashicorp/terraform-exec/tfexec"
 )

@@ -25,31 +26,31 @@ type importResource struct {
 }

 // Apply implements bundle.Mutator.
-func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     dir, err := Dir(ctx, b)
     if err != nil {
-        return err
+        return diag.FromErr(err)
     }

     tf := b.Terraform
     if tf == nil {
-        return fmt.Errorf("terraform not initialized")
+        return diag.Errorf("terraform not initialized")
     }

     err = tf.Init(ctx, tfexec.Upgrade(true))
     if err != nil {
-        return fmt.Errorf("terraform init: %w", err)
+        return diag.Errorf("terraform init: %v", err)
     }
     tmpDir, err := os.MkdirTemp("", "state-*")
     if err != nil {
-        return fmt.Errorf("terraform init: %w", err)
+        return diag.Errorf("terraform init: %v", err)
     }
     tmpState := filepath.Join(tmpDir, TerraformStateFileName)

     importAddress := fmt.Sprintf("%s.%s", m.opts.ResourceType, m.opts.ResourceKey)
     err = tf.Import(ctx, importAddress, m.opts.ResourceId, tfexec.StateOut(tmpState))
     if err != nil {
-        return fmt.Errorf("terraform import: %w", err)
+        return diag.Errorf("terraform import: %v", err)
     }

     buf := bytes.NewBuffer(nil)
@@ -58,7 +59,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) error {
     //lint:ignore SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file
     changed, err := tf.Plan(ctx, tfexec.State(tmpState), tfexec.Target(importAddress))
     if err != nil {
-        return fmt.Errorf("terraform plan: %w", err)
+        return diag.Errorf("terraform plan: %v", err)
     }

     defer os.RemoveAll(tmpDir)
@@ -70,29 +71,29 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) error {
         cmdio.LogString(ctx, output)
         ans, err := cmdio.AskYesOrNo(ctx, "Confirm import changes? Changes will be remotely applied only after running 'bundle deploy'.")
         if err != nil {
-            return err
+            return diag.FromErr(err)
         }
         if !ans {
-            return fmt.Errorf("import aborted")
+            return diag.Errorf("import aborted")
         }
     }

     // If user confirmed changes, move the state file from temp dir to state location
     f, err := os.Create(filepath.Join(dir, TerraformStateFileName))
     if err != nil {
-        return err
+        return diag.FromErr(err)
     }
     defer f.Close()

     tmpF, err := os.Open(tmpState)
     if err != nil {
-        return err
+        return diag.FromErr(err)
     }
     defer tmpF.Close()

     _, err = io.Copy(f, tmpF)
     if err != nil {
-        return err
+        return diag.FromErr(err)
     }

     return nil
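The import flow above converts every failure at the point where it happens and returns immediately. As a final sketch, and under the assumption that `diag.Diagnostics` is a slice of diagnostics (suggested by the helpers used throughout this diff, but not shown in it), partial results from several steps could also be collected and returned together:

package example

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
)

// step is an illustrative unit of work that reports its own diagnostics.
type step func(context.Context, *bundle.Bundle) diag.Diagnostics

// applyAll runs every step and accumulates all diagnostics instead of
// stopping at the first failure. Plain append works only if Diagnostics
// is a slice type, which is an assumption here.
func applyAll(ctx context.Context, b *bundle.Bundle, steps ...step) diag.Diagnostics {
    var diags diag.Diagnostics
    for _, s := range steps {
        diags = append(diags, s(ctx, b)...)
    }
    return diags
}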