Compare commits

...

8 Commits

Author SHA1 Message Date
shreyas-goenka 3b4f96313c
Merge 4233a7c292 into abfd1713e0 2024-11-21 21:02:58 +05:30
Pieter Noordhuis abfd1713e0
Skip sync warning if no sync paths are defined (#1926)
## Changes

Users can configure the bundle to not synchronize any files with:
```yaml
sync:
  paths: []
```

If it is explicitly configured as an empty list, the validate command
must not warn about not having any files to synchronize. The warning
exists to alert users who are unintentionally not synchronizing any
files (they might have a `.gitignore` pattern that matches everything).
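
For illustration, a `.gitignore` that matches everything (the unintentional case this warning is meant to catch, and the same pattern the new unit test uses) can be as short as:

```
*
.*
```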

Closes #1663.

## Tests

* New unit test.
2024-11-21 15:03:13 +00:00
Pieter Noordhuis a3cea07c9e
Support lookup by name of notification destinations (#1922)
## Changes

Add support for notification destinations in variable lookups.

More information:
https://docs.databricks.com/en/admin/workspace-settings/notification-destinations.html

Depends on #1921.
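
As a sketch of how a bundle might use this (the variable name `alerts_destination` and display name "Ops Team" are hypothetical), a variable can resolve a destination ID by its display name:

```yaml
variables:
  alerts_destination:
    description: Notification destination used for job alerts
    lookup:
      notification_destination: "Ops Team"
```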

## Tests

* New unit test
* Manually confirmed that the lookup works
2024-11-21 15:52:14 +01:00
shreyas-goenka abc2f3c825
Fix `TestAccBundleInitOnMlopsStacks` (#1924)
## Changes
The ML production team modified mlops-stacks to use `mode: development`
for their development target here:
https://github.com/databricks/mlops-stacks/pull/174

This PR makes the integration test assertion agnostic of the prefix to
make it pass again.

## Tests
The test passes now
2024-11-21 10:46:24 +00:00
shreyas-goenka c2e2abcc35
Extend "notebook not found" error to warn about missing extension (#1920)
## Changes
The full workspace path for a notebook does not contain the notebook's
extension. If a user converts that file path to a relative path (like
`/Workspace/bundle_root/bar/nb` -> `./bar/nb`), they can be confused as
to why the new file path does not work.

The changes in this PR nudge them to add the appropriate file extension
(e.g., `./bar/nb.py` or `./bar/nb.ipynb`).

One common way users end up in this scenario is by using the "view job
as YAML" functionality in the Databricks UI.
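
For example (paths hypothetical), a task that references the notebook without its extension must have the extension restored:

```yaml
notebook_task:
  # Fails to resolve; the workspace path dropped the extension:
  # notebook_path: ./bar/nb
  # Resolves once the local file's extension is added back:
  notebook_path: ./bar/nb.py
```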

## Tests
Unit test and manual verification.

```
(.venv) ➜  bundle-playground git:(master) ✗ cli bundle validate 
Error: notebook ./foo not found. Local notebook references are expected
to contain one of the following file extensions: [.py, .r, .scala, .sql, .ipynb]
```
2024-11-21 16:21:21 +05:30
Shreyas Goenka 4233a7c292
better warn 2024-11-19 23:11:03 +01:00
Shreyas Goenka 96a0a3ec27
address comments 2024-11-19 23:06:43 +01:00
Shreyas Goenka df0a98066a
Add validation for single node clusters 2024-11-18 15:51:58 +01:00
11 changed files with 903 additions and 2 deletions

View File

@@ -126,8 +126,34 @@ func (t *translateContext) rewritePath(
func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
if errors.Is(err, fs.ErrNotExist) {
if filepath.Ext(localFullPath) != notebook.ExtensionNone {
return "", fmt.Errorf("notebook %s not found", literal)
}
extensions := []string{
notebook.ExtensionPython,
notebook.ExtensionR,
notebook.ExtensionScala,
notebook.ExtensionSql,
notebook.ExtensionJupyter,
}
// Check whether a file with a notebook extension already exists. This
// way we can provide a more targeted error message.
for _, ext := range extensions {
literalWithExt := literal + ext
localRelPathWithExt := filepath.ToSlash(localRelPath + ext)
if _, err := fs.Stat(t.b.SyncRoot, localRelPathWithExt); err == nil {
return "", fmt.Errorf(`notebook %s not found. Did you mean %s?
Local notebook references are expected to contain one of the following
file extensions: [%s]`, literal, literalWithExt, strings.Join(extensions, ", "))
}
}
// Return a generic error message if no matching possible file is found.
return "", fmt.Errorf(`notebook %s not found. Local notebook references are expected
to contain one of the following file extensions: [%s]`, literal, strings.Join(extensions, ", "))
}
if err != nil {
return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localFullPath, err)
}

View File

@@ -2,6 +2,7 @@ package mutator_test
import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
@@ -508,6 +509,59 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
}
func TestPipelineNotebookDoesNotExistErrorWithoutExtension(t *testing.T) {
for _, ext := range []string{
".py",
".r",
".scala",
".sql",
".ipynb",
"",
} {
t.Run("case_"+ext, func(t *testing.T) {
dir := t.TempDir()
if ext != "" {
touchEmptyFile(t, filepath.Join(dir, "foo"+ext))
}
b := &bundle.Bundle{
SyncRootPath: dir,
SyncRoot: vfs.MustNew(dir),
Config: config.Root{
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"pipeline": {
PipelineSpec: &pipelines.PipelineSpec{
Libraries: []pipelines.PipelineLibrary{
{
Notebook: &pipelines.NotebookLibrary{
Path: "./foo",
},
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
if ext == "" {
assert.EqualError(t, diags.Error(), `notebook ./foo not found. Local notebook references are expected
to contain one of the following file extensions: [.py, .r, .scala, .sql, .ipynb]`)
} else {
assert.EqualError(t, diags.Error(), fmt.Sprintf(`notebook ./foo not found. Did you mean ./foo%s?
Local notebook references are expected to contain one of the following
file extensions: [.py, .r, .scala, .sql, .ipynb]`, ext))
}
})
}
}
func TestPipelineFileDoesNotExistError(t *testing.T) {
dir := t.TempDir()

View File

@@ -21,6 +21,12 @@ func (v *filesToSync) Name() string {
}
func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
// The user may be intentional about not synchronizing any files.
// In this case, we should not show any warnings.
if len(rb.Config().Sync.Paths) == 0 {
return nil
}
sync, err := files.GetSync(ctx, rb)
if err != nil {
return diag.FromErr(err)
@@ -31,6 +37,7 @@ func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.
return diag.FromErr(err)
}
// If there are files to sync, we don't need to show any warnings.
if len(fl) != 0 {
return nil
}

View File

@@ -0,0 +1,105 @@
package validate
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/internal/testutil"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/vfs"
sdkconfig "github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/workspace"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestFilesToSync_NoPaths(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Sync: config.Sync{
Paths: []string{},
},
},
}
ctx := context.Background()
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
assert.Empty(t, diags)
}
func setupBundleForFilesToSyncTest(t *testing.T) *bundle.Bundle {
dir := t.TempDir()
testutil.Touch(t, dir, "file1")
testutil.Touch(t, dir, "file2")
b := &bundle.Bundle{
BundleRootPath: dir,
BundleRoot: vfs.MustNew(dir),
SyncRootPath: dir,
SyncRoot: vfs.MustNew(dir),
Config: config.Root{
Bundle: config.Bundle{
Target: "default",
},
Workspace: config.Workspace{
FilePath: "/this/doesnt/matter",
CurrentUser: &config.User{
User: &iam.User{},
},
},
Sync: config.Sync{
// Paths are relative to [SyncRootPath].
Paths: []string{"."},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
m.WorkspaceClient.Config = &sdkconfig.Config{
Host: "https://foo.com",
}
// The initialization logic in [sync.New] performs a check on the destination path.
// Removing this check at initialization time is tbd...
m.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/this/doesnt/matter").Return(&workspace.ObjectInfo{
ObjectType: workspace.ObjectTypeDirectory,
}, nil)
b.SetWorkpaceClient(m.WorkspaceClient)
return b
}
func TestFilesToSync_EverythingIgnored(t *testing.T) {
b := setupBundleForFilesToSyncTest(t)
// Ignore all files.
testutil.WriteFile(t, "*\n.*\n", b.BundleRootPath, ".gitignore")
ctx := context.Background()
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
require.Equal(t, 1, len(diags))
assert.Equal(t, diag.Warning, diags[0].Severity)
assert.Equal(t, "There are no files to sync, please check your .gitignore", diags[0].Summary)
}
func TestFilesToSync_EverythingExcluded(t *testing.T) {
b := setupBundleForFilesToSyncTest(t)
// Exclude all files.
b.Config.Sync.Exclude = []string{"*"}
ctx := context.Background()
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
require.Equal(t, 1, len(diags))
assert.Equal(t, diag.Warning, diags[0].Severity)
assert.Equal(t, "There are no files to sync, please check your .gitignore and sync.exclude configuration", diags[0].Summary)
}

View File

@@ -0,0 +1,135 @@
package validate
import (
"context"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/cli/libs/log"
)
// Validates that any single node clusters defined in the bundle are correctly configured.
func SingleNodeCluster() bundle.ReadOnlyMutator {
return &singleNodeCluster{}
}
type singleNodeCluster struct{}
func (m *singleNodeCluster) Name() string {
return "validate:SingleNodeCluster"
}
const singleNodeWarningDetail = `num_workers should be 0 only for single-node clusters. To create a
valid single node cluster please ensure that the following properties
are correctly set in the cluster specification:
spark_conf:
spark.databricks.cluster.profile: singleNode
spark.master: local[*]
custom_tags:
ResourceClass: SingleNode
`
const singleNodeWarningSummary = `Single node cluster is not correctly configured`
func showSingleNodeClusterWarning(ctx context.Context, v dyn.Value) bool {
// Check if the user has explicitly set the num_workers to 0. Skip the warning
// if that's not the case.
numWorkers, ok := v.Get("num_workers").AsInt()
if !ok || numWorkers > 0 {
return false
}
// Convenient type that contains the common fields from compute.ClusterSpec and
// pipelines.PipelineCluster that we are interested in.
type ClusterConf struct {
SparkConf map[string]string `json:"spark_conf"`
CustomTags map[string]string `json:"custom_tags"`
PolicyId string `json:"policy_id"`
}
conf := &ClusterConf{}
err := convert.ToTyped(conf, v)
if err != nil {
return false
}
// If the policy id is set, we don't want to show the warning. This is because
// the user might have configured `spark_conf` and `custom_tags` correctly
// in their cluster policy.
if conf.PolicyId != "" {
return false
}
profile, ok := conf.SparkConf["spark.databricks.cluster.profile"]
if !ok {
log.Warnf(ctx, "spark_conf spark.databricks.cluster.profile not found in single-node cluster spec")
return true
}
if profile != "singleNode" {
log.Warnf(ctx, "spark_conf spark.databricks.cluster.profile is not singleNode in single-node cluster spec: %s", profile)
return true
}
master, ok := conf.SparkConf["spark.master"]
if !ok {
log.Warnf(ctx, "spark_conf spark.master not found in single-node cluster spec")
return true
}
if !strings.HasPrefix(master, "local") {
log.Warnf(ctx, "spark_conf spark.master does not start with local in single-node cluster spec: %s", master)
return true
}
resourceClass, ok := conf.CustomTags["ResourceClass"]
if !ok {
log.Warnf(ctx, "custom_tag ResourceClass not found in single-node cluster spec")
return true
}
if resourceClass != "SingleNode" {
log.Warnf(ctx, "custom_tag ResourceClass is not SingleNode in single-node cluster spec: %s", resourceClass)
return true
}
return false
}
func (m *singleNodeCluster) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
diags := diag.Diagnostics{}
patterns := []dyn.Pattern{
// Interactive clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("clusters"), dyn.AnyKey()),
// Job clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("jobs"), dyn.AnyKey(), dyn.Key("job_clusters"), dyn.AnyIndex(), dyn.Key("new_cluster")),
// Job task clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("jobs"), dyn.AnyKey(), dyn.Key("tasks"), dyn.AnyIndex(), dyn.Key("new_cluster")),
// Pipeline clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("pipelines"), dyn.AnyKey(), dyn.Key("clusters"), dyn.AnyIndex()),
}
for _, p := range patterns {
_, err := dyn.MapByPattern(rb.Config().Value(), p, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
warning := diag.Diagnostic{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: v.Locations(),
Paths: []dyn.Path{p},
}
if showSingleNodeClusterWarning(ctx, v) {
diags = append(diags, warning)
}
return v, nil
})
if err != nil {
log.Debugf(ctx, "Error while applying single node cluster validation: %s", err)
}
}
return diags
}
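
For reference, a minimal sketch of a cluster specification that passes this validation, matching the properties listed in the warning detail above (other required cluster fields such as `spark_version` and `node_type_id` are omitted):

```yaml
resources:
  clusters:
    my_single_node_cluster:
      num_workers: 0
      spark_conf:
        spark.databricks.cluster.profile: singleNode
        spark.master: "local[*]"
      custom_tags:
        ResourceClass: SingleNode
```

Alternatively, setting `policy_id` on the cluster suppresses the warning, since the policy may supply these properties.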

View File

@@ -0,0 +1,440 @@
package validate
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/assert"
)
func TestValidateSingleNodeClusterFail(t *testing.T) {
failCases := []struct {
name string
sparkConf map[string]string
customTags map[string]string
}{
{
name: "no tags or conf",
},
{
name: "no tags",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "local[*]",
},
},
{
name: "no conf",
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
{
name: "invalid spark cluster profile",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "invalid",
"spark.master": "local[*]",
},
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
{
name: "invalid spark.master",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "invalid",
},
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
{
name: "invalid tags",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "local[*]",
},
customTags: map[string]string{"ResourceClass": "invalid"},
},
{
name: "missing ResourceClass tag",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "local[*]",
},
customTags: map[string]string{"what": "ever"},
},
{
name: "missing spark.master",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
},
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
{
name: "missing spark.databricks.cluster.profile",
sparkConf: map[string]string{
"spark.master": "local[*]",
},
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
}
ctx := context.Background()
// Interactive clusters.
for _, tc := range failCases {
t.Run("interactive_"+tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Clusters: map[string]*resources.Cluster{
"foo": {
ClusterSpec: &compute.ClusterSpec{
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.clusters.foo", []dyn.Location{{File: "a.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.clusters.foo.num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "a.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.NewPath(dyn.Key("resources"), dyn.Key("clusters"), dyn.Key("foo"))},
},
}, diags)
})
}
// Job clusters.
for _, tc := range failCases {
t.Run("job_"+tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
JobClusters: []jobs.JobCluster{
{
NewCluster: compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.jobs.foo.job_clusters[0].new_cluster", []dyn.Location{{File: "b.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.job_clusters[0].new_cluster.num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "b.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.job_clusters[0].new_cluster")},
},
}, diags)
})
}
// Job task clusters.
for _, tc := range failCases {
t.Run("task_"+tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
NewCluster: &compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.jobs.foo.tasks[0].new_cluster", []dyn.Location{{File: "c.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.tasks[0].new_cluster.num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "c.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.tasks[0].new_cluster")},
},
}, diags)
})
}
// Pipeline clusters.
for _, tc := range failCases {
t.Run("pipeline_"+tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"foo": {
PipelineSpec: &pipelines.PipelineSpec{
Clusters: []pipelines.PipelineCluster{
{
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.pipelines.foo.clusters[0]", []dyn.Location{{File: "d.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.pipelines.foo.clusters[0].num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "d.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.MustPathFromString("resources.pipelines.foo.clusters[0]")},
},
}, diags)
})
}
}
func TestValidateSingleNodeClusterPass(t *testing.T) {
zero := 0
one := 1
passCases := []struct {
name string
numWorkers *int
sparkConf map[string]string
customTags map[string]string
policyId string
}{
{
name: "single node cluster",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "local[*]",
},
customTags: map[string]string{
"ResourceClass": "SingleNode",
},
numWorkers: &zero,
},
{
name: "num workers is not zero",
numWorkers: &one,
},
{
name: "num workers is not set",
},
{
name: "policy id is not empty",
policyId: "policy-abc",
numWorkers: &zero,
},
}
ctx := context.Background()
// Interactive clusters.
for _, tc := range passCases {
t.Run("interactive_"+tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Clusters: map[string]*resources.Cluster{
"foo": {
ClusterSpec: &compute.ClusterSpec{
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.clusters.foo.num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
// Job clusters.
for _, tc := range passCases {
t.Run("job_"+tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
JobClusters: []jobs.JobCluster{
{
NewCluster: compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.job_clusters[0].new_cluster.num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
// Job task clusters.
for _, tc := range passCases {
t.Run("task_"+tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
NewCluster: &compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.tasks[0].new_cluster.num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
// Pipeline clusters.
for _, tc := range passCases {
t.Run("pipeline_"+tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"foo": {
PipelineSpec: &pipelines.PipelineSpec{
Clusters: []pipelines.PipelineCluster{
{
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.pipelines.foo.clusters[0].num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
}

View File

@@ -36,6 +36,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
ValidateSyncPatterns(),
JobTaskClusterSpec(),
ValidateFolderPermissions(),
SingleNodeCluster(),
))
}

View File

@@ -22,6 +22,8 @@ type Lookup struct {
Metastore string `json:"metastore,omitempty"`
NotificationDestination string `json:"notification_destination,omitempty"`
Pipeline string `json:"pipeline,omitempty"`
Query string `json:"query,omitempty"`
@@ -63,6 +65,9 @@ func (l *Lookup) constructResolver() (resolver, error) {
if l.Metastore != "" {
resolvers = append(resolvers, resolveMetastore{name: l.Metastore})
}
if l.NotificationDestination != "" {
resolvers = append(resolvers, resolveNotificationDestination{name: l.NotificationDestination})
}
if l.Pipeline != "" { if l.Pipeline != "" {
resolvers = append(resolvers, resolvePipeline{name: l.Pipeline}) resolvers = append(resolvers, resolvePipeline{name: l.Pipeline})
} }

View File

@@ -0,0 +1,46 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/service/settings"
)
type resolveNotificationDestination struct {
name string
}
func (l resolveNotificationDestination) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
result, err := w.NotificationDestinations.ListAll(ctx, settings.ListNotificationDestinationsRequest{
// The default page size for this API is 20.
// We use a higher value to make fewer API calls.
PageSize: 200,
})
if err != nil {
return "", err
}
// Collect all notification destinations with the given name.
var entities []settings.ListNotificationDestinationsResult
for _, entity := range result {
if entity.DisplayName == l.name {
entities = append(entities, entity)
}
}
// Return the ID only if there is exactly one match.
switch len(entities) {
case 0:
return "", fmt.Errorf("notification destination named %q does not exist", l.name)
case 1:
return entities[0].Id, nil
default:
return "", fmt.Errorf("there are %d instances of clusters named %q", len(entities), l.name)
}
}
func (l resolveNotificationDestination) String() string {
return fmt.Sprintf("notification-destination: %s", l.name)
}

View File

@@ -0,0 +1,82 @@
package variable
import (
"context"
"fmt"
"testing"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/settings"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveNotificationDestination_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return([]settings.ListNotificationDestinationsResult{
{Id: "1234", DisplayName: "destination"},
}, nil)
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "1234", result)
}
func TestResolveNotificationDestination_ResolveError(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return(nil, fmt.Errorf("bad"))
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
assert.ErrorContains(t, err, "bad")
}
func TestResolveNotificationDestination_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return([]settings.ListNotificationDestinationsResult{}, nil)
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.Error(t, err)
assert.ErrorContains(t, err, `notification destination named "destination" does not exist`)
}
func TestResolveNotificationDestination_ResolveMultiple(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return([]settings.ListNotificationDestinationsResult{
{Id: "1234", DisplayName: "destination"},
{Id: "5678", DisplayName: "destination"},
}, nil)
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.Error(t, err)
assert.ErrorContains(t, err, `there are 2 notification destinations named "destination"`)
}
func TestResolveNotificationDestination_String(t *testing.T) {
l := resolveNotificationDestination{name: "name"}
assert.Equal(t, "notification-destination: name", l.String())
}

View File

@@ -97,7 +97,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) {
require.NoError(t, err)
job, err := w.Jobs.GetByJobId(context.Background(), batchJobId)
assert.NoError(t, err)
assert.Contains(t, job.Settings.Name, fmt.Sprintf("dev-%s-batch-inference-job", projectName))
}
func TestAccBundleInitHelpers(t *testing.T) {