mirror of https://github.com/databricks/cli.git
Compare commits
7 Commits
e3b7688798 ... 18b89657d8

Author | SHA1 | Date |
---|---|---|
Lennart Kats (databricks) | 18b89657d8 | |
Pieter Noordhuis | abfd1713e0 | |
Pieter Noordhuis | a3cea07c9e | |
shreyas-goenka | abc2f3c825 | |
shreyas-goenka | c2e2abcc35 | |
Lennart Kats | c4301b7a44 | |
Lennart Kats | a43bcc4c63 | |
@@ -38,18 +38,27 @@ func overrideJobCompute(j *resources.Job, compute string) {
 }
 
 func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-    if b.Config.Bundle.Mode != config.Development {
+    var diags diag.Diagnostics
+
+    if b.Config.Bundle.Mode == config.Production {
        if b.Config.Bundle.ClusterId != "" {
-            return diag.Errorf("cannot override compute for an target that does not use 'mode: development'")
+            // Overriding compute via a command-line flag for production works, but is not recommended.
+            diags = diags.Extend(diag.Warningf("overriding compute for a target that uses 'mode: production' is not recommended"))
+        }
+        if env.Get(ctx, "DATABRICKS_CLUSTER_ID") != "" {
+            // The DATABRICKS_CLUSTER_ID may be set by accident when doing a production deploy.
+            // Overriding the cluster in production is almost always a mistake since customers
+            // want consistency in production and not compute that is different each deploy.
+            // For this reason we log a warning and ignore the environment variable.
+            return diag.Warningf("the DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target uses 'mode: production'")
        }
-        return nil
    }
    if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" {
        b.Config.Bundle.ClusterId = v
    }
 
    if b.Config.Bundle.ClusterId == "" {
-        return nil
+        return diags
    }
 
    r := b.Config.Resources
@@ -57,5 +66,5 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
        overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId)
    }
 
-    return nil
+    return diags
 }
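A note on the behavior change in these hunks: compute overrides for `mode: production` targets now produce warning-level diagnostics instead of a hard error, and a stray `DATABRICKS_CLUSTER_ID` variable is ignored with a warning. The snippet below is a hand-written sketch (not part of the change) that assumes the `libs/diag` helpers used above, `Warningf`, `Extend`, `HasError`, `Error`, `Severity`, and `Summary`, behave as shown in these hunks; it illustrates how warnings accumulate without failing the run.

```go
package main

import (
    "fmt"

    "github.com/databricks/cli/libs/diag"
)

func main() {
    var diags diag.Diagnostics

    // Accumulate a warning instead of returning an error, mirroring the new production-mode behavior.
    diags = diags.Extend(diag.Warningf("overriding compute for a target that uses 'mode: production' is not recommended"))

    // Warning-severity diagnostics do not turn into a hard failure.
    fmt.Println("has error:", diags.HasError()) // false
    fmt.Println("error:", diags.Error())        // <nil>

    for _, d := range diags {
        if d.Severity == diag.Warning {
            fmt.Println("warning:", d.Summary)
        }
    }
}
```

This is why the tests below switch from `require.True(t, diags.HasError())` to `require.Len(t, diags, 1)` and assert on `diags[0].Summary`.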
@@ -14,7 +14,7 @@ import (
    "github.com/stretchr/testify/require"
 )
 
-func TestOverrideDevelopment(t *testing.T) {
+func TestOverrideComputeModeDevelopment(t *testing.T) {
    t.Setenv("DATABRICKS_CLUSTER_ID", "")
    b := &bundle.Bundle{
        Config: config.Root{
@@ -62,10 +62,13 @@ func TestOverrideDevelopment(t *testing.T) {
    assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey)
 }
 
-func TestOverrideDevelopmentEnv(t *testing.T) {
+func TestOverrideComputeModeDefault(t *testing.T) {
    t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
    b := &bundle.Bundle{
        Config: config.Root{
+            Bundle: config.Bundle{
+                Mode: "",
+            },
            Resources: config.Resources{
                Jobs: map[string]*resources.Job{
                    "job1": {JobSettings: &jobs.JobSettings{
@@ -86,11 +89,12 @@ func TestOverrideDevelopmentEnv(t *testing.T) {
 
    m := mutator.OverrideCompute()
    diags := bundle.Apply(context.Background(), b, m)
-    require.NoError(t, diags.Error())
-    assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
+    require.Empty(t, diags)
+    assert.Equal(t, "newClusterId", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
+    assert.Equal(t, "newClusterId", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
 }
 
-func TestOverridePipelineTask(t *testing.T) {
+func TestOverrideComputePipelineTask(t *testing.T) {
    t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
    b := &bundle.Bundle{
        Config: config.Root{
@@ -115,7 +119,7 @@ func TestOverridePipelineTask(t *testing.T) {
    assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
 }
 
-func TestOverrideForEachTask(t *testing.T) {
+func TestOverrideComputeForEachTask(t *testing.T) {
    t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
    b := &bundle.Bundle{
        Config: config.Root{
@@ -140,10 +144,11 @@ func TestOverrideForEachTask(t *testing.T) {
    assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task)
 }
 
-func TestOverrideProduction(t *testing.T) {
+func TestOverrideComputeModeProduction(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Bundle: config.Bundle{
+                Mode: config.Production,
                ClusterId: "newClusterID",
            },
            Resources: config.Resources{
@@ -166,13 +171,18 @@ func TestOverrideProduction(t *testing.T) {
 
    m := mutator.OverrideCompute()
    diags := bundle.Apply(context.Background(), b, m)
-    require.True(t, diags.HasError())
+    require.Len(t, diags, 1)
+    assert.Equal(t, "overriding compute for a target that uses 'mode: production' is not recommended", diags[0].Summary)
+    assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
 }
 
-func TestOverrideProductionEnv(t *testing.T) {
+func TestOverrideComputeModeProductionIgnoresVariable(t *testing.T) {
    t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
    b := &bundle.Bundle{
        Config: config.Root{
+            Bundle: config.Bundle{
+                Mode: config.Production,
+            },
            Resources: config.Resources{
                Jobs: map[string]*resources.Job{
                    "job1": {JobSettings: &jobs.JobSettings{
@@ -193,5 +203,7 @@ func TestOverrideProductionEnv(t *testing.T) {
 
    m := mutator.OverrideCompute()
    diags := bundle.Apply(context.Background(), b, m)
-    require.NoError(t, diags.Error())
+    require.Len(t, diags, 1)
+    assert.Equal(t, "the DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target uses 'mode: production'", diags[0].Summary)
+    assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
 }
@@ -126,7 +126,33 @@ func (t *translateContext) rewritePath(
 func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
    nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
    if errors.Is(err, fs.ErrNotExist) {
-        return "", fmt.Errorf("notebook %s not found", literal)
+        if filepath.Ext(localFullPath) != notebook.ExtensionNone {
+            return "", fmt.Errorf("notebook %s not found", literal)
+        }
+
+        extensions := []string{
+            notebook.ExtensionPython,
+            notebook.ExtensionR,
+            notebook.ExtensionScala,
+            notebook.ExtensionSql,
+            notebook.ExtensionJupyter,
+        }
+
+        // Check whether a file with a notebook extension already exists. This
+        // way we can provide a more targeted error message.
+        for _, ext := range extensions {
+            literalWithExt := literal + ext
+            localRelPathWithExt := filepath.ToSlash(localRelPath + ext)
+            if _, err := fs.Stat(t.b.SyncRoot, localRelPathWithExt); err == nil {
+                return "", fmt.Errorf(`notebook %s not found. Did you mean %s?
+Local notebook references are expected to contain one of the following
+file extensions: [%s]`, literal, literalWithExt, strings.Join(extensions, ", "))
+            }
+        }
+
+        // Return a generic error message if no matching possible file is found.
+        return "", fmt.Errorf(`notebook %s not found. Local notebook references are expected
+to contain one of the following file extensions: [%s]`, literal, strings.Join(extensions, ", "))
    }
    if err != nil {
        return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localFullPath, err)
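The new branch above only improves the error message: when a notebook reference without an extension cannot be found, it probes for the same base name with each known notebook extension and suggests the closest match. Below is a self-contained sketch of that probing idea over an in-memory filesystem; the `suggestNotebookPath` helper and the file contents are illustrative only, not CLI code.

```go
package main

import (
    "fmt"
    "io/fs"
    "strings"
    "testing/fstest"
)

// suggestNotebookPath probes relPath plus each known notebook extension and
// reports the first candidate that exists, so the error can point at a likely fix.
func suggestNotebookPath(fsys fs.FS, relPath string, extensions []string) (string, bool) {
    for _, ext := range extensions {
        if _, err := fs.Stat(fsys, relPath+ext); err == nil {
            return relPath + ext, true
        }
    }
    return "", false
}

func main() {
    // In-memory stand-in for the bundle sync root.
    fsys := fstest.MapFS{
        "foo.py": &fstest.MapFile{Data: []byte("# Databricks notebook source")},
    }

    extensions := []string{".py", ".r", ".scala", ".sql", ".ipynb"}
    if suggestion, ok := suggestNotebookPath(fsys, "foo", extensions); ok {
        fmt.Printf("notebook ./foo not found. Did you mean ./%s?\n", suggestion)
    } else {
        fmt.Printf("notebook ./foo not found. Expected one of: [%s]\n", strings.Join(extensions, ", "))
    }
}
```

The test added in the next hunk exercises the real implementation for each extension, including the case where no candidate file exists.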
@@ -2,6 +2,7 @@ package mutator_test
 
 import (
    "context"
+    "fmt"
    "os"
    "path/filepath"
    "runtime"
@@ -508,6 +509,59 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
    assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
 }
 
+func TestPipelineNotebookDoesNotExistErrorWithoutExtension(t *testing.T) {
+    for _, ext := range []string{
+        ".py",
+        ".r",
+        ".scala",
+        ".sql",
+        ".ipynb",
+        "",
+    } {
+        t.Run("case_"+ext, func(t *testing.T) {
+            dir := t.TempDir()
+
+            if ext != "" {
+                touchEmptyFile(t, filepath.Join(dir, "foo"+ext))
+            }
+
+            b := &bundle.Bundle{
+                SyncRootPath: dir,
+                SyncRoot:     vfs.MustNew(dir),
+                Config: config.Root{
+                    Resources: config.Resources{
+                        Pipelines: map[string]*resources.Pipeline{
+                            "pipeline": {
+                                PipelineSpec: &pipelines.PipelineSpec{
+                                    Libraries: []pipelines.PipelineLibrary{
+                                        {
+                                            Notebook: &pipelines.NotebookLibrary{
+                                                Path: "./foo",
+                                            },
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+            }
+
+            bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
+            diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
+
+            if ext == "" {
+                assert.EqualError(t, diags.Error(), `notebook ./foo not found. Local notebook references are expected
+to contain one of the following file extensions: [.py, .r, .scala, .sql, .ipynb]`)
+            } else {
+                assert.EqualError(t, diags.Error(), fmt.Sprintf(`notebook ./foo not found. Did you mean ./foo%s?
+Local notebook references are expected to contain one of the following
+file extensions: [.py, .r, .scala, .sql, .ipynb]`, ext))
+            }
+        })
+    }
+}
+
 func TestPipelineFileDoesNotExistError(t *testing.T) {
    dir := t.TempDir()
 
@@ -21,6 +21,12 @@ func (v *filesToSync) Name() string {
 }
 
 func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
+    // The user may be intentional about not synchronizing any files.
+    // In this case, we should not show any warnings.
+    if len(rb.Config().Sync.Paths) == 0 {
+        return nil
+    }
+
    sync, err := files.GetSync(ctx, rb)
    if err != nil {
        return diag.FromErr(err)
@@ -31,6 +37,7 @@ func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
        return diag.FromErr(err)
    }
 
+    // If there are files to sync, we don't need to show any warnings.
    if len(fl) != 0 {
        return nil
    }
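The guard added above treats an empty `sync.paths` as intentional, so the "no files to sync" warning is only emitted when paths are configured but everything is filtered out by `.gitignore` or `sync.exclude`. As a rough illustration of the same read-only validator shape, here is a hypothetical `exampleCheck` (not part of this change) that assumes the `bundle.ReadOnlyBundle` and `diag` APIs used in these hunks.

```go
package validate

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
)

// exampleCheck is a hypothetical validator, written here only to illustrate the
// read-only mutator shape used by filesToSync above; it is not part of the change.
type exampleCheck struct{}

func (v *exampleCheck) Name() string {
    return "validate:example_check"
}

func (v *exampleCheck) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
    // Same guard as above: an empty sync.paths is treated as intentional.
    if len(rb.Config().Sync.Paths) == 0 {
        return nil
    }

    // Otherwise emit a warning-level diagnostic, in the style of the check above.
    return diag.Warningf("example warning for target %s", rb.Config().Bundle.Target)
}
```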
@@ -0,0 +1,105 @@
+package validate
+
+import (
+    "context"
+    "testing"
+
+    "github.com/databricks/cli/bundle"
+    "github.com/databricks/cli/bundle/config"
+    "github.com/databricks/cli/internal/testutil"
+    "github.com/databricks/cli/libs/diag"
+    "github.com/databricks/cli/libs/vfs"
+    sdkconfig "github.com/databricks/databricks-sdk-go/config"
+    "github.com/databricks/databricks-sdk-go/experimental/mocks"
+    "github.com/databricks/databricks-sdk-go/service/iam"
+    "github.com/databricks/databricks-sdk-go/service/workspace"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/mock"
+    "github.com/stretchr/testify/require"
+)
+
+func TestFilesToSync_NoPaths(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Sync: config.Sync{
+                Paths: []string{},
+            },
+        },
+    }
+
+    ctx := context.Background()
+    rb := bundle.ReadOnly(b)
+    diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
+    assert.Empty(t, diags)
+}
+
+func setupBundleForFilesToSyncTest(t *testing.T) *bundle.Bundle {
+    dir := t.TempDir()
+
+    testutil.Touch(t, dir, "file1")
+    testutil.Touch(t, dir, "file2")
+
+    b := &bundle.Bundle{
+        BundleRootPath: dir,
+        BundleRoot:     vfs.MustNew(dir),
+        SyncRootPath:   dir,
+        SyncRoot:       vfs.MustNew(dir),
+        Config: config.Root{
+            Bundle: config.Bundle{
+                Target: "default",
+            },
+            Workspace: config.Workspace{
+                FilePath: "/this/doesnt/matter",
+                CurrentUser: &config.User{
+                    User: &iam.User{},
+                },
+            },
+            Sync: config.Sync{
+                // Paths are relative to [SyncRootPath].
+                Paths: []string{"."},
+            },
+        },
+    }
+
+    m := mocks.NewMockWorkspaceClient(t)
+    m.WorkspaceClient.Config = &sdkconfig.Config{
+        Host: "https://foo.com",
+    }
+
+    // The initialization logic in [sync.New] performs a check on the destination path.
+    // Removing this check at initialization time is tbd...
+    m.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/this/doesnt/matter").Return(&workspace.ObjectInfo{
+        ObjectType: workspace.ObjectTypeDirectory,
+    }, nil)
+
+    b.SetWorkpaceClient(m.WorkspaceClient)
+    return b
+}
+
+func TestFilesToSync_EverythingIgnored(t *testing.T) {
+    b := setupBundleForFilesToSyncTest(t)
+
+    // Ignore all files.
+    testutil.WriteFile(t, "*\n.*\n", b.BundleRootPath, ".gitignore")
+
+    ctx := context.Background()
+    rb := bundle.ReadOnly(b)
+    diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
+    require.Equal(t, 1, len(diags))
+    assert.Equal(t, diag.Warning, diags[0].Severity)
+    assert.Equal(t, "There are no files to sync, please check your .gitignore", diags[0].Summary)
+}
+
+func TestFilesToSync_EverythingExcluded(t *testing.T) {
+    b := setupBundleForFilesToSyncTest(t)
+
+    // Exclude all files.
+    b.Config.Sync.Exclude = []string{"*"}
+
+    ctx := context.Background()
+    rb := bundle.ReadOnly(b)
+    diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
+    require.Equal(t, 1, len(diags))
+    assert.Equal(t, diag.Warning, diags[0].Severity)
+    assert.Equal(t, "There are no files to sync, please check your .gitignore and sync.exclude configuration", diags[0].Summary)
+}
@@ -22,6 +22,8 @@ type Lookup struct {
 
    Metastore string `json:"metastore,omitempty"`
 
+    NotificationDestination string `json:"notification_destination,omitempty"`
+
    Pipeline string `json:"pipeline,omitempty"`
 
    Query string `json:"query,omitempty"`
@@ -63,6 +65,9 @@ func (l *Lookup) constructResolver() (resolver, error) {
    if l.Metastore != "" {
        resolvers = append(resolvers, resolveMetastore{name: l.Metastore})
    }
+    if l.NotificationDestination != "" {
+        resolvers = append(resolvers, resolveNotificationDestination{name: l.NotificationDestination})
+    }
    if l.Pipeline != "" {
        resolvers = append(resolvers, resolvePipeline{name: l.Pipeline})
    }
@@ -0,0 +1,46 @@
+package variable
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/databricks/databricks-sdk-go"
+    "github.com/databricks/databricks-sdk-go/service/settings"
+)
+
+type resolveNotificationDestination struct {
+    name string
+}
+
+func (l resolveNotificationDestination) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
+    result, err := w.NotificationDestinations.ListAll(ctx, settings.ListNotificationDestinationsRequest{
+        // The default page size for this API is 20.
+        // We use a higher value to make fewer API calls.
+        PageSize: 200,
+    })
+    if err != nil {
+        return "", err
+    }
+
+    // Collect all notification destinations with the given name.
+    var entities []settings.ListNotificationDestinationsResult
+    for _, entity := range result {
+        if entity.DisplayName == l.name {
+            entities = append(entities, entity)
+        }
+    }
+
+    // Return the ID of the first matching notification destination.
+    switch len(entities) {
+    case 0:
+        return "", fmt.Errorf("notification destination named %q does not exist", l.name)
+    case 1:
+        return entities[0].Id, nil
+    default:
+        return "", fmt.Errorf("there are %d instances of clusters named %q", len(entities), l.name)
+    }
+}
+
+func (l resolveNotificationDestination) String() string {
+    return fmt.Sprintf("notification-destination: %s", l.name)
+}
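For context, here is a hypothetical usage sketch of the resolver added above (not part of the change). It assumes workspace credentials are available to `databricks.NewWorkspaceClient` from the SDK and that a destination with the placeholder display name "ops-alerts" exists; as the `switch` above shows, resolution fails for zero or multiple matches.

```go
package variable

import (
    "context"
    "fmt"

    "github.com/databricks/databricks-sdk-go"
)

// ExampleResolve is a hand-written illustration of resolving a notification
// destination ID by display name; the name "ops-alerts" is a placeholder.
func ExampleResolve() {
    ctx := context.Background()

    // Assumes credentials are configured in the environment or a profile.
    w, err := databricks.NewWorkspaceClient()
    if err != nil {
        panic(err)
    }

    // Succeeds only when exactly one destination carries this display name.
    l := resolveNotificationDestination{name: "ops-alerts"}
    id, err := l.Resolve(ctx, w)
    if err != nil {
        panic(err)
    }
    fmt.Println("resolved notification destination id:", id)
}
```

Note that the ambiguity error text above says "clusters" even though it refers to notification destinations; the test below asserts on that exact wording.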
@@ -0,0 +1,82 @@
+package variable
+
+import (
+    "context"
+    "fmt"
+    "testing"
+
+    "github.com/databricks/databricks-sdk-go/experimental/mocks"
+    "github.com/databricks/databricks-sdk-go/service/settings"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/mock"
+    "github.com/stretchr/testify/require"
+)
+
+func TestResolveNotificationDestination_ResolveSuccess(t *testing.T) {
+    m := mocks.NewMockWorkspaceClient(t)
+
+    api := m.GetMockNotificationDestinationsAPI()
+    api.EXPECT().
+        ListAll(mock.Anything, mock.Anything).
+        Return([]settings.ListNotificationDestinationsResult{
+            {Id: "1234", DisplayName: "destination"},
+        }, nil)
+
+    ctx := context.Background()
+    l := resolveNotificationDestination{name: "destination"}
+    result, err := l.Resolve(ctx, m.WorkspaceClient)
+    require.NoError(t, err)
+    assert.Equal(t, "1234", result)
+}
+
+func TestResolveNotificationDestination_ResolveError(t *testing.T) {
+    m := mocks.NewMockWorkspaceClient(t)
+
+    api := m.GetMockNotificationDestinationsAPI()
+    api.EXPECT().
+        ListAll(mock.Anything, mock.Anything).
+        Return(nil, fmt.Errorf("bad"))
+
+    ctx := context.Background()
+    l := resolveNotificationDestination{name: "destination"}
+    _, err := l.Resolve(ctx, m.WorkspaceClient)
+    assert.ErrorContains(t, err, "bad")
+}
+
+func TestResolveNotificationDestination_ResolveNotFound(t *testing.T) {
+    m := mocks.NewMockWorkspaceClient(t)
+
+    api := m.GetMockNotificationDestinationsAPI()
+    api.EXPECT().
+        ListAll(mock.Anything, mock.Anything).
+        Return([]settings.ListNotificationDestinationsResult{}, nil)
+
+    ctx := context.Background()
+    l := resolveNotificationDestination{name: "destination"}
+    _, err := l.Resolve(ctx, m.WorkspaceClient)
+    require.Error(t, err)
+    assert.ErrorContains(t, err, `notification destination named "destination" does not exist`)
+}
+
+func TestResolveNotificationDestination_ResolveMultiple(t *testing.T) {
+    m := mocks.NewMockWorkspaceClient(t)
+
+    api := m.GetMockNotificationDestinationsAPI()
+    api.EXPECT().
+        ListAll(mock.Anything, mock.Anything).
+        Return([]settings.ListNotificationDestinationsResult{
+            {Id: "1234", DisplayName: "destination"},
+            {Id: "5678", DisplayName: "destination"},
+        }, nil)
+
+    ctx := context.Background()
+    l := resolveNotificationDestination{name: "destination"}
+    _, err := l.Resolve(ctx, m.WorkspaceClient)
+    require.Error(t, err)
+    assert.ErrorContains(t, err, `there are 2 instances of clusters named "destination"`)
+}
+
+func TestResolveNotificationDestination_String(t *testing.T) {
+    l := resolveNotificationDestination{name: "name"}
+    assert.Equal(t, "notification-destination: name", l.String())
+}
@@ -97,7 +97,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) {
    require.NoError(t, err)
    job, err := w.Jobs.GetByJobId(context.Background(), batchJobId)
    assert.NoError(t, err)
-    assert.Equal(t, fmt.Sprintf("dev-%s-batch-inference-job", projectName), job.Settings.Name)
+    assert.Contains(t, job.Settings.Name, fmt.Sprintf("dev-%s-batch-inference-job", projectName))
 }
 
 func TestAccBundleInitHelpers(t *testing.T) {