mirror of https://github.com/databricks/cli.git
Compare commits
5 Commits
Comparing b044a6c0e0 ... dc44dbd667

Author | SHA1 | Date
---|---|---
Richard Nordström | dc44dbd667 |
Andrew Nester | 02e83877f4 |
Pieter Noordhuis | ceefa80d72 |
Andrew Nester | f71d9e7649 |
Andrew Nester | 72030844c5 |
@@ -116,6 +116,10 @@ func allResolvers() *resolvers {
 {{range .Services -}}
 {{- if in $allowlist .KebabName -}}
     r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["{{.Singular.PascalName}}"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
         if err != nil {
             return "", err
CHANGELOG.md (29 changed lines)
@@ -1,5 +1,34 @@
 # Version changelog

+## [Release] Release v0.228.0
+
+CLI:
+ * Do not error if we cannot prompt for a profile in `auth login` ([#1745](https://github.com/databricks/cli/pull/1745)).
+
+Bundles:
+
+As of this release, the CLI will show a prompt if there are configuration changes that lead to DLT pipeline recreation.
+Users can skip the prompt by specifying the `--auto-approve` flag.
+
+ * Pass along to Terraform process ([#1734](https://github.com/databricks/cli/pull/1734)).
+ * Add prompt when a pipeline recreation happens ([#1672](https://github.com/databricks/cli/pull/1672)).
+ * Use materialized views in the default-sql template ([#1709](https://github.com/databricks/cli/pull/1709)).
+ * Update templates to latest LTS DBR ([#1715](https://github.com/databricks/cli/pull/1715)).
+ * Make lock optional in the JSON schema ([#1738](https://github.com/databricks/cli/pull/1738)).
+ * Do not suppress normalisation diagnostics for resolving variables ([#1740](https://github.com/databricks/cli/pull/1740)).
+ * Include a permissions section in all templates ([#1713](https://github.com/databricks/cli/pull/1713)).
+ * Fixed complex variables are not being correctly merged from include files ([#1746](https://github.com/databricks/cli/pull/1746)).
+ * Fixed variable override in target with full variable syntax ([#1749](https://github.com/databricks/cli/pull/1749)).
+
+Internal:
+ * Consider serverless clusters as compatible for Python wheel tasks ([#1733](https://github.com/databricks/cli/pull/1733)).
+ * PythonMutator: explain missing package error ([#1736](https://github.com/databricks/cli/pull/1736)).
+ * Add `dyn.Time` to box a timestamp with its original string value ([#1732](https://github.com/databricks/cli/pull/1732)).
+ * Fix streaming of stdout, stdin, stderr in cobra test runner ([#1742](https://github.com/databricks/cli/pull/1742)).
+
+Dependency updates:
+ * Bump github.com/Masterminds/semver/v3 from 3.2.1 to 3.3.0 ([#1741](https://github.com/databricks/cli/pull/1741)).
+
 ## [Release] Release v0.227.1

 CLI:
@@ -33,12 +33,7 @@ func createGlobError(v dyn.Value, p dyn.Path, message string) diag.Diagnostic {
         Severity:  diag.Error,
         Summary:   fmt.Sprintf("%s: %s", source, message),
         Locations: []dyn.Location{v.Location()},
-        Paths: []dyn.Path{
-            // Hack to clone the path. This path copy is mutable.
-            // To be addressed in a later PR.
-            p.Append(),
-        },
+        Paths:     []dyn.Path{p},
     }
 }
@@ -2,7 +2,6 @@ package mutator
 import (
     "context"
-    "fmt"
     "testing"

     "github.com/databricks/cli/bundle"

@@ -44,11 +43,13 @@ func TestResolveClusterReference(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef1).Return(&compute.ClusterDetails{
-        ClusterId: "1234-5678-abcd",
-    }, nil)
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef2).Return(&compute.ClusterDetails{
-        ClusterId: "9876-5432-xywz",
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: clusterRef1},
+        {ClusterId: "9876-5432-xywz", ClusterName: clusterRef2},
     }, nil)

     diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())

@@ -78,10 +79,16 @@ func TestResolveNonExistentClusterReference(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef))
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: "some other cluster"},
+    }, nil)

     diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
-    require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist")
+    require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: cluster named 'Random' does not exist")
 }

 func TestNoLookupIfVariableIsSet(t *testing.T) {

@@ -158,8 +165,14 @@ func TestResolveVariableReferencesInVariableLookups(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, "cluster-bar-dev").Return(&compute.ClusterDetails{
-        ClusterId: "1234-5678-abcd",
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: "cluster-bar-dev"},
+        {ClusterId: "9876-5432-xywz", ClusterName: "some other cluster"},
     }, nil)

     diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences()))
@@ -406,6 +406,30 @@ func (r *Root) MergeTargetOverrides(name string) error {
     return r.updateWithDynamicValue(root)
 }

+var variableKeywords = []string{"default", "lookup"}
+
+// isFullVariableOverrideDef checks if the given value is a full syntax variable override.
+// A full syntax variable override is a map with only one of the following
+// keys: "default", "lookup".
+func isFullVariableOverrideDef(v dyn.Value) bool {
+    mv, ok := v.AsMap()
+    if !ok {
+        return false
+    }
+
+    if mv.Len() != 1 {
+        return false
+    }
+
+    for _, keyword := range variableKeywords {
+        if _, ok := mv.GetByString(keyword); ok {
+            return true
+        }
+    }
+
+    return false
+}
+
 // rewriteShorthands performs lightweight rewriting of the configuration
 // tree where we allow users to write a shorthand and must rewrite to the full form.
 func rewriteShorthands(v dyn.Value) (dyn.Value, error) {

@@ -433,30 +457,27 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
             }, variable.Locations()), nil

         case dyn.KindMap, dyn.KindSequence:
-            lookup, err := dyn.Get(variable, "lookup")
-            // If lookup is set, we don't want to rewrite the variable and return it as is.
-            if err == nil && lookup.Kind() != dyn.KindInvalid {
+            // If it's a full variable definition, leave it as is.
+            if isFullVariableOverrideDef(variable) {
                 return variable, nil
             }

             // Check if the original definition of variable has a type field.
+            // If it has a type field, it means the shorthand is a value of a complex type.
             // Type might not be found if the variable overriden in a separate file
             // and configuration is not merged yet.
             typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type")))
-            if err != nil {
-                return dyn.NewValue(map[string]dyn.Value{
-                    "default": variable,
-                }, variable.Locations()), nil
-            }
-
-            if typeV.MustString() == "complex" {
+            if err == nil && typeV.MustString() == "complex" {
                 return dyn.NewValue(map[string]dyn.Value{
                     "type":    typeV,
                     "default": variable,
                 }, variable.Locations()), nil
             }

-            return variable, nil
+            // If it's a shorthand, rewrite it to a full variable definition.
+            return dyn.NewValue(map[string]dyn.Value{
+                "default": variable,
+            }, variable.Locations()), nil

         default:
             return variable, nil
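For orientation, here is a minimal white-box sketch of the distinction the new helper draws, written as a hypothetical test in the same `config` package. It assumes the repository's existing testify dependency; the test name and the example field (`num_workers`) are illustrative, not part of this change.

```go
package config

import (
	"testing"

	"github.com/databricks/cli/libs/dyn"
	"github.com/stretchr/testify/assert"
)

// Hypothetical sketch: a map with exactly one "default" (or "lookup") key is a full
// variable override and is left untouched, while a bare map of cluster fields is a
// shorthand that rewriteShorthands wraps into {"default": ...}.
func TestIsFullVariableOverrideDefSketch(t *testing.T) {
	full := dyn.V(map[string]dyn.Value{
		"default": dyn.V(map[string]dyn.Value{"num_workers": dyn.V(4)}),
	})
	assert.True(t, isFullVariableOverrideDef(full))

	shorthand := dyn.V(map[string]dyn.Value{"num_workers": dyn.V(4)})
	assert.False(t, isFullVariableOverrideDef(shorthand))
}
```

This is what makes the `cluster3` (shorthand) and `cluster4` (full `default:` syntax) overrides in the YAML fixtures below behave the same way after merging.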
@@ -3,7 +3,6 @@ package validate
 import (
     "context"
     "fmt"
-    "slices"
     "sort"

     "github.com/databricks/cli/bundle"

@@ -66,10 +65,7 @@ func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
             }
         }

-        // dyn.Path under the hood is a slice. The code that walks the configuration
-        // tree uses the same underlying slice to track the path as it walks
-        // the tree. So, we need to clone it here.
-        m.paths = append(m.paths, slices.Clone(p))
+        m.paths = append(m.paths, p)
         m.locations = append(m.locations, v.Locations()...)

         resourceMetadata[k] = m
@@ -220,6 +220,10 @@ type resolvers struct {
 func allResolvers() *resolvers {
     r := &resolvers{}
     r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Alert"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Alerts.GetByDisplayName(ctx, name)
         if err != nil {
             return "", err

@@ -228,6 +232,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.Id), nil
     }
     r.ClusterPolicy = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["ClusterPolicy"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.ClusterPolicies.GetByName(ctx, name)
         if err != nil {
             return "", err

@@ -236,6 +244,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.PolicyId), nil
     }
     r.Cluster = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Cluster"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Clusters.GetByClusterName(ctx, name)
         if err != nil {
             return "", err

@@ -244,6 +256,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.ClusterId), nil
     }
     r.Dashboard = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Dashboard"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Dashboards.GetByName(ctx, name)
         if err != nil {
             return "", err

@@ -252,6 +268,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.Id), nil
     }
     r.InstancePool = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["InstancePool"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.InstancePools.GetByInstancePoolName(ctx, name)
         if err != nil {
             return "", err

@@ -260,6 +280,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.InstancePoolId), nil
     }
     r.Job = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Job"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Jobs.GetBySettingsName(ctx, name)
         if err != nil {
             return "", err

@@ -268,6 +292,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.JobId), nil
     }
     r.Metastore = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Metastore"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Metastores.GetByName(ctx, name)
         if err != nil {
             return "", err

@@ -276,6 +304,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.MetastoreId), nil
     }
     r.Pipeline = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Pipeline"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Pipelines.GetByName(ctx, name)
         if err != nil {
             return "", err

@@ -284,6 +316,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.PipelineId), nil
     }
     r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Query"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Queries.GetByDisplayName(ctx, name)
         if err != nil {
             return "", err

@@ -292,6 +328,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.Id), nil
     }
     r.ServicePrincipal = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["ServicePrincipal"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.ServicePrincipals.GetByDisplayName(ctx, name)
         if err != nil {
             return "", err

@@ -300,6 +340,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.ApplicationId), nil
     }
     r.Warehouse = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Warehouse"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Warehouses.GetByName(ctx, name)
         if err != nil {
             return "", err
@@ -0,0 +1,41 @@
+package variable
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/databricks/databricks-sdk-go"
+    "github.com/databricks/databricks-sdk-go/service/compute"
+)
+
+var lookupOverrides = map[string]resolverFunc{
+    "Cluster": resolveCluster,
+}
+
+// We added a custom resolver for the cluster to add filtering for the cluster source when we list all clusters.
+// Without the filtering listing could take a very long time (5-10 mins) which leads to lookup timeouts.
+func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+    result, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    })
+
+    if err != nil {
+        return "", err
+    }
+
+    tmp := map[string][]compute.ClusterDetails{}
+    for _, v := range result {
+        key := v.ClusterName
+        tmp[key] = append(tmp[key], v)
+    }
+    alternatives, ok := tmp[name]
+    if !ok || len(alternatives) == 0 {
+        return "", fmt.Errorf("cluster named '%s' does not exist", name)
+    }
+    if len(alternatives) > 1 {
+        return "", fmt.Errorf("there are %d instances of clusters named '%s'", len(alternatives), name)
+    }
+    return alternatives[0].ClusterId, nil
+}
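Filtering by the API and UI cluster sources presumably excludes the far more numerous job-created clusters, which is what keeps `ListAll` fast enough to avoid the lookup timeouts the comment mentions. As a sketch of how the new ambiguous-name branch could be exercised with the same SDK mocks used in the tests above; the `mocks` import path and the test name are assumptions, not part of this change:

```go
package variable

import (
	"context"
	"testing"

	"github.com/databricks/databricks-sdk-go/experimental/mocks"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// Hypothetical sketch: two clusters sharing a name should make resolveCluster fail
// rather than silently picking one of them.
func TestResolveClusterDuplicateNamesSketch(t *testing.T) {
	m := mocks.NewMockWorkspaceClient(t)
	clusterApi := m.GetMockClustersAPI()
	clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
		FilterBy: &compute.ListClustersFilterBy{
			ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
		},
	}).Return([]compute.ClusterDetails{
		{ClusterId: "1111", ClusterName: "dup"},
		{ClusterId: "2222", ClusterName: "dup"},
	}, nil)

	_, err := resolveCluster(context.Background(), m.WorkspaceClient, "dup")
	require.ErrorContains(t, err, "there are 2 instances of clusters named 'dup'")
}
```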
@@ -16,12 +16,10 @@ type expand struct {

 func matchError(p dyn.Path, l []dyn.Location, message string) diag.Diagnostic {
     return diag.Diagnostic{
         Severity: diag.Error,
         Summary:  message,
-        Paths: []dyn.Path{
-            p.Append(),
-        },
         Locations: l,
+        Paths:     []dyn.Path{p},
     }
 }
@@ -76,7 +76,7 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error) {

             source = filepath.Join(b.RootPath, source)
             libs[source] = append(libs[source], configLocation{
-                configPath: p.Append(), // Hack to get the copy of path
+                configPath: p,
                 location:   v.Location(),
             })
@@ -81,9 +81,10 @@ func TestComplexVariablesOverrideWithMultipleFiles(t *testing.T) {
         ),
     ))
     require.NoError(t, diags.Error())
-    require.Equal(t, "14.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion)
-    require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId)
-    require.Equal(t, 4, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers)
-    require.Equal(t, "false", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"])
+    for _, cluster := range b.Config.Resources.Jobs["my_job"].JobClusters {
+        require.Equalf(t, "14.2.x-scala2.11", cluster.NewCluster.SparkVersion, "cluster: %v", cluster.JobClusterKey)
+        require.Equalf(t, "Standard_DS3_v2", cluster.NewCluster.NodeTypeId, "cluster: %v", cluster.JobClusterKey)
+        require.Equalf(t, 4, cluster.NewCluster.NumWorkers, "cluster: %v", cluster.JobClusterKey)
+        require.Equalf(t, "false", cluster.NewCluster.SparkConf["spark.speculation"], "cluster: %v", cluster.JobClusterKey)
+    }
 }
@@ -5,13 +5,48 @@ resources:
   jobs:
     my_job:
       job_clusters:
-        - job_cluster_key: key
-          new_cluster: ${var.cluster}
+        - job_cluster_key: key1
+          new_cluster: ${var.cluster1}
+        - job_cluster_key: key2
+          new_cluster: ${var.cluster2}
+        - job_cluster_key: key3
+          new_cluster: ${var.cluster3}
+        - job_cluster_key: key4
+          new_cluster: ${var.cluster4}
 variables:
-  cluster:
+  cluster1:
+    type: complex
+    description: "A cluster definition"
+  cluster2:
+    type: complex
+    description: "A cluster definition"
+  cluster3:
+    type: complex
+    description: "A cluster definition"
+  cluster4:
     type: complex
     description: "A cluster definition"

 include:
   - ./variables/*.yml
+
+targets:
+  default:
+  dev:
+    variables:
+      cluster3:
+        spark_version: "14.2.x-scala2.11"
+        node_type_id: "Standard_DS3_v2"
+        num_workers: 4
+        spark_conf:
+          spark.speculation: false
+          spark.databricks.delta.retentionDurationCheck.enabled: false
+      cluster4:
+        default:
+          spark_version: "14.2.x-scala2.11"
+          node_type_id: "Standard_DS3_v2"
+          num_workers: 4
+          spark_conf:
+            spark.speculation: false
+            spark.databricks.delta.retentionDurationCheck.enabled: false
@@ -2,10 +2,18 @@ targets:
   default:
   dev:
     variables:
-      cluster:
+      cluster1:
         spark_version: "14.2.x-scala2.11"
         node_type_id: "Standard_DS3_v2"
         num_workers: 4
         spark_conf:
           spark.speculation: false
           spark.databricks.delta.retentionDurationCheck.enabled: false
+      cluster2:
+        default:
+          spark_version: "14.2.x-scala2.11"
+          node_type_id: "Standard_DS3_v2"
+          num_workers: 4
+          spark_conf:
+            spark.speculation: false
+            spark.databricks.delta.retentionDurationCheck.enabled: false
@@ -124,8 +124,13 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) {
     }, nil)

     clustersApi := mockWorkspaceClient.GetMockClustersAPI()
-    clustersApi.EXPECT().GetByClusterName(mock.Anything, "some-test-cluster").Return(&compute.ClusterDetails{
-        ClusterId: "4321",
+    clustersApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "4321", ClusterName: "some-test-cluster"},
+        {ClusterId: "9876", ClusterName: "some-other-cluster"},
     }, nil)

     clusterPoliciesApi := mockWorkspaceClient.GetMockClusterPoliciesAPI()
@@ -70,7 +70,7 @@ type visitOptions struct {

 func visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) {
     if len(suffix) == 0 {
-        return opts.fn(prefix, v)
+        return opts.fn(slices.Clone(prefix), v)
     }

     // Initialize prefix if it is empty.
@@ -21,7 +21,7 @@ func Foreach(fn MapFunc) MapFunc {
         for _, pair := range m.Pairs() {
             pk := pair.Key
             pv := pair.Value
-            nv, err := fn(append(p, Key(pk.MustString())), pv)
+            nv, err := fn(p.Append(Key(pk.MustString())), pv)
             if err != nil {
                 return InvalidValue, err
             }

@@ -32,7 +32,7 @@ func Foreach(fn MapFunc) MapFunc {
         s := slices.Clone(v.MustSequence())
         for i, value := range s {
             var err error
-            s[i], err = fn(append(p, Index(i)), value)
+            s[i], err = fn(p.Append(Index(i)), value)
             if err != nil {
                 return InvalidValue, err
             }
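The `slices.Clone` and `p.Append` changes above, together with the removal of the per-call-site `p.Append()` workarounds earlier in this diff, all address the same Go slice-aliasing pitfall: appending to a shared path slice can overwrite elements already captured by a callback. A standalone sketch of the failure mode and the fix, using plain string slices rather than the `dyn` package:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// A shared backing array, like a dyn.Path being reused while walking a tree.
	prefix := make([]string, 0, 4)
	prefix = append(prefix, "resources")

	var collected [][]string

	// Simulate visiting two siblings with the same prefix: append reuses prefix's
	// backing array, so the second iteration clobbers what the first one captured.
	for _, key := range []string{"foo", "bar"} {
		child := append(prefix, key)
		collected = append(collected, child)
	}
	fmt.Println(collected) // [[resources bar] [resources bar]]

	// Cloning before appending gives each callback its own copy, as visit() now does.
	collected = nil
	for _, key := range []string{"foo", "bar"} {
		child := append(slices.Clone(prefix), key)
		collected = append(collected, child)
	}
	fmt.Println(collected) // [[resources foo] [resources bar]]
}
```

This is the behavior the new `TestVisitCallbackPathCopy` test below locks in.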
@@ -0,0 +1,36 @@
+package dyn_test
+
+import (
+    "testing"
+
+    "github.com/databricks/cli/libs/dyn"
+    assert "github.com/databricks/cli/libs/dyn/dynassert"
+)
+
+func TestVisitCallbackPathCopy(t *testing.T) {
+    vin := dyn.V(map[string]dyn.Value{
+        "foo": dyn.V(42),
+        "bar": dyn.V(43),
+    })
+
+    var paths []dyn.Path
+
+    // The callback should receive a copy of the path.
+    // If the same underlying value is used, all collected paths will be the same.
+    // This test uses `MapByPattern` to collect all paths in the map.
+    // Visit itself doesn't have public functions and we exclusively use black-box testing for this package.
+    _, _ = dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyKey()), func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+        paths = append(paths, p)
+        return v, nil
+    })
+
+    // Verify that the paths retained their original values.
+    var strings []string
+    for _, p := range paths {
+        strings = append(strings, p.String())
+    }
+    assert.ElementsMatch(t, strings, []string{
+        "foo",
+        "bar",
+    })
+}