Mirror of https://github.com/databricks/cli.git

Commit 7403101d59: Merge remote-tracking branch 'origin/main' into dashboards
@@ -116,6 +116,10 @@ func allResolvers() *resolvers {
 {{range .Services -}}
 {{- if in $allowlist .KebabName -}}
 r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+    fn, ok := lookupOverrides["{{.Singular.PascalName}}"]
+    if ok {
+        return fn(ctx, w, name)
+    }
     entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
     if err != nil {
         return "", err
CHANGELOG.md (29 lines changed)

@@ -1,5 +1,34 @@
 # Version changelog
 
+## [Release] Release v0.228.0
+
+CLI:
+* Do not error if we cannot prompt for a profile in `auth login` ([#1745](https://github.com/databricks/cli/pull/1745)).
+
+Bundles:
+
+As of this release, the CLI will show a prompt if there are configuration changes that lead to DLT pipeline recreation.
+Users can skip the prompt by specifying the `--auto-approve` flag.
+
+* Pass along $AZURE_CONFIG_FILE to Terraform process ([#1734](https://github.com/databricks/cli/pull/1734)).
+* Add prompt when a pipeline recreation happens ([#1672](https://github.com/databricks/cli/pull/1672)).
+* Use materialized views in the default-sql template ([#1709](https://github.com/databricks/cli/pull/1709)).
+* Update templates to latest LTS DBR ([#1715](https://github.com/databricks/cli/pull/1715)).
+* Make lock optional in the JSON schema ([#1738](https://github.com/databricks/cli/pull/1738)).
+* Do not suppress normalisation diagnostics for resolving variables ([#1740](https://github.com/databricks/cli/pull/1740)).
+* Include a permissions section in all templates ([#1713](https://github.com/databricks/cli/pull/1713)).
+* Fixed complex variables not being correctly merged from include files ([#1746](https://github.com/databricks/cli/pull/1746)).
+* Fixed variable override in target with full variable syntax ([#1749](https://github.com/databricks/cli/pull/1749)).
+
+Internal:
+* Consider serverless clusters as compatible for Python wheel tasks ([#1733](https://github.com/databricks/cli/pull/1733)).
+* PythonMutator: explain missing package error ([#1736](https://github.com/databricks/cli/pull/1736)).
+* Add `dyn.Time` to box a timestamp with its original string value ([#1732](https://github.com/databricks/cli/pull/1732)).
+* Fix streaming of stdout, stdin, stderr in cobra test runner ([#1742](https://github.com/databricks/cli/pull/1742)).
+
+Dependency updates:
+* Bump github.com/Masterminds/semver/v3 from 3.2.1 to 3.3.0 ([#1741](https://github.com/databricks/cli/pull/1741)).
+
 ## [Release] Release v0.227.1
 
 CLI:
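As an aside for readers of the release notes: the recreation prompt described above appears during deployment, and the `--auto-approve` flag is the documented way to bypass it in non-interactive environments. An illustrative invocation (not part of this commit):

    databricks bundle deploy --auto-approve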
@@ -33,12 +33,7 @@ func createGlobError(v dyn.Value, p dyn.Path, message string) diag.Diagnostic {
         Severity:  diag.Error,
         Summary:   fmt.Sprintf("%s: %s", source, message),
         Locations: []dyn.Location{v.Location()},
-
-        Paths: []dyn.Path{
-            // Hack to clone the path. This path copy is mutable.
-            // To be addressed in a later PR.
-            p.Append(),
-        },
+        Paths:     []dyn.Path{p},
     }
 }
 
@@ -6,5 +6,5 @@ type Deployment struct {
     FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"`
 
     // Lock configures locking behavior on deployment.
-    Lock Lock `json:"lock"`
+    Lock Lock `json:"lock,omitempty"`
 }
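Adding `omitempty` is what makes the `lock` block optional in the generated JSON schema (see #1738 in the changelog above). A minimal databricks.yml sketch of the configuration this struct models; the nested `enabled` key belongs to the `Lock` struct and is an assumption here, not shown in this diff:

    bundle:
      name: my_bundle
      deployment:
        fail_on_active_runs: true
        lock:
          enabled: false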
@@ -1,15 +1,21 @@
 package python
 
 import (
+    "bytes"
     "context"
     "encoding/json"
     "errors"
     "fmt"
+    "io"
     "os"
     "path/filepath"
 
-    "github.com/databricks/cli/libs/python"
     "github.com/databricks/databricks-sdk-go/logger"
+    "github.com/fatih/color"
+
+    "strings"
+
+    "github.com/databricks/cli/libs/python"
 
     "github.com/databricks/cli/bundle/env"
@@ -169,7 +175,11 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
         return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err)
     }
 
-    stderrWriter := newLogWriter(ctx, "stderr: ")
+    stderrBuf := bytes.Buffer{}
+    stderrWriter := io.MultiWriter(
+        newLogWriter(ctx, "stderr: "),
+        &stderrBuf,
+    )
     stdoutWriter := newLogWriter(ctx, "stdout: ")
 
     _, processErr := process.Background(
@@ -197,7 +207,13 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
     // process can fail without reporting errors in diagnostics file or creating it, for instance,
     // venv doesn't have PyDABs library installed
     if processErr != nil {
-        return dyn.InvalidValue, diag.Errorf("python mutator process failed: %sw, use --debug to enable logging", processErr)
+        diagnostic := diag.Diagnostic{
+            Severity: diag.Error,
+            Summary:  fmt.Sprintf("python mutator process failed: %q, use --debug to enable logging", processErr),
+            Detail:   explainProcessErr(stderrBuf.String()),
+        }
+
+        return dyn.InvalidValue, diag.Diagnostics{diagnostic}
     }
 
     // or we can fail to read diagnostics file, that should always be created
@@ -205,15 +221,40 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
         return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr)
     }
 
-    output, err := loadOutputFile(rootPath, outputPath)
-    if err != nil {
-        return dyn.InvalidValue, diag.Errorf("failed to load Python mutator output: %s", err)
-    }
+    output, outputDiags := loadOutputFile(rootPath, outputPath)
+    pythonDiagnostics = pythonDiagnostics.Extend(outputDiags)
 
     // we pass through pythonDiagnostic because it contains warnings
     return output, pythonDiagnostics
 }
 
+const installExplanation = `If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
+and that the wheel is installed in the Python environment:
+
+  $ .venv/bin/pip install -e .
+
+If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
+or activate the environment before running CLI commands:
+
+  experimental:
+    pydabs:
+      venv_path: .venv
+`
+
+// explainProcessErr provides additional explanation for common errors.
+// It's meant to be a best effort, and not all errors are covered.
+// Output should only be used for error reporting.
+func explainProcessErr(stderr string) string {
+    // implemented in cpython/Lib/runpy.py and portable across Python 3.x, including pypy
+    if strings.Contains(stderr, "Error while finding module specification for 'databricks.bundles.build'") {
+        summary := color.CyanString("Explanation: ") + "'databricks-pydabs' library is not installed in the Python environment.\n"
+
+        return stderr + "\n" + summary + "\n" + installExplanation
+    }
+
+    return stderr
+}
+
 func writeInputFile(inputPath string, input dyn.Value) error {
     // we need to marshal dyn.Value instead of bundle.Config to JSON to support
     // non-string fields assigned with bundle variables
@@ -225,10 +266,10 @@ func writeInputFile(inputPath string, input dyn.Value) error {
     return os.WriteFile(inputPath, rootConfigJson, 0600)
 }
 
-func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) {
+func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) {
     outputFile, err := os.Open(outputPath)
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to open output file: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err))
     }
 
     defer outputFile.Close()
@@ -243,27 +284,34 @@ func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) {
     // for that, we pass virtualPath instead of outputPath as file location
     virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml"))
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to get absolute path: %w", err))
     }
 
     generated, err := yamlloader.LoadYAML(virtualPath, outputFile)
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to parse output file: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to parse output file: %w", err))
     }
 
-    normalized, diagnostic := convert.Normalize(config.Root{}, generated)
-    if diagnostic.Error() != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %w", diagnostic.Error())
-    }
+    return strictNormalize(config.Root{}, generated)
+}
+
+func strictNormalize(dst any, generated dyn.Value) (dyn.Value, diag.Diagnostics) {
+    normalized, diags := convert.Normalize(dst, generated)
 
     // warnings shouldn't happen because output should be already normalized
     // when it happens, it's a bug in the mutator, and should be treated as an error
-    for _, d := range diagnostic.Filter(diag.Warning) {
-        return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %s", d.Summary)
+    strictDiags := diag.Diagnostics{}
+    for _, d := range diags {
+        if d.Severity == diag.Warning {
+            d.Severity = diag.Error
+        }
+
+        strictDiags = strictDiags.Append(d)
     }
 
-    return normalized, nil
+    return normalized, strictDiags
 }
 
 // loadDiagnosticsFile loads diagnostics from a file.
@@ -10,6 +10,8 @@ import (
     "runtime"
     "testing"
 
+    "github.com/databricks/cli/libs/dyn/convert"
+
     "github.com/databricks/cli/libs/dyn/merge"
 
     "github.com/databricks/cli/bundle/env"
@@ -255,7 +257,7 @@ func TestPythonMutator_badOutput(t *testing.T) {
     mutator := PythonMutator(PythonMutatorPhaseLoad)
     diag := bundle.Apply(ctx, b, mutator)
 
-    assert.EqualError(t, diag.Error(), "failed to load Python mutator output: failed to normalize output: unknown field: unknown_property")
+    assert.EqualError(t, diag.Error(), "unknown field: unknown_property")
 }
 
 func TestPythonMutator_disabled(t *testing.T) {
@@ -546,6 +548,46 @@ func TestInterpreterPath(t *testing.T) {
     }
 }
 
+func TestStrictNormalize(t *testing.T) {
+    // NB: there is no way to trigger diag.Error, so we don't test it
+
+    type TestStruct struct {
+        A int `json:"a"`
+    }
+
+    value := dyn.NewValue(map[string]dyn.Value{"A": dyn.NewValue("abc", nil)}, nil)
+
+    _, diags := convert.Normalize(TestStruct{}, value)
+    _, strictDiags := strictNormalize(TestStruct{}, value)
+
+    assert.False(t, diags.HasError())
+    assert.True(t, strictDiags.HasError())
+}
+
+func TestExplainProcessErr(t *testing.T) {
+    stderr := "/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')\n"
+    expected := `/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')
+
+Explanation: 'databricks-pydabs' library is not installed in the Python environment.
+
+If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
+and that the wheel is installed in the Python environment:
+
+  $ .venv/bin/pip install -e .
+
+If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
+or activate the environment before running CLI commands:
+
+  experimental:
+    pydabs:
+      venv_path: .venv
+`
+
+    out := explainProcessErr(stderr)
+
+    assert.Equal(t, expected, out)
+}
+
 func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context {
     ctx := context.Background()
     ctx, stub := process.WithStub(ctx)
@@ -2,7 +2,6 @@ package mutator
 
 import (
     "context"
-    "fmt"
     "testing"
 
     "github.com/databricks/cli/bundle"
@@ -44,11 +43,13 @@ func TestResolveClusterReference(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef1).Return(&compute.ClusterDetails{
-        ClusterId: "1234-5678-abcd",
-    }, nil)
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef2).Return(&compute.ClusterDetails{
-        ClusterId: "9876-5432-xywz",
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: clusterRef1},
+        {ClusterId: "9876-5432-xywz", ClusterName: clusterRef2},
     }, nil)
 
     diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
@@ -78,10 +79,16 @@ func TestResolveNonExistentClusterReference(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef))
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: "some other cluster"},
+    }, nil)
 
     diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
-    require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist")
+    require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: cluster named 'Random' does not exist")
 }
 
 func TestNoLookupIfVariableIsSet(t *testing.T) {
@@ -158,8 +165,14 @@ func TestResolveVariableReferencesInVariableLookups(t *testing.T) {
     m := mocks.NewMockWorkspaceClient(t)
     b.SetWorkpaceClient(m.WorkspaceClient)
     clusterApi := m.GetMockClustersAPI()
-    clusterApi.EXPECT().GetByClusterName(mock.Anything, "cluster-bar-dev").Return(&compute.ClusterDetails{
-        ClusterId: "1234-5678-abcd",
+
+    clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    }).Return([]compute.ClusterDetails{
+        {ClusterId: "1234-5678-abcd", ClusterName: "cluster-bar-dev"},
+        {ClusterId: "9876-5432-xywz", ClusterName: "some other cluster"},
     }, nil)
 
     diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences()))
@@ -10,7 +10,6 @@ import (
     "github.com/databricks/cli/libs/dyn"
     "github.com/databricks/cli/libs/dyn/convert"
     "github.com/databricks/cli/libs/dyn/dynvar"
-    "github.com/databricks/cli/libs/log"
 )
 
 type resolveVariableReferences struct {

@@ -124,6 +123,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
     // We rewrite it here to make the resolution logic simpler.
     varPath := dyn.NewPath(dyn.Key("var"))
 
+    var diags diag.Diagnostics
     err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
         // Synthesize a copy of the root that has all fields that are present in the type
         // but not set in the dynamic value set to their corresponding empty value.

@@ -180,14 +180,13 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
 
         // Normalize the result because variable resolution may have been applied to non-string fields.
         // For example, a variable reference may have been resolved to an integer.
-        root, diags := convert.Normalize(b.Config, root)
-        for _, diag := range diags {
-            // This occurs when a variable's resolved value is incompatible with the field's type.
-            // Log a warning until we have a better way to surface these diagnostics to the user.
-            log.Warnf(ctx, "normalization diagnostic: %s", diag.Summary)
-        }
+        root, normaliseDiags := convert.Normalize(b.Config, root)
+        diags = diags.Extend(normaliseDiags)
         return root, nil
     })
 
-    return diag.FromErr(err)
+    if err != nil {
+        diags = diags.Extend(diag.FromErr(err))
+    }
+    return diags
 }
@@ -406,6 +406,30 @@ func (r *Root) MergeTargetOverrides(name string) error {
     return r.updateWithDynamicValue(root)
 }
 
+var variableKeywords = []string{"default", "lookup"}
+
+// isFullVariableOverrideDef checks if the given value is a full syntax variable override.
+// A full syntax variable override is a map with only one of the following
+// keys: "default", "lookup".
+func isFullVariableOverrideDef(v dyn.Value) bool {
+    mv, ok := v.AsMap()
+    if !ok {
+        return false
+    }
+
+    if mv.Len() != 1 {
+        return false
+    }
+
+    for _, keyword := range variableKeywords {
+        if _, ok := mv.GetByString(keyword); ok {
+            return true
+        }
+    }
+
+    return false
+}
+
 // rewriteShorthands performs lightweight rewriting of the configuration
 // tree where we allow users to write a shorthand and must rewrite to the full form.
 func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
@@ -433,20 +457,27 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
         }, variable.Locations()), nil
 
     case dyn.KindMap, dyn.KindSequence:
-        // Check if the original definition of variable has a type field.
-        typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type")))
-        if err != nil {
+        // If it's a full variable definition, leave it as is.
+        if isFullVariableOverrideDef(variable) {
             return variable, nil
         }
 
-        if typeV.MustString() == "complex" {
+        // Check if the original definition of variable has a type field.
+        // If it has a type field, it means the shorthand is a value of a complex type.
+        // The type might not be found if the variable is overridden in a separate file
+        // and the configuration is not merged yet.
+        typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type")))
+        if err == nil && typeV.MustString() == "complex" {
             return dyn.NewValue(map[string]dyn.Value{
                 "type":    typeV,
                 "default": variable,
             }, variable.Locations()), nil
         }
 
-        return variable, nil
+        // If it's a shorthand, rewrite it to a full variable definition.
+        return dyn.NewValue(map[string]dyn.Value{
+            "default": variable,
+        }, variable.Locations()), nil
 
     default:
         return variable, nil
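To make the rewrite concrete, a hypothetical databricks.yml fragment (not part of this diff) contrasting the override styles handled above. The shorthand is rewritten into a map with a `default` key, while a map that already carries exactly one of the `default`/`lookup` keywords is detected by `isFullVariableOverrideDef` and left untouched:

    targets:
      dev:
        variables:
          my_simple_var: some-value    # shorthand; rewritten to {default: some-value}
          my_full_var:
            default: some-value        # full syntax; left as is
          my_lookup_var:
            lookup:
              cluster: dev-cluster     # full syntax; left as is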
@@ -3,7 +3,6 @@ package validate
 
 import (
     "context"
    "fmt"
-    "slices"
     "sort"
 
     "github.com/databricks/cli/bundle"

@@ -66,10 +65,7 @@ func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.D
             }
         }
 
-        // dyn.Path under the hood is a slice. The code that walks the configuration
-        // tree uses the same underlying slice to track the path as it walks
-        // the tree. So, we need to clone it here.
-        m.paths = append(m.paths, slices.Clone(p))
+        m.paths = append(m.paths, p)
         m.locations = append(m.locations, v.Locations()...)
 
         resourceMetadata[k] = m
@@ -220,6 +220,10 @@ type resolvers struct {
 func allResolvers() *resolvers {
     r := &resolvers{}
     r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Alert"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Alerts.GetByDisplayName(ctx, name)
         if err != nil {
             return "", err

@@ -228,6 +232,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.Id), nil
     }
     r.ClusterPolicy = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["ClusterPolicy"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.ClusterPolicies.GetByName(ctx, name)
         if err != nil {
             return "", err

@@ -236,6 +244,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.PolicyId), nil
     }
     r.Cluster = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Cluster"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Clusters.GetByClusterName(ctx, name)
         if err != nil {
             return "", err

@@ -244,6 +256,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.ClusterId), nil
     }
     r.Dashboard = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Dashboard"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Dashboards.GetByName(ctx, name)
         if err != nil {
             return "", err

@@ -252,6 +268,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.Id), nil
     }
     r.InstancePool = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["InstancePool"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.InstancePools.GetByInstancePoolName(ctx, name)
         if err != nil {
             return "", err

@@ -260,6 +280,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.InstancePoolId), nil
     }
     r.Job = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Job"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Jobs.GetBySettingsName(ctx, name)
         if err != nil {
             return "", err

@@ -268,6 +292,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.JobId), nil
     }
     r.Metastore = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Metastore"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Metastores.GetByName(ctx, name)
         if err != nil {
             return "", err

@@ -276,6 +304,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.MetastoreId), nil
     }
     r.Pipeline = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Pipeline"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Pipelines.GetByName(ctx, name)
         if err != nil {
             return "", err

@@ -284,6 +316,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.PipelineId), nil
     }
     r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Query"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Queries.GetByDisplayName(ctx, name)
         if err != nil {
             return "", err

@@ -292,6 +328,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.Id), nil
     }
     r.ServicePrincipal = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["ServicePrincipal"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.ServicePrincipals.GetByDisplayName(ctx, name)
         if err != nil {
             return "", err

@@ -300,6 +340,10 @@ func allResolvers() *resolvers {
         return fmt.Sprint(entity.ApplicationId), nil
     }
     r.Warehouse = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+        fn, ok := lookupOverrides["Warehouse"]
+        if ok {
+            return fn(ctx, w, name)
+        }
         entity, err := w.Warehouses.GetByName(ctx, name)
         if err != nil {
             return "", err
@@ -0,0 +1,41 @@
+package variable
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/databricks/databricks-sdk-go"
+    "github.com/databricks/databricks-sdk-go/service/compute"
+)
+
+var lookupOverrides = map[string]resolverFunc{
+    "Cluster": resolveCluster,
+}
+
+// We added a custom resolver for the cluster to add filtering for the cluster source when we list all clusters.
+// Without the filtering, listing could take a very long time (5-10 mins), which leads to lookup timeouts.
+func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
+    result, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{
+        FilterBy: &compute.ListClustersFilterBy{
+            ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+        },
+    })
+    if err != nil {
+        return "", err
+    }
+
+    tmp := map[string][]compute.ClusterDetails{}
+    for _, v := range result {
+        key := v.ClusterName
+        tmp[key] = append(tmp[key], v)
+    }
+
+    alternatives, ok := tmp[name]
+    if !ok || len(alternatives) == 0 {
+        return "", fmt.Errorf("cluster named '%s' does not exist", name)
+    }
+    if len(alternatives) > 1 {
+        return "", fmt.Errorf("there are %d instances of clusters named '%s'", len(alternatives), name)
+    }
+    return alternatives[0].ClusterId, nil
+}
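For context, this resolver runs when a bundle variable declares a cluster lookup by name. A hypothetical databricks.yml fragment (not part of this diff) that would exercise `resolveCluster`:

    variables:
      my_cluster_id:
        description: Cluster ID resolved from a cluster name
        lookup:
          cluster: "Shared Autoscaling"

The name-to-ID resolution fails fast on zero or multiple matches rather than picking one arbitrarily.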
@@ -69,6 +69,11 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
     // Remove output starting from Warning until end of output
     output = output[:bytes.Index([]byte(output), []byte("Warning:"))]
     cmdio.LogString(ctx, output)
 
+    if !cmdio.IsPromptSupported(ctx) {
+        return diag.Errorf("This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed.")
+    }
+
     ans, err := cmdio.AskYesOrNo(ctx, "Confirm import changes? Changes will be remotely applied only after running 'bundle deploy'.")
     if err != nil {
         return diag.FromErr(err)
@@ -111,6 +111,13 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error {
         environ["PATH"] = path
     }
 
+    // Include $AZURE_CONFIG_FILE in set of environment variables to pass along.
+    // This is set in Azure DevOps by the AzureCLI@2 task.
+    azureConfigFile, ok := env.Lookup(ctx, "AZURE_CONFIG_FILE")
+    if ok {
+        environ["AZURE_CONFIG_FILE"] = azureConfigFile
+    }
+
     // Include $TF_CLI_CONFIG_FILE to override terraform provider in development.
     // See: https://developer.hashicorp.com/terraform/cli/config/config-file#explicit-installation-method-configuration
     devConfigFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE")
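For context, the AzureCLI@2 task referenced in the comment sets $AZURE_CONFIG_FILE for the steps it runs; inheriting it lets the Terraform child process authenticate the same way the parent CLI does. A hypothetical Azure DevOps pipeline step (not part of this diff, field names per the standard AzureCLI@2 schema) where this matters:

    - task: AzureCLI@2
      inputs:
        azureSubscription: my-service-connection
        scriptType: bash
        scriptLocation: inlineScript
        inlineScript: |
          databricks bundle deploy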
@@ -269,19 +269,20 @@ func TestSetUserAgentExtraEnvVar(t *testing.T) {
 }
 
 func TestInheritEnvVars(t *testing.T) {
-    env := map[string]string{}
-
     t.Setenv("HOME", "/home/testuser")
     t.Setenv("PATH", "/foo:/bar")
     t.Setenv("TF_CLI_CONFIG_FILE", "/tmp/config.tfrc")
+    t.Setenv("AZURE_CONFIG_FILE", "/tmp/foo/bar")
 
-    err := inheritEnvVars(context.Background(), env)
-
-    require.NoError(t, err)
-
-    require.Equal(t, env["HOME"], "/home/testuser")
-    require.Equal(t, env["PATH"], "/foo:/bar")
-    require.Equal(t, env["TF_CLI_CONFIG_FILE"], "/tmp/config.tfrc")
+    ctx := context.Background()
+    env := map[string]string{}
+    err := inheritEnvVars(ctx, env)
+    if assert.NoError(t, err) {
+        assert.Equal(t, "/home/testuser", env["HOME"])
+        assert.Equal(t, "/foo:/bar", env["PATH"])
+        assert.Equal(t, "/tmp/config.tfrc", env["TF_CLI_CONFIG_FILE"])
+        assert.Equal(t, "/tmp/foo/bar", env["AZURE_CONFIG_FILE"])
+    }
 }
 
 func TestSetUserProfileFromInheritEnvVars(t *testing.T) {
@@ -16,12 +16,10 @@ type expand struct {
 
 func matchError(p dyn.Path, l []dyn.Location, message string) diag.Diagnostic {
     return diag.Diagnostic{
         Severity: diag.Error,
         Summary:  message,
-        Paths: []dyn.Path{
-            p.Append(),
-        },
         Locations: l,
+        Paths:     []dyn.Path{p},
     }
 }
 

@@ -41,7 +39,7 @@ func getLibDetails(v dyn.Value) (string, string, bool) {
 }
 
 func findMatches(b *bundle.Bundle, path string) ([]string, error) {
-    matches, err := filepath.Glob(filepath.Join(b.RootPath, path))
+    matches, err := filepath.Glob(filepath.Join(b.SyncRootPath, path))
     if err != nil {
         return nil, err
     }

@@ -54,10 +52,10 @@ func findMatches(b *bundle.Bundle, path string) ([]string, error) {
         }
     }
 
-    // We make the matched path relative to the root path before storing it
+    // We make the matched path relative to the sync root path before storing it
     // to allow upload mutator to distinguish between local and remote paths
     for i, match := range matches {
-        matches[i], err = filepath.Rel(b.RootPath, match)
+        matches[i], err = filepath.Rel(b.SyncRootPath, match)
         if err != nil {
             return nil, err
         }

@@ -213,8 +211,8 @@ func (e *expand) Name() string {
 
 // ExpandGlobReferences expands any glob references in the libraries or environments section
 // to corresponding local paths.
-// We only expand local paths (i.e. paths that are relative to the root path).
-// After expanding we make the paths relative to the root path to allow upload mutator later in the chain to
+// We only expand local paths (i.e. paths that are relative to the sync root path).
+// After expanding we make the paths relative to the sync root path to allow upload mutator later in the chain to
 // distinguish between local and remote paths.
 func ExpandGlobReferences() bundle.Mutator {
     return &expand{}
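As a usage illustration (assumed bundle layout, not part of this diff), the mutator expands glob patterns like the following, now resolved against the sync root rather than the bundle root:

    resources:
      jobs:
        my_job:
          tasks:
            - task_key: main
              libraries:
                - whl: ./dist/*.whl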
@@ -23,7 +23,7 @@ func TestGlobReferencesExpandedForTaskLibraries(t *testing.T) {
     testutil.Touch(t, dir, "jar", "my2.jar")
 
     b := &bundle.Bundle{
-        RootPath: dir,
+        SyncRootPath: dir,
         Config: config.Root{
             Resources: config.Resources{
                 Jobs: map[string]*resources.Job{

@@ -104,7 +104,7 @@ func TestGlobReferencesExpandedForForeachTaskLibraries(t *testing.T) {
     testutil.Touch(t, dir, "jar", "my2.jar")
 
     b := &bundle.Bundle{
-        RootPath: dir,
+        SyncRootPath: dir,
         Config: config.Root{
             Resources: config.Resources{
                 Jobs: map[string]*resources.Job{

@@ -189,7 +189,7 @@ func TestGlobReferencesExpandedForEnvironmentsDeps(t *testing.T) {
     testutil.Touch(t, dir, "jar", "my2.jar")
 
     b := &bundle.Bundle{
-        RootPath: dir,
+        SyncRootPath: dir,
         Config: config.Root{
             Resources: config.Resources{
                 Jobs: map[string]*resources.Job{
@@ -18,7 +18,7 @@ func TestValidateEnvironments(t *testing.T) {
     testutil.Touch(t, tmpDir, "wheel.whl")
 
     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        SyncRootPath: tmpDir,
         Config: config.Root{
             Resources: config.Resources{
                 Jobs: map[string]*resources.Job{

@@ -50,7 +50,7 @@ func TestValidateEnvironmentsNoFile(t *testing.T) {
     tmpDir := t.TempDir()
 
     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        SyncRootPath: tmpDir,
         Config: config.Root{
             Resources: config.Resources{
                 Jobs: map[string]*resources.Job{

@@ -84,7 +84,7 @@ func TestValidateTaskLibraries(t *testing.T) {
     testutil.Touch(t, tmpDir, "wheel.whl")
 
     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        SyncRootPath: tmpDir,
         Config: config.Root{
             Resources: config.Resources{
                 Jobs: map[string]*resources.Job{

@@ -117,7 +117,7 @@ func TestValidateTaskLibrariesNoFile(t *testing.T) {
     tmpDir := t.TempDir()
 
     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        SyncRootPath: tmpDir,
         Config: config.Root{
             Resources: config.Resources{
                 Jobs: map[string]*resources.Job{
@@ -74,9 +74,9 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error
             return v, nil
         }
 
-        source = filepath.Join(b.RootPath, source)
+        source = filepath.Join(b.SyncRootPath, source)
         libs[source] = append(libs[source], configLocation{
-            configPath: p.Append(), // Hack to get the copy of path
+            configPath: p,
             location:   v.Location(),
         })
 
@@ -24,7 +24,7 @@ func TestArtifactUploadForWorkspace(t *testing.T) {
     whlLocalPath := filepath.Join(whlFolder, "source.whl")
 
     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        SyncRootPath: tmpDir,
         Config: config.Root{
             Workspace: config.Workspace{
                 ArtifactPath: "/foo/bar/artifacts",

@@ -112,7 +112,7 @@ func TestArtifactUploadForVolumes(t *testing.T) {
     whlLocalPath := filepath.Join(whlFolder, "source.whl")
 
     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        SyncRootPath: tmpDir,
         Config: config.Root{
             Workspace: config.Workspace{
                 ArtifactPath: "/Volumes/foo/bar/artifacts",

@@ -200,7 +200,7 @@ func TestArtifactUploadWithNoLibraryReference(t *testing.T) {
     whlLocalPath := filepath.Join(whlFolder, "source.whl")
 
     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        SyncRootPath: tmpDir,
         Config: config.Root{
             Workspace: config.Workspace{
                 ArtifactPath: "/Workspace/foo/bar/artifacts",

@@ -240,7 +240,7 @@ func TestUploadMultipleLibraries(t *testing.T) {
     testutil.Touch(t, whlFolder, "source4.whl")
 
     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        SyncRootPath: tmpDir,
         Config: config.Root{
             Workspace: config.Workspace{
                 ArtifactPath: "/foo/bar/artifacts",
@@ -19,9 +19,38 @@ import (
     "github.com/databricks/cli/bundle/scripts"
     "github.com/databricks/cli/libs/cmdio"
     terraformlib "github.com/databricks/cli/libs/terraform"
+    tfjson "github.com/hashicorp/terraform-json"
 )
 
-func approvalForUcSchemaDelete(ctx context.Context, b *bundle.Bundle) (bool, error) {
+func parseTerraformActions(changes []*tfjson.ResourceChange, toInclude func(typ string, actions tfjson.Actions) bool) []terraformlib.Action {
+    res := make([]terraformlib.Action, 0)
+    for _, rc := range changes {
+        if !toInclude(rc.Type, rc.Change.Actions) {
+            continue
+        }
+
+        var actionType terraformlib.ActionType
+        switch {
+        case rc.Change.Actions.Delete():
+            actionType = terraformlib.ActionTypeDelete
+        case rc.Change.Actions.Replace():
+            actionType = terraformlib.ActionTypeRecreate
+        default:
+            // No use case for other action types yet.
+            continue
+        }
+
+        res = append(res, terraformlib.Action{
+            Action:       actionType,
+            ResourceType: rc.Type,
+            ResourceName: rc.Name,
+        })
+    }
+
+    return res
+}
+
+func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) {
     tf := b.Terraform
     if tf == nil {
         return false, fmt.Errorf("terraform not initialized")

@@ -33,41 +62,52 @@ func approvalForUcSchemaDelete(ctx context.Context, b *bundle.Bundle) (bool, err
         return false, err
     }
 
-    actions := make([]terraformlib.Action, 0)
-    for _, rc := range plan.ResourceChanges {
-        // We only care about destructive actions on UC schema resources.
-        if rc.Type != "databricks_schema" {
-            continue
+    schemaActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool {
+        // Filter in only UC schema resources.
+        if typ != "databricks_schema" {
+            return false
         }
 
-        var actionType terraformlib.ActionType
-
-        switch {
-        case rc.Change.Actions.Delete():
-            actionType = terraformlib.ActionTypeDelete
-        case rc.Change.Actions.Replace():
-            actionType = terraformlib.ActionTypeRecreate
-        default:
-            // We don't need a prompt for non-destructive actions like creating
-            // or updating a schema.
-            continue
+        // We only display prompts for destructive actions like deleting or
+        // recreating a schema.
+        return actions.Delete() || actions.Replace()
+    })
+
+    dltActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool {
+        // Filter in only DLT pipeline resources.
+        if typ != "databricks_pipeline" {
+            return false
         }
 
-        actions = append(actions, terraformlib.Action{
-            Action:       actionType,
-            ResourceType: rc.Type,
-            ResourceName: rc.Name,
-        })
-    }
+        // Recreating a DLT pipeline leads to metadata loss, and for a transient period
+        // the underlying tables will be unavailable.
+        return actions.Replace() || actions.Delete()
+    })
 
-    // No restricted actions planned. No need for approval.
-    if len(actions) == 0 {
+    // We don't need to display any prompts in this case.
+    if len(dltActions) == 0 && len(schemaActions) == 0 {
         return true, nil
     }
 
-    cmdio.LogString(ctx, "The following UC schemas will be deleted or recreated. Any underlying data may be lost:")
-    for _, action := range actions {
-        cmdio.Log(ctx, action)
+    // One or more UC schema resources will be deleted or recreated.
+    if len(schemaActions) != 0 {
+        cmdio.LogString(ctx, "The following UC schemas will be deleted or recreated. Any underlying data may be lost:")
+        for _, action := range schemaActions {
+            cmdio.Log(ctx, action)
+        }
+    }
+
+    // One or more DLT pipelines are being recreated.
+    if len(dltActions) != 0 {
+        msg := `
+This action will result in the deletion or recreation of the following DLT Pipelines along with the
+Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the Pipelines will
+restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline
+properties such as the 'catalog' or 'storage' are changed:`
+        cmdio.LogString(ctx, msg)
+        for _, action := range dltActions {
+            cmdio.Log(ctx, action)
+        }
     }
 
     if b.AutoApprove {

@@ -126,7 +166,7 @@ func Deploy() bundle.Mutator {
         terraform.CheckRunningResource(),
         terraform.Plan(terraform.PlanGoal("deploy")),
         bundle.If(
-            approvalForUcSchemaDelete,
+            approvalForDeploy,
             deployCore,
             bundle.LogString("Deployment cancelled!"),
         ),
@@ -0,0 +1,67 @@
+package phases
+
+import (
+    "testing"
+
+    terraformlib "github.com/databricks/cli/libs/terraform"
+    tfjson "github.com/hashicorp/terraform-json"
+    "github.com/stretchr/testify/assert"
+)
+
+func TestParseTerraformActions(t *testing.T) {
+    changes := []*tfjson.ResourceChange{
+        {
+            Type: "databricks_pipeline",
+            Change: &tfjson.Change{
+                Actions: tfjson.Actions{tfjson.ActionCreate},
+            },
+            Name: "create pipeline",
+        },
+        {
+            Type: "databricks_pipeline",
+            Change: &tfjson.Change{
+                Actions: tfjson.Actions{tfjson.ActionDelete},
+            },
+            Name: "delete pipeline",
+        },
+        {
+            Type: "databricks_pipeline",
+            Change: &tfjson.Change{
+                Actions: tfjson.Actions{tfjson.ActionDelete, tfjson.ActionCreate},
+            },
+            Name: "recreate pipeline",
+        },
+        {
+            Type: "databricks_whatever",
+            Change: &tfjson.Change{
+                Actions: tfjson.Actions{tfjson.ActionDelete, tfjson.ActionCreate},
+            },
+            Name: "recreate whatever",
+        },
+    }
+
+    res := parseTerraformActions(changes, func(typ string, actions tfjson.Actions) bool {
+        if typ != "databricks_pipeline" {
+            return false
+        }
+
+        if actions.Delete() || actions.Replace() {
+            return true
+        }
+
+        return false
+    })
+
+    assert.Equal(t, []terraformlib.Action{
+        {
+            Action:       terraformlib.ActionTypeDelete,
+            ResourceType: "databricks_pipeline",
+            ResourceName: "delete pipeline",
+        },
+        {
+            Action:       terraformlib.ActionTypeRecreate,
+            ResourceType: "databricks_pipeline",
+            ResourceName: "recreate pipeline",
+        },
+    }, res)
+}
@@ -68,3 +68,23 @@ func TestComplexVariablesOverride(t *testing.T) {
     require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"])
     require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId)
 }
+
+func TestComplexVariablesOverrideWithMultipleFiles(t *testing.T) {
+    b, diags := loadTargetWithDiags("variables/complex_multiple_files", "dev")
+    require.Empty(t, diags)
+
+    diags = bundle.Apply(context.Background(), b, bundle.Seq(
+        mutator.SetVariables(),
+        mutator.ResolveVariableReferencesInComplexVariables(),
+        mutator.ResolveVariableReferences(
+            "variables",
+        ),
+    ))
+    require.NoError(t, diags.Error())
+    for _, cluster := range b.Config.Resources.Jobs["my_job"].JobClusters {
+        require.Equalf(t, "14.2.x-scala2.11", cluster.NewCluster.SparkVersion, "cluster: %v", cluster.JobClusterKey)
+        require.Equalf(t, "Standard_DS3_v2", cluster.NewCluster.NodeTypeId, "cluster: %v", cluster.JobClusterKey)
+        require.Equalf(t, 4, cluster.NewCluster.NumWorkers, "cluster: %v", cluster.JobClusterKey)
+        require.Equalf(t, "false", cluster.NewCluster.SparkConf["spark.speculation"], "cluster: %v", cluster.JobClusterKey)
+    }
+}
@ -1,6 +1,9 @@
bundle:
  name: python-wheel-local

+workspace:
+  artifact_path: /foo/bar
+
resources:
  jobs:
    test_job:
@ -15,11 +15,10 @@ import (
)

func TestPythonWheelBuild(t *testing.T) {
-	ctx := context.Background()
-	b, err := bundle.Load(ctx, "./python_wheel/python_wheel")
-	require.NoError(t, err)
-
-	diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
+	b := loadTarget(t, "./python_wheel/python_wheel", "default")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, phases.Build())
	require.NoError(t, diags.Error())

	matches, err := filepath.Glob("./python_wheel/python_wheel/my_test_code/dist/my_test_code-*.whl")

@ -32,11 +31,10 @@ func TestPythonWheelBuild(t *testing.T) {
}

func TestPythonWheelBuildAutoDetect(t *testing.T) {
-	ctx := context.Background()
-	b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact")
-	require.NoError(t, err)
-
-	diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
+	b := loadTarget(t, "./python_wheel/python_wheel_no_artifact", "default")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, phases.Build())
	require.NoError(t, diags.Error())

	matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact/dist/my_test_code-*.whl")

@ -49,11 +47,10 @@ func TestPythonWheelBuildAutoDetect(t *testing.T) {
}

func TestPythonWheelBuildAutoDetectWithNotebookTask(t *testing.T) {
-	ctx := context.Background()
-	b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact_notebook")
-	require.NoError(t, err)
-
-	diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
+	b := loadTarget(t, "./python_wheel/python_wheel_no_artifact_notebook", "default")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, phases.Build())
	require.NoError(t, diags.Error())

	matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact_notebook/dist/my_test_code-*.whl")

@ -66,11 +63,10 @@ func TestPythonWheelBuildAutoDetectWithNotebookTask(t *testing.T) {
}

func TestPythonWheelWithDBFSLib(t *testing.T) {
-	ctx := context.Background()
-	b, err := bundle.Load(ctx, "./python_wheel/python_wheel_dbfs_lib")
-	require.NoError(t, err)
-
-	diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
+	b := loadTarget(t, "./python_wheel/python_wheel_dbfs_lib", "default")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, phases.Build())
	require.NoError(t, diags.Error())

	match := libraries.ExpandGlobReferences()

@ -79,11 +75,11 @@ func TestPythonWheelWithDBFSLib(t *testing.T) {
}

func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) {
-	ctx := context.Background()
-	b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact_no_setup")
-	require.NoError(t, err)
-
-	b.Config.Workspace.ArtifactPath = "/foo/bar"
+	b := loadTarget(t, "./python_wheel/python_wheel_no_artifact_no_setup", "default")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, phases.Build())
+	require.NoError(t, diags.Error())

	mockFiler := mockfiler.NewMockFiler(t)
	mockFiler.EXPECT().Write(

@ -94,20 +90,20 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) {
		filer.CreateParentDirectories,
	).Return(nil)

-	u := libraries.UploadWithClient(mockFiler)
-	diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build(), libraries.ExpandGlobReferences(), u))
+	diags = bundle.Apply(ctx, b, bundle.Seq(
+		libraries.ExpandGlobReferences(),
+		libraries.UploadWithClient(mockFiler),
+	))
	require.NoError(t, diags.Error())
	require.Empty(t, diags)

	require.Equal(t, "/Workspace/foo/bar/.internal/my_test_code-0.0.1-py3-none-any.whl", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[0].Libraries[0].Whl)
}

func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) {
-	ctx := context.Background()
-	b, err := bundle.Load(ctx, "./python_wheel/environment_key")
-	require.NoError(t, err)
-
-	diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
+	b := loadTarget(t, "./python_wheel/environment_key", "default")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, phases.Build())
	require.NoError(t, diags.Error())

	matches, err := filepath.Glob("./python_wheel/environment_key/my_test_code/dist/my_test_code-*.whl")

@ -120,11 +116,10 @@ func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) {
}

func TestPythonWheelBuildMultiple(t *testing.T) {
-	ctx := context.Background()
-	b, err := bundle.Load(ctx, "./python_wheel/python_wheel_multiple")
-	require.NoError(t, err)
-
-	diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
+	b := loadTarget(t, "./python_wheel/python_wheel_multiple", "default")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, phases.Build())
	require.NoError(t, diags.Error())

	matches, err := filepath.Glob("./python_wheel/python_wheel_multiple/my_test_code/dist/my_test_code*.whl")

@ -137,11 +132,10 @@ func TestPythonWheelBuildMultiple(t *testing.T) {
}

func TestPythonWheelNoBuild(t *testing.T) {
-	ctx := context.Background()
-	b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_build")
-	require.NoError(t, err)
-
-	diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build()))
+	b := loadTarget(t, "./python_wheel/python_wheel_no_build", "default")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, phases.Build())
	require.NoError(t, diags.Error())

	match := libraries.ExpandGlobReferences()
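Every test above drops the explicit bundle.Load plus phases.Load() pair in favor of a loadTarget helper that loads the bundle and selects a target in one step. A plausible sketch of such a helper, assuming it wraps the loadTargetWithDiags function used earlier in this diff (the actual implementation is not shown here):

// Sketch of a loadTarget-style helper; loadTargetWithDiags is the helper seen above.
func loadTarget(t *testing.T, path, env string) *bundle.Bundle {
	b, diags := loadTargetWithDiags(path, env)
	require.NoError(t, diags.Error())
	return b
}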
@ -0,0 +1,52 @@
+bundle:
+  name: complex-variables-multiple-files
+
+resources:
+  jobs:
+    my_job:
+      job_clusters:
+        - job_cluster_key: key1
+          new_cluster: ${var.cluster1}
+        - job_cluster_key: key2
+          new_cluster: ${var.cluster2}
+        - job_cluster_key: key3
+          new_cluster: ${var.cluster3}
+        - job_cluster_key: key4
+          new_cluster: ${var.cluster4}
+variables:
+  cluster1:
+    type: complex
+    description: "A cluster definition"
+  cluster2:
+    type: complex
+    description: "A cluster definition"
+  cluster3:
+    type: complex
+    description: "A cluster definition"
+  cluster4:
+    type: complex
+    description: "A cluster definition"
+
+include:
+  - ./variables/*.yml
+
+
+targets:
+  default:
+  dev:
+    variables:
+      cluster3:
+        spark_version: "14.2.x-scala2.11"
+        node_type_id: "Standard_DS3_v2"
+        num_workers: 4
+        spark_conf:
+          spark.speculation: false
+          spark.databricks.delta.retentionDurationCheck.enabled: false
+      cluster4:
+        default:
+          spark_version: "14.2.x-scala2.11"
+          node_type_id: "Standard_DS3_v2"
+          num_workers: 4
+          spark_conf:
+            spark.speculation: false
+            spark.databricks.delta.retentionDurationCheck.enabled: false
@ -0,0 +1,19 @@
+targets:
+  default:
+  dev:
+    variables:
+      cluster1:
+        spark_version: "14.2.x-scala2.11"
+        node_type_id: "Standard_DS3_v2"
+        num_workers: 4
+        spark_conf:
+          spark.speculation: false
+          spark.databricks.delta.retentionDurationCheck.enabled: false
+      cluster2:
+        default:
+          spark_version: "14.2.x-scala2.11"
+          node_type_id: "Standard_DS3_v2"
+          num_workers: 4
+          spark_conf:
+            spark.speculation: false
+            spark.databricks.delta.retentionDurationCheck.enabled: false
@ -124,8 +124,13 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) {
	}, nil)

	clustersApi := mockWorkspaceClient.GetMockClustersAPI()
-	clustersApi.EXPECT().GetByClusterName(mock.Anything, "some-test-cluster").Return(&compute.ClusterDetails{
-		ClusterId: "4321",
+	clustersApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
+		FilterBy: &compute.ListClustersFilterBy{
+			ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
+		},
+	}).Return([]compute.ClusterDetails{
+		{ClusterId: "4321", ClusterName: "some-test-cluster"},
+		{ClusterId: "9876", ClusterName: "some-other-cluster"},
	}, nil)

	clusterPoliciesApi := mockWorkspaceClient.GetMockClusterPoliciesAPI()
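The mock now expects a ListAll call with a ClusterSources filter instead of GetByClusterName, so name-to-ID resolution has to happen client-side over the returned slice. A hedged sketch of what that resolution could look like (resolveClusterID is a hypothetical name; the field names come from the databricks-sdk-go compute package as used above):

// Hypothetical sketch: resolve a cluster name to an ID from a ListAll result.
func resolveClusterID(clusters []compute.ClusterDetails, name string) (string, error) {
	var ids []string
	for _, c := range clusters {
		if c.ClusterName == name {
			ids = append(ids, c.ClusterId)
		}
	}
	switch len(ids) {
	case 1:
		return ids[0], nil
	case 0:
		return "", fmt.Errorf("cluster named %q does not exist", name)
	default:
		return "", fmt.Errorf("there are %d clusters named %q", len(ids), name)
	}
}

Listing clusters also makes duplicate names detectable, which is presumably why the mock returns two entries.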
@ -19,7 +19,7 @@ import (

func promptForProfile(ctx context.Context, defaultValue string) (string, error) {
	if !cmdio.IsInTTY(ctx) {
-		return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify a profile using --profile")
+		return "", nil
	}

	prompt := cmdio.Prompt(ctx)
@ -29,6 +29,12 @@ func (f *progressLoggerFlag) resolveModeDefault(format flags.ProgressLogFormat)
}

func (f *progressLoggerFlag) initializeContext(ctx context.Context) (context.Context, error) {
+	// No need to initialize the logger if it's already set in the context. This
+	// happens in unit tests where the logger is setup as a fixture.
+	if _, ok := cmdio.FromContext(ctx); ok {
+		return ctx, nil
+	}
+
	if f.log.level.String() != "disabled" && f.log.file.String() == "stderr" &&
		f.ProgressLogFormat == flags.ModeInplace {
		return nil, fmt.Errorf("inplace progress logging cannot be used when log-file is stderr")
go.mod
@ -3,7 +3,7 @@ module github.com/databricks/cli
go 1.22

require (
-	github.com/Masterminds/semver/v3 v3.2.1 // MIT
+	github.com/Masterminds/semver/v3 v3.3.0 // MIT
	github.com/briandowns/spinner v1.23.1 // Apache 2.0
	github.com/databricks/databricks-sdk-go v0.45.0 // Apache 2.0
	github.com/fatih/color v1.17.0 // MIT

@ -23,10 +23,10 @@ require (
	github.com/stretchr/testify v1.9.0 // MIT
	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
	golang.org/x/mod v0.20.0
-	golang.org/x/oauth2 v0.22.0
+	golang.org/x/oauth2 v0.23.0
	golang.org/x/sync v0.8.0
-	golang.org/x/term v0.23.0
+	golang.org/x/term v0.24.0
-	golang.org/x/text v0.17.0
+	golang.org/x/text v0.18.0
	gopkg.in/ini.v1 v1.67.0 // Apache 2.0
	gopkg.in/yaml.v3 v3.0.1
)

@ -61,7 +61,7 @@ require (
	go.opentelemetry.io/otel/trace v1.24.0 // indirect
	golang.org/x/crypto v0.24.0 // indirect
	golang.org/x/net v0.26.0 // indirect
-	golang.org/x/sys v0.23.0 // indirect
+	golang.org/x/sys v0.25.0 // indirect
	golang.org/x/time v0.5.0 // indirect
	google.golang.org/api v0.182.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect
@ -8,8 +8,8 @@ cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg=

@ -191,8 +191,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
-golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@ -208,14 +208,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
-golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -11,6 +11,7 @@ import (
	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/google/uuid"
+	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@ -101,12 +102,15 @@ func TestAccAbortBind(t *testing.T) {
		destroyBundle(t, ctx, bundleRoot)
	})

+	// Bind should fail because prompting is not possible.
	t.Setenv("BUNDLE_ROOT", bundleRoot)
+	t.Setenv("TERM", "dumb")
	c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId))

-	// Simulate user aborting the bind. This is done by not providing any input to the prompt in non-interactive mode.
+	// Expect error suggesting to use --auto-approve
	_, _, err = c.Run()
-	require.ErrorContains(t, err, "failed to bind the resource")
+	assert.ErrorContains(t, err, "failed to bind the resource")
+	assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")

	err = deployBundle(t, ctx, bundleRoot)
	require.NoError(t, err)
@ -0,0 +1,8 @@
+{
+    "properties": {
+        "unique_id": {
+            "type": "string",
+            "description": "Unique ID for the schema and pipeline names"
+        }
+    }
+}
@ -0,0 +1,25 @@
+bundle:
+  name: "bundle-playground"
+
+variables:
+  catalog:
+    description: The catalog the DLT pipeline should use.
+    default: main
+
+
+resources:
+  pipelines:
+    foo:
+      name: test-pipeline-{{.unique_id}}
+      libraries:
+        - notebook:
+            path: ./nb.sql
+      development: true
+      catalog: ${var.catalog}
+
+include:
+  - "*.yml"
+
+targets:
+  development:
+    default: true
@ -0,0 +1,2 @@
+-- Databricks notebook source
+select 1
@ -120,8 +120,97 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) {
	t.Setenv("BUNDLE_ROOT", bundleRoot)
	t.Setenv("TERM", "dumb")
	c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock")
-	stdout, _, err := c.Run()
+	stdout, stderr, err := c.Run()

	assert.EqualError(t, err, root.ErrAlreadyPrinted.Error())
+	assert.Contains(t, stderr.String(), "The following UC schemas will be deleted or recreated. Any underlying data may be lost:\n delete schema bar")
+	assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
+}
+
+func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) {
+	ctx, wt := acc.WorkspaceTest(t)
+	w := wt.W
+
+	nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV"))
+	uniqueId := uuid.New().String()
+	bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{
+		"unique_id":     uniqueId,
+		"node_type_id":  nodeTypeId,
+		"spark_version": defaultSparkVersion,
+	})
+	require.NoError(t, err)
+
+	// deploy pipeline
+	err = deployBundle(t, ctx, bundleRoot)
+	require.NoError(t, err)
+
+	// assert pipeline is created
+	pipelineName := "test-bundle-pipeline-" + uniqueId
+	pipeline, err := w.Pipelines.GetByName(ctx, pipelineName)
+	require.NoError(t, err)
+	assert.Equal(t, pipeline.Name, pipelineName)
+
+	// assert job is created
+	jobName := "test-bundle-job-" + uniqueId
+	job, err := w.Jobs.GetBySettingsName(ctx, jobName)
+	require.NoError(t, err)
+	assert.Equal(t, job.Settings.Name, jobName)
+
+	// delete resources.yml
+	err = os.Remove(filepath.Join(bundleRoot, "resources.yml"))
+	require.NoError(t, err)
+
+	// Redeploy the bundle. Expect it to fail because deleting the pipeline requires --auto-approve.
+	t.Setenv("BUNDLE_ROOT", bundleRoot)
+	t.Setenv("TERM", "dumb")
+	c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock")
+	stdout, stderr, err := c.Run()
+
+	assert.EqualError(t, err, root.ErrAlreadyPrinted.Error())
+	assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following DLT Pipelines along with the
+Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the Pipelines will
+restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline
+properties such as the 'catalog' or 'storage' are changed:
+delete pipeline bar`)
+	assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
+}
+
+func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) {
+	ctx, wt := acc.UcWorkspaceTest(t)
+	w := wt.W
+	uniqueId := uuid.New().String()
+
+	bundleRoot, err := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{
+		"unique_id": uniqueId,
+	})
+	require.NoError(t, err)
+
+	err = deployBundle(t, ctx, bundleRoot)
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		destroyBundle(t, ctx, bundleRoot)
+	})
+
+	// Assert the pipeline is created
+	pipelineName := "test-pipeline-" + uniqueId
+	pipeline, err := w.Pipelines.GetByName(ctx, pipelineName)
+	require.NoError(t, err)
+	require.Equal(t, pipelineName, pipeline.Name)
+
+	// Redeploy the bundle, pointing the DLT pipeline to a different UC catalog.
+	t.Setenv("BUNDLE_ROOT", bundleRoot)
+	t.Setenv("TERM", "dumb")
+	c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"")
+	stdout, stderr, err := c.Run()
+
+	assert.EqualError(t, err, root.ErrAlreadyPrinted.Error())
+	assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following DLT Pipelines along with the
+Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the Pipelines will
+restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline
+properties such as the 'catalog' or 'storage' are changed:
+recreate pipeline foo`)
	assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
}
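Both tests force TERM=dumb so the deploy cannot prompt, then assert on the two streams separately: the destructive-action summary goes to stderr while the actionable error lands in stdout. A sketch of the non-interactive branch these tests exercise, using a hypothetical canPrompt helper for the TTY check (the real guard may differ):

// Hypothetical sketch of the branch exercised when prompting is unavailable.
if !canPrompt(ctx) {
	return false, fmt.Errorf("the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
}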
@ -70,7 +70,7 @@ type visitOptions struct {

func visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) {
	if len(suffix) == 0 {
-		return opts.fn(prefix, v)
+		return opts.fn(slices.Clone(prefix), v)
	}

	// Initialize prefix if it is empty.
@ -21,7 +21,7 @@ func Foreach(fn MapFunc) MapFunc {
		for _, pair := range m.Pairs() {
			pk := pair.Key
			pv := pair.Value
-			nv, err := fn(append(p, Key(pk.MustString())), pv)
+			nv, err := fn(p.Append(Key(pk.MustString())), pv)
			if err != nil {
				return InvalidValue, err
			}

@ -32,7 +32,7 @@ func Foreach(fn MapFunc) MapFunc {
		s := slices.Clone(v.MustSequence())
		for i, value := range s {
			var err error
-			s[i], err = fn(append(p, Index(i)), value)
+			s[i], err = fn(p.Append(Index(i)), value)
			if err != nil {
				return InvalidValue, err
			}
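Both call sites switch from the built-in append to p.Append because append may reuse the parent slice's spare capacity: two child paths built from the same parent can then share, and overwrite, the same backing array. A self-contained illustration of the pitfall with plain int slices:

package main

import "fmt"

func main() {
	// Length 1, capacity 4: appends have spare capacity to reuse.
	base := make([]int, 1, 4)

	a := append(base, 1) // writes 1 into base's backing array
	b := append(base, 2) // reuses the same slot, overwriting a[1]

	fmt.Println(a[1], b[1]) // prints "2 2": both slices alias one array
}

Copying before appending, as p.Append and the slices.Clone call in the visit hunk above do, avoids the aliasing; the new TestVisitCallbackPathCopy below verifies exactly that.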
@ -0,0 +1,36 @@
+package dyn_test
+
+import (
+	"testing"
+
+	"github.com/databricks/cli/libs/dyn"
+	assert "github.com/databricks/cli/libs/dyn/dynassert"
+)
+
+func TestVisitCallbackPathCopy(t *testing.T) {
+	vin := dyn.V(map[string]dyn.Value{
+		"foo": dyn.V(42),
+		"bar": dyn.V(43),
+	})
+
+	var paths []dyn.Path
+
+	// The callback should receive a copy of the path.
+	// If the same underlying value is used, all collected paths will be the same.
+	// This test uses `MapByPattern` to collect all paths in the map.
+	// Visit itself doesn't have public functions and we exclusively use black-box testing for this package.
+	_, _ = dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyKey()), func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+		paths = append(paths, p)
+		return v, nil
+	})
+
+	// Verify that the paths retained their original values.
+	var strings []string
+	for _, p := range paths {
+		strings = append(strings, p.String())
+	}
+	assert.ElementsMatch(t, strings, []string{
+		"foo",
+		"bar",
+	})
+}
@ -1,7 +1,7 @@
{{define "latest_lts_dbr_version" -}}
-13.3.x-scala2.12
+15.4.x-scala2.12
{{- end}}

{{define "latest_lts_db_connect_version_spec" -}}
->=13.3,<13.4
+>=15.4,<15.5
{{- end}}
@ -12,8 +12,10 @@ include:
targets:
  dev:
    default: true
-    # We use 'mode: development' to indicate this is a personal development copy.
-    # Any job schedules and triggers are paused by default.
+    # The default target uses 'mode: development' to create a development copy.
+    # - Deployed resources get prefixed with '[dev my_user_name]'
+    # - Any job schedules and triggers are paused by default.
+    # See also https://docs.databricks.com/dev-tools/bundles/deployment-modes.html.
    mode: development
    workspace:
      host: {{workspace_host}}

@ -22,11 +24,10 @@ targets:
    mode: production
    workspace:
      host: {{workspace_host}}
-      # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy.
+      # We explicitly specify /Users/{{user_name}} to make sure we only have a single copy.
      root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target}
-    {{- if not is_service_principal}}
+    permissions:
+      - {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
+        level: CAN_MANAGE
    run_as:
-      # This runs as {{user_name}} in production. We could also use a service principal here
-      # using service_principal_name (see the Databricks documentation).
-      user_name: {{user_name}}
-    {{- end}}
+      {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
@ -1,7 +1,7 @@
{{define "latest_lts_dbr_version" -}}
-13.3.x-scala2.12
+15.4.x-scala2.12
{{- end}}

{{define "latest_lts_db_connect_version_spec" -}}
->=13.3,<13.4
+>=15.4,<15.5
{{- end}}
@ -7,44 +7,24 @@ include:
  - resources/*.yml

targets:
-  # The 'dev' target, for development purposes. This target is the default.
  dev:
-    # We use 'mode: development' to indicate this is a personal development copy:
+    # The default target uses 'mode: development' to create a development copy.
    # - Deployed resources get prefixed with '[dev my_user_name]'
-    # - Any job schedules and triggers are paused by default
-    # - The 'development' mode is used for Delta Live Tables pipelines
+    # - Any job schedules and triggers are paused by default.
+    # See also https://docs.databricks.com/dev-tools/bundles/deployment-modes.html.
    mode: development
    default: true
    workspace:
      host: {{workspace_host}}

-  ## Optionally, there could be a 'staging' target here.
-  ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.)
-  #
-  # staging:
-  #  workspace:
-  #    host: {{workspace_host}}
-
-  # The 'prod' target, used for production deployment.
  prod:
-    # We use 'mode: production' to indicate this is a production deployment.
-    # Doing so enables strict verification of the settings below.
    mode: production
    workspace:
      host: {{workspace_host}}
-      # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy.
+      # We explicitly specify /Users/{{user_name}} to make sure we only have a single copy.
-    {{- /*
-    Internal note 2023-12: CLI versions v0.211.0 and before would show an error when using `mode: production`
-    with a path that doesn't say "/Shared". For now, we'll include an extra comment in the template
-    to explain that customers should update if they see this.
-    */}}
-      # If this path results in an error, please make sure you have a recent version of the CLI installed.
      root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target}
+    permissions:
+      - {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
+        level: CAN_MANAGE
    run_as:
-      {{- if is_service_principal}}
-      service_principal_name: {{user_name}}
-      {{- else}}
-      # This runs as {{user_name}} in production. We could also use a service principal here,
-      # see https://docs.databricks.com/dev-tools/bundles/permissions.html.
-      user_name: {{user_name}}
-      {{- end}}
+      {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
@ -1,7 +1,7 @@
{{define "latest_lts_dbr_version" -}}
-13.3.x-scala2.12
+15.4.x-scala2.12
{{- end}}

{{define "latest_lts_db_connect_version_spec" -}}
->=13.3,<13.4
+>=15.4,<15.5
{{- end}}
@ -18,16 +18,16 @@ variables:
{{- $dev_schema := .shared_schema }}
{{- $prod_schema := .shared_schema }}
{{- if (regexp "^yes").MatchString .personal_schemas}}
{{- $dev_schema = "${workspace.current_user.short_name}"}}
{{- $prod_schema = "default"}}
{{- end}}

-# Deployment targets.
targets:
-  # The 'dev' target, for development purposes. This target is the default.
  dev:
-    # We use 'mode: development' to indicate this is a personal development copy.
-    # Any job schedules and triggers are paused by default
+    # The default target uses 'mode: development' to create a development copy.
+    # - Deployed resources get prefixed with '[dev my_user_name]'
+    # - Any job schedules and triggers are paused by default.
+    # See also https://docs.databricks.com/dev-tools/bundles/deployment-modes.html.
    mode: development
    default: true
    workspace:

@ -37,35 +37,18 @@ targets:
      catalog: {{.default_catalog}}
      schema: {{$dev_schema}}

-  ## Optionally, there could be a 'staging' target here.
-  ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.)
-  #
-  # staging:
-  #  workspace:
-  #    host: {{workspace_host}}
-
-  # The 'prod' target, used for production deployment.
  prod:
-    # We use 'mode: production' to indicate this is a production deployment.
-    # Doing so enables strict verification of the settings below.
    mode: production
    workspace:
      host: {{workspace_host}}
-      # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy.
+      # We explicitly specify /Users/{{user_name}} to make sure we only have a single copy.
-    {{- /*
-    Internal note 2023-12: CLI versions v0.211.0 and before would show an error when using `mode: production`
-    with a path that doesn't say "/Shared". For now, we'll include an extra comment in the template
-    to explain that customers should update if they see this.
-    */}}
-      # If this path results in an error, please make sure you have a recent version of the CLI installed.
      root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target}
    variables:
      warehouse_id: {{index ((regexp "[^/]+$").FindStringSubmatch .http_path) 0}}
      catalog: {{.default_catalog}}
      schema: {{$prod_schema}}
-    {{- if not is_service_principal}}
+    permissions:
+      - {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
+        level: CAN_MANAGE
    run_as:
-      # This runs as {{user_name}} in production. We could also use a service principal here
-      # using service_principal_name (see https://docs.databricks.com/en/dev-tools/bundles/permissions.html).
-      user_name: {{user_name}}
-    {{end -}}
+      {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
@ -1,10 +1,9 @@
-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml)
-{{- /* We can't use a materialized view here since they don't support 'create or refresh' yet.*/}}

USE CATALOG {{"{{"}}catalog{{"}}"}};
USE IDENTIFIER({{"{{"}}schema{{"}}"}});

-CREATE OR REPLACE VIEW
+CREATE OR REPLACE MATERIALIZED VIEW
  orders_daily
AS SELECT
  order_date, count(*) AS number_of_orders