mirror of https://github.com/databricks/cli.git
Merge branch 'main' into schuettm/support-symlinks
commit 93c026d5c1
CHANGELOG.md | 23
@@ -1,5 +1,28 @@
 # Version changelog
 
+## [Release] Release v0.227.1
+
+CLI:
+* Disable prompt for storage-credentials get command ([#1723](https://github.com/databricks/cli/pull/1723)).
+
+Bundles:
+* Do not treat empty path as a local path ([#1717](https://github.com/databricks/cli/pull/1717)).
+* Correctly mark PyPI package name specs with multiple specifiers as remote libraries ([#1725](https://github.com/databricks/cli/pull/1725)).
+* Improve error handling for /Volumes paths in mode: development ([#1716](https://github.com/databricks/cli/pull/1716)).
+
+Internal:
+* Ignore CLI version check on development builds of the CLI ([#1714](https://github.com/databricks/cli/pull/1714)).
+
+API Changes:
+* Added `databricks resource-quotas` command group.
+* Added `databricks policy-compliance-for-clusters` command group.
+* Added `databricks policy-compliance-for-jobs` command group.
+
+OpenAPI commit 3eae49b444cac5a0118a3503e5b7ecef7f96527a (2024-08-21)
+Dependency updates:
+* Bump github.com/databricks/databricks-sdk-go from 0.44.0 to 0.45.0 ([#1719](https://github.com/databricks/cli/pull/1719)).
+* Revert hc-install version to 0.7.0 ([#1711](https://github.com/databricks/cli/pull/1711)).
+
 ## [Release] Release v0.227.0
 
 CLI:
@@ -6,5 +6,5 @@ type Deployment struct {
     FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"`
 
     // Lock configures locking behavior on deployment.
-    Lock Lock `json:"lock"`
+    Lock Lock `json:"lock,omitempty"`
 }
@@ -64,6 +64,7 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) {
 }
 
 func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
+    var diags diag.Diagnostics
     p := b.Config.Presets
     u := b.Config.Workspace.CurrentUser
 
@@ -74,44 +75,56 @@ func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
     // status to UNPAUSED at the level of an individual object, whic hwas
     // historically allowed.)
     if p.TriggerPauseStatus == config.Unpaused {
-        return diag.Diagnostics{{
+        diags = diags.Append(diag.Diagnostic{
             Severity:  diag.Error,
             Summary:   "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default",
             Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
-        }}
+        })
     }
 
     // Make sure this development copy has unique names and paths to avoid conflicts
     if path := findNonUserPath(b); path != "" {
-        return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
+        if path == "artifact_path" && strings.HasPrefix(b.Config.Workspace.ArtifactPath, "/Volumes") {
+            // For Volumes paths we recommend including the current username as a substring
+            diags = diags.Extend(diag.Errorf("%s should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'", path))
+        } else {
+            // For non-Volumes paths recommend simply putting things in the home folder
+            diags = diags.Extend(diag.Errorf("%s must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'", path))
+        }
     }
     if p.NamePrefix != "" && !strings.Contains(p.NamePrefix, u.ShortName) && !strings.Contains(p.NamePrefix, u.UserName) {
         // Resources such as pipelines require a unique name, e.g. '[dev steve] my_pipeline'.
         // For this reason we require the name prefix to contain the current username;
        // it's a pitfall for users if they don't include it and later find out that
         // only a single user can do development deployments.
-        return diag.Diagnostics{{
+        diags = diags.Append(diag.Diagnostic{
             Severity:  diag.Error,
             Summary:   "prefix should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'",
             Locations: []dyn.Location{b.Config.GetLocation("presets.name_prefix")},
-        }}
+        })
     }
-    return nil
+    return diags
 }
 
+// findNonUserPath finds the first workspace path such as root_path that doesn't
+// contain the current username or current user's shortname.
 func findNonUserPath(b *bundle.Bundle) string {
-    username := b.Config.Workspace.CurrentUser.UserName
+    containsName := func(path string) bool {
+        username := b.Config.Workspace.CurrentUser.UserName
+        shortname := b.Config.Workspace.CurrentUser.ShortName
+        return strings.Contains(path, username) || strings.Contains(path, shortname)
+    }
 
-    if b.Config.Workspace.RootPath != "" && !strings.Contains(b.Config.Workspace.RootPath, username) {
+    if b.Config.Workspace.RootPath != "" && !containsName(b.Config.Workspace.RootPath) {
         return "root_path"
     }
-    if b.Config.Workspace.StatePath != "" && !strings.Contains(b.Config.Workspace.StatePath, username) {
+    if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
         return "state_path"
     }
-    if b.Config.Workspace.FilePath != "" && !strings.Contains(b.Config.Workspace.FilePath, username) {
+    if b.Config.Workspace.FilePath != "" && !containsName(b.Config.Workspace.FilePath) {
         return "file_path"
     }
-    if b.Config.Workspace.ArtifactPath != "" && !strings.Contains(b.Config.Workspace.ArtifactPath, username) {
+    if b.Config.Workspace.ArtifactPath != "" && !containsName(b.Config.Workspace.ArtifactPath) {
         return "artifact_path"
     }
     return ""
@@ -230,10 +230,20 @@ func TestValidateDevelopmentMode(t *testing.T) {
     diags := validateDevelopmentMode(b)
     require.NoError(t, diags.Error())
 
+    // Test with /Volumes path
+    b = mockBundle(config.Development)
+    b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/lennart/libs"
+    diags = validateDevelopmentMode(b)
+    require.NoError(t, diags.Error())
+    b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/libs"
+    diags = validateDevelopmentMode(b)
+    require.ErrorContains(t, diags.Error(), "artifact_path should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'")
+
     // Test with a bundle that has a non-user path
+    b = mockBundle(config.Development)
     b.Config.Workspace.RootPath = "/Shared/.bundle/x/y/state"
     diags = validateDevelopmentMode(b)
-    require.ErrorContains(t, diags.Error(), "root_path")
+    require.ErrorContains(t, diags.Error(), "root_path must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'")
 
     // Test with a bundle that has an unpaused trigger pause status
     b = mockBundle(config.Development)
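Note on the pattern above: validateDevelopmentMode now accumulates diagnostics and returns them all at once instead of returning on the first problem, so a user sees every misconfiguration in a single run. A minimal sketch of that accumulate-then-return pattern, using only the diag API calls visible in this diff (the github.com/databricks/cli/libs/diag import path is an assumption; this is an illustration, not an excerpt from the CLI):

package main

import (
    "fmt"

    "github.com/databricks/cli/libs/diag"
)

// validate reports every problem instead of stopping at the first one.
func validate(namePrefix, rootPath string) diag.Diagnostics {
    var diags diag.Diagnostics
    if namePrefix == "" {
        diags = diags.Append(diag.Diagnostic{
            Severity: diag.Error,
            Summary:  "name prefix must not be empty",
        })
    }
    if rootPath == "" {
        // diag.Errorf returns a one-element Diagnostics slice, hence Extend.
        diags = diags.Extend(diag.Errorf("root path must not be empty"))
    }
    return diags // an empty slice means no findings
}

func main() {
    // Both checks fail here, and both findings are reported.
    for _, d := range validate("", "") {
        fmt.Println(d.Summary)
    }
}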
@@ -1,15 +1,21 @@
 package python
 
 import (
+    "bytes"
     "context"
     "encoding/json"
     "errors"
     "fmt"
+    "io"
     "os"
     "path/filepath"
 
-    "github.com/databricks/cli/libs/python"
     "github.com/databricks/databricks-sdk-go/logger"
+    "github.com/fatih/color"
 
+    "strings"
+
+    "github.com/databricks/cli/libs/python"
+
     "github.com/databricks/cli/bundle/env"
@@ -169,7 +175,11 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
         return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err)
     }
 
-    stderrWriter := newLogWriter(ctx, "stderr: ")
+    stderrBuf := bytes.Buffer{}
+    stderrWriter := io.MultiWriter(
+        newLogWriter(ctx, "stderr: "),
+        &stderrBuf,
+    )
     stdoutWriter := newLogWriter(ctx, "stdout: ")
 
     _, processErr := process.Background(
@@ -197,7 +207,13 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
     // process can fail without reporting errors in diagnostics file or creating it, for instance,
     // venv doesn't have PyDABs library installed
     if processErr != nil {
-        return dyn.InvalidValue, diag.Errorf("python mutator process failed: %sw, use --debug to enable logging", processErr)
+        diagnostic := diag.Diagnostic{
+            Severity: diag.Error,
+            Summary:  fmt.Sprintf("python mutator process failed: %q, use --debug to enable logging", processErr),
+            Detail:   explainProcessErr(stderrBuf.String()),
+        }
+
+        return dyn.InvalidValue, diag.Diagnostics{diagnostic}
     }
 
     // or we can fail to read diagnostics file, that should always be created
@@ -205,15 +221,40 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
         return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr)
     }
 
-    output, err := loadOutputFile(rootPath, outputPath)
-    if err != nil {
-        return dyn.InvalidValue, diag.Errorf("failed to load Python mutator output: %s", err)
-    }
+    output, outputDiags := loadOutputFile(rootPath, outputPath)
+    pythonDiagnostics = pythonDiagnostics.Extend(outputDiags)
 
     // we pass through pythonDiagnostic because it contains warnings
     return output, pythonDiagnostics
 }
 
+const installExplanation = `If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
+and that the wheel is installed in the Python environment:
+
+  $ .venv/bin/pip install -e .
+
+If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
+or activate the environment before running CLI commands:
+
+  experimental:
+    pydabs:
+      venv_path: .venv
+`
+
+// explainProcessErr provides additional explanation for common errors.
+// It's meant to be the best effort, and not all errors are covered.
+// Output should be used only used for error reporting.
+func explainProcessErr(stderr string) string {
+    // implemented in cpython/Lib/runpy.py and portable across Python 3.x, including pypy
+    if strings.Contains(stderr, "Error while finding module specification for 'databricks.bundles.build'") {
+        summary := color.CyanString("Explanation: ") + "'databricks-pydabs' library is not installed in the Python environment.\n"
+
+        return stderr + "\n" + summary + "\n" + installExplanation
+    }
+
+    return stderr
+}
+
 func writeInputFile(inputPath string, input dyn.Value) error {
     // we need to marshal dyn.Value instead of bundle.Config to JSON to support
     // non-string fields assigned with bundle variables
@@ -225,10 +266,10 @@ func writeInputFile(inputPath string, input dyn.Value) error {
     return os.WriteFile(inputPath, rootConfigJson, 0600)
 }
 
-func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) {
+func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) {
     outputFile, err := os.Open(outputPath)
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to open output file: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err))
     }
 
     defer outputFile.Close()
@@ -243,27 +284,34 @@ func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) {
     // for that, we pass virtualPath instead of outputPath as file location
     virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml"))
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to get absolute path: %w", err))
     }
 
     generated, err := yamlloader.LoadYAML(virtualPath, outputFile)
     if err != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to parse output file: %w", err)
+        return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to parse output file: %w", err))
     }
 
-    normalized, diagnostic := convert.Normalize(config.Root{}, generated)
-    if diagnostic.Error() != nil {
-        return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %w", diagnostic.Error())
-    }
+    return strictNormalize(config.Root{}, generated)
+}
+
+func strictNormalize(dst any, generated dyn.Value) (dyn.Value, diag.Diagnostics) {
+    normalized, diags := convert.Normalize(dst, generated)
 
     // warnings shouldn't happen because output should be already normalized
     // when it happens, it's a bug in the mutator, and should be treated as an error
-    for _, d := range diagnostic.Filter(diag.Warning) {
-        return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %s", d.Summary)
+    strictDiags := diag.Diagnostics{}
+
+    for _, d := range diags {
+        if d.Severity == diag.Warning {
+            d.Severity = diag.Error
+        }
+
+        strictDiags = strictDiags.Append(d)
     }
 
-    return normalized, nil
+    return normalized, strictDiags
 }
 
 // loadDiagnosticsFile loads diagnostics from a file.
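Note: the change above tees the Python subprocess's stderr into a buffer with io.MultiWriter, so the output is both streamed to the log and kept for the error detail. A self-contained sketch of the same pattern using only the standard library (log.Writer() stands in for the CLI's newLogWriter helper):

package main

import (
    "bytes"
    "fmt"
    "io"
    "log"
    "os/exec"
)

func main() {
    // Keep a copy of stderr for later inspection while still streaming it live.
    var stderrBuf bytes.Buffer
    stderrWriter := io.MultiWriter(
        log.Writer(), // stand-in for newLogWriter(ctx, "stderr: ")
        &stderrBuf,
    )

    cmd := exec.Command("python3", "-m", "does.not.exist")
    cmd.Stderr = stderrWriter
    if err := cmd.Run(); err != nil {
        // The buffered stderr can now be attached to a diagnostic, as the
        // mutator does via explainProcessErr(stderrBuf.String()).
        fmt.Printf("process failed: %v\nstderr was:\n%s", err, stderrBuf.String())
    }
}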
@@ -10,6 +10,8 @@ import (
     "runtime"
     "testing"
 
+    "github.com/databricks/cli/libs/dyn/convert"
+
     "github.com/databricks/cli/libs/dyn/merge"
 
     "github.com/databricks/cli/bundle/env"
@@ -255,7 +257,7 @@ func TestPythonMutator_badOutput(t *testing.T) {
     mutator := PythonMutator(PythonMutatorPhaseLoad)
     diag := bundle.Apply(ctx, b, mutator)
 
-    assert.EqualError(t, diag.Error(), "failed to load Python mutator output: failed to normalize output: unknown field: unknown_property")
+    assert.EqualError(t, diag.Error(), "unknown field: unknown_property")
 }
 
 func TestPythonMutator_disabled(t *testing.T) {
@@ -546,6 +548,46 @@ func TestInterpreterPath(t *testing.T) {
     }
 }
 
+func TestStrictNormalize(t *testing.T) {
+    // NB: there is no way to trigger diag.Error, so we don't test it
+
+    type TestStruct struct {
+        A int `json:"a"`
+    }
+
+    value := dyn.NewValue(map[string]dyn.Value{"A": dyn.NewValue("abc", nil)}, nil)
+
+    _, diags := convert.Normalize(TestStruct{}, value)
+    _, strictDiags := strictNormalize(TestStruct{}, value)
+
+    assert.False(t, diags.HasError())
+    assert.True(t, strictDiags.HasError())
+}
+
+func TestExplainProcessErr(t *testing.T) {
+    stderr := "/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')\n"
+    expected := `/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')
+
+Explanation: 'databricks-pydabs' library is not installed in the Python environment.
+
+If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
+and that the wheel is installed in the Python environment:
+
+  $ .venv/bin/pip install -e .
+
+If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
+or activate the environment before running CLI commands:
+
+  experimental:
+    pydabs:
+      venv_path: .venv
+`
+
+    out := explainProcessErr(stderr)
+
+    assert.Equal(t, expected, out)
+}
+
 func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context {
     ctx := context.Background()
     ctx, stub := process.WithStub(ctx)
@@ -10,7 +10,6 @@ import (
     "github.com/databricks/cli/libs/dyn"
     "github.com/databricks/cli/libs/dyn/convert"
     "github.com/databricks/cli/libs/dyn/dynvar"
-    "github.com/databricks/cli/libs/log"
 )
 
 type resolveVariableReferences struct {
@@ -124,6 +123,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
     // We rewrite it here to make the resolution logic simpler.
     varPath := dyn.NewPath(dyn.Key("var"))
 
+    var diags diag.Diagnostics
     err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
         // Synthesize a copy of the root that has all fields that are present in the type
         // but not set in the dynamic value set to their corresponding empty value.
@@ -180,14 +180,13 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
 
         // Normalize the result because variable resolution may have been applied to non-string fields.
         // For example, a variable reference may have been resolved to a integer.
-        root, diags := convert.Normalize(b.Config, root)
-        for _, diag := range diags {
-            // This occurs when a variable's resolved value is incompatible with the field's type.
-            // Log a warning until we have a better way to surface these diagnostics to the user.
-            log.Warnf(ctx, "normalization diagnostic: %s", diag.Summary)
-        }
+        root, normaliseDiags := convert.Normalize(b.Config, root)
+        diags = diags.Extend(normaliseDiags)
         return root, nil
     })
 
-    return diag.FromErr(err)
+    if err != nil {
+        diags = diags.Extend(diag.FromErr(err))
+    }
+    return diags
 }
@@ -69,6 +69,11 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     // Remove output starting from Warning until end of output
     output = output[:bytes.Index([]byte(output), []byte("Warning:"))]
     cmdio.LogString(ctx, output)
 
+    if !cmdio.IsPromptSupported(ctx) {
+        return diag.Errorf("This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed.")
+    }
+
     ans, err := cmdio.AskYesOrNo(ctx, "Confirm import changes? Changes will be remotely applied only after running 'bundle deploy'.")
     if err != nil {
         return diag.FromErr(err)
@@ -111,6 +111,13 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error {
         environ["PATH"] = path
     }
 
+    // Include $AZURE_CONFIG_FILE in set of environment variables to pass along.
+    // This is set in Azure DevOps by the AzureCLI@2 task.
+    azureConfigFile, ok := env.Lookup(ctx, "AZURE_CONFIG_FILE")
+    if ok {
+        environ["AZURE_CONFIG_FILE"] = azureConfigFile
+    }
+
     // Include $TF_CLI_CONFIG_FILE to override terraform provider in development.
     // See: https://developer.hashicorp.com/terraform/cli/config/config-file#explicit-installation-method-configuration
     devConfigFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE")
@@ -269,19 +269,20 @@ func TestSetUserAgentExtraEnvVar(t *testing.T) {
 }
 
 func TestInheritEnvVars(t *testing.T) {
-    env := map[string]string{}
-
     t.Setenv("HOME", "/home/testuser")
     t.Setenv("PATH", "/foo:/bar")
     t.Setenv("TF_CLI_CONFIG_FILE", "/tmp/config.tfrc")
+    t.Setenv("AZURE_CONFIG_FILE", "/tmp/foo/bar")
 
-    err := inheritEnvVars(context.Background(), env)
-
-    require.NoError(t, err)
-
-    require.Equal(t, env["HOME"], "/home/testuser")
-    require.Equal(t, env["PATH"], "/foo:/bar")
-    require.Equal(t, env["TF_CLI_CONFIG_FILE"], "/tmp/config.tfrc")
+    ctx := context.Background()
+    env := map[string]string{}
+    err := inheritEnvVars(ctx, env)
+    if assert.NoError(t, err) {
+        assert.Equal(t, "/home/testuser", env["HOME"])
+        assert.Equal(t, "/foo:/bar", env["PATH"])
+        assert.Equal(t, "/tmp/config.tfrc", env["TF_CLI_CONFIG_FILE"])
+        assert.Equal(t, "/tmp/foo/bar", env["AZURE_CONFIG_FILE"])
+    }
 }
 
 func TestSetUserProfileFromInheritEnvVars(t *testing.T) {
@@ -72,9 +72,11 @@ func IsLibraryLocal(dep string) bool {
 
 // ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_).
 // \[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security].
-// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?)?: Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
+// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?): Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
+// ,?: Optionally matches a comma (,) at the end of the specifier which is used to separate multiple specifiers.
+// There can be multiple version specifiers separated by commas or no specifiers.
 // Spec for package name and version specifier: https://pip.pypa.io/en/stable/reference/requirement-specifiers/
-var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?)?$`)
+var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?,?)*$`)
 
 func isPackage(name string) bool {
     if packageRegex.MatchString(name) {
@@ -62,6 +62,8 @@ func TestIsLibraryLocal(t *testing.T) {
         {path: "beautifulsoup4 ~= 4.12.3", expected: false},
         {path: "beautifulsoup4[security, tests]", expected: false},
         {path: "beautifulsoup4[security, tests] ~= 4.12.3", expected: false},
+        {path: "beautifulsoup4>=1.0.0,<2.0.0", expected: false},
+        {path: "beautifulsoup4>=1.0.0,~=1.2.0,<2.0.0", expected: false},
         {path: "https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
         {path: "pip @ https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
         {path: "requests [security] @ https://github.com/psf/requests/archive/refs/heads/main.zip", expected: false},
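Note: the widened regex is what flips multi-specifier requirements like the two new test cases above from "local" to "remote". A runnable check, with the new pattern copied verbatim from the diff:

package main

import (
    "fmt"
    "regexp"
)

// The updated pattern: the version-specifier group may now repeat, with an
// optional trailing comma separating multiple specifiers.
var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?,?)*$`)

func main() {
    for _, spec := range []string{
        "beautifulsoup4>=1.0.0,<2.0.0",         // multiple specifiers: now matches
        "beautifulsoup4>=1.0.0,~=1.2.0,<2.0.0", // three specifiers: now matches
        "./my-local-package",                   // a path: still no match
    } {
        fmt.Printf("%-40s %v\n", spec, packageRegex.MatchString(spec))
    }
}

A match means the dependency is treated as a PyPI requirement (remote), which is why IsLibraryLocal returns false for these strings.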
@@ -2,6 +2,7 @@ package python
 
 import (
     "context"
+    "strconv"
     "strings"
 
     "github.com/databricks/cli/bundle"
@@ -38,7 +39,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
     tasks := libraries.FindTasksWithLocalLibraries(b)
     for _, task := range tasks {
         if task.NewCluster != nil {
-            if lowerThanExpectedVersion(ctx, task.NewCluster.SparkVersion) {
+            if lowerThanExpectedVersion(task.NewCluster.SparkVersion) {
                 return true
             }
         }
@@ -47,7 +48,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
             for _, job := range b.Config.Resources.Jobs {
                 for _, cluster := range job.JobClusters {
                     if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster.SparkVersion != "" {
-                        if lowerThanExpectedVersion(ctx, cluster.NewCluster.SparkVersion) {
+                        if lowerThanExpectedVersion(cluster.NewCluster.SparkVersion) {
                             return true
                         }
                     }
@@ -64,7 +65,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
                 return false
             }
 
-            if lowerThanExpectedVersion(ctx, version) {
+            if lowerThanExpectedVersion(version) {
                 return true
             }
         }
@@ -73,7 +74,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
     return false
 }
 
-func lowerThanExpectedVersion(ctx context.Context, sparkVersion string) bool {
+func lowerThanExpectedVersion(sparkVersion string) bool {
     parts := strings.Split(sparkVersion, ".")
     if len(parts) < 2 {
         return false
@@ -82,6 +83,17 @@ func lowerThanExpectedVersion(ctx context.Context, sparkVersion string) bool {
     if parts[1][0] == 'x' { // treat versions like 13.x as the very latest minor (13.99)
         parts[1] = "99"
     }
+
+    // if any of the version parts are not numbers, we can't compare
+    // so consider it as compatible version
+    if _, err := strconv.Atoi(parts[0]); err != nil {
+        return false
+    }
+
+    if _, err := strconv.Atoi(parts[1]); err != nil {
+        return false
+    }
+
     v := "v" + parts[0] + "." + parts[1]
     return semver.Compare(v, "v13.1") < 0
 }
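Note: the compatibility check boils down to a semver comparison against v13.1, after "13.x" has been rewritten to "13.99". A small sketch of just that comparison (assuming the semver import here is golang.org/x/mod/semver, which matches the "v"-prefixed version strings used in the code):

package main

import (
    "fmt"

    "golang.org/x/mod/semver"
)

func main() {
    // Anything below v13.1 is flagged as incompatible with local wheel tasks.
    // "13.x" versions become "13.99" first, so every 13.x runtime passes.
    for _, v := range []string{"v10.4", "v13.0", "v13.99", "v14.1"} {
        fmt.Printf("%-7s lower than v13.1: %v\n", v, semver.Compare(v, "v13.1") < 0)
    }
}

The new strconv.Atoi guards above make non-numeric version strings such as "client.1.10" or "latest-stable" fall through as compatible instead of producing a bogus comparison.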
@@ -344,6 +344,8 @@ func TestSparkVersionLowerThanExpected(t *testing.T) {
         "14.1.x-scala2.12":                false,
         "13.x-snapshot-scala-2.12":        false,
         "13.x-rc-scala-2.12":              false,
+        "client.1.10-scala2.12":           false,
+        "latest-stable-gpu-scala2.11":     false,
         "10.4.x-aarch64-photon-scala2.12": true,
         "10.4.x-scala2.12":                true,
         "13.0.x-scala2.12":                true,
@@ -351,7 +353,7 @@ func TestSparkVersionLowerThanExpected(t *testing.T) {
     }
 
     for k, v := range testCases {
-        result := lowerThanExpectedVersion(context.Background(), k)
+        result := lowerThanExpectedVersion(k)
         require.Equal(t, v, result, k)
     }
 }
@@ -29,6 +29,12 @@ func (f *progressLoggerFlag) resolveModeDefault(format flags.ProgressLogFormat)
 }
 
 func (f *progressLoggerFlag) initializeContext(ctx context.Context) (context.Context, error) {
+    // No need to initialize the logger if it's already set in the context. This
+    // happens in unit tests where the logger is setup as a fixture.
+    if _, ok := cmdio.FromContext(ctx); ok {
+        return ctx, nil
+    }
+
     if f.log.level.String() != "disabled" && f.log.file.String() == "stderr" &&
         f.ProgressLogFormat == flags.ModeInplace {
         return nil, fmt.Errorf("inplace progress logging cannot be used when log-file is stderr")
go.mod | 2
@@ -3,7 +3,7 @@ module github.com/databricks/cli
 go 1.22
 
 require (
-    github.com/Masterminds/semver/v3 v3.2.1 // MIT
+    github.com/Masterminds/semver/v3 v3.3.0 // MIT
     github.com/briandowns/spinner v1.23.1 // Apache 2.0
     github.com/databricks/databricks-sdk-go v0.45.0 // Apache 2.0
     github.com/fatih/color v1.17.0 // MIT
@@ -8,8 +8,8 @@ cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg=
@@ -11,6 +11,7 @@ import (
     "github.com/databricks/databricks-sdk-go"
     "github.com/databricks/databricks-sdk-go/service/jobs"
     "github.com/google/uuid"
+    "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
 
@@ -101,12 +102,15 @@ func TestAccAbortBind(t *testing.T) {
         destroyBundle(t, ctx, bundleRoot)
     })
 
+    // Bind should fail because prompting is not possible.
     t.Setenv("BUNDLE_ROOT", bundleRoot)
+    t.Setenv("TERM", "dumb")
     c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId))
 
-    // Simulate user aborting the bind. This is done by not providing any input to the prompt in non-interactive mode.
+    // Expect error suggesting to use --auto-approve
     _, _, err = c.Run()
-    require.ErrorContains(t, err, "failed to bind the resource")
+    assert.ErrorContains(t, err, "failed to bind the resource")
+    assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
 
     err = deployBundle(t, ctx, bundleRoot)
     require.NoError(t, err)
@@ -267,6 +267,8 @@ func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value, path
         out = strconv.FormatInt(src.MustInt(), 10)
     case dyn.KindFloat:
         out = strconv.FormatFloat(src.MustFloat(), 'f', -1, 64)
+    case dyn.KindTime:
+        out = src.MustTime().String()
     case dyn.KindNil:
         // Return a warning if the field is present but has a null value.
         return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindString, src, path))
@@ -569,6 +569,14 @@ func TestNormalizeStringFromFloat(t *testing.T) {
     assert.Equal(t, dyn.NewValue("1.2", vin.Locations()), vout)
 }
 
+func TestNormalizeStringFromTime(t *testing.T) {
+    var typ string
+    vin := dyn.NewValue(dyn.MustTime("2024-08-29"), []dyn.Location{{File: "file", Line: 1, Column: 1}})
+    vout, err := Normalize(&typ, vin)
+    assert.Empty(t, err)
+    assert.Equal(t, dyn.NewValue("2024-08-29", vin.Locations()), vout)
+}
+
 func TestNormalizeStringError(t *testing.T) {
     var typ string
     vin := dyn.V(map[string]dyn.Value{"an": dyn.V("error")})
@@ -2,7 +2,6 @@ package dyn
 
 import (
     "fmt"
-    "time"
 )
 
 type Kind int
@@ -34,7 +33,7 @@ func kindOf(v any) Kind {
         return KindInt
     case float32, float64:
         return KindFloat
-    case time.Time:
+    case Time:
         return KindTime
     case nil:
         return KindNil
@@ -83,16 +83,16 @@ func TestOverride_Primitive(t *testing.T) {
         {
             name:     "time (updated)",
             state:    visitorState{updated: []string{"root"}},
-            left:     dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}),
-            right:    dyn.NewValue(time.UnixMilli(10001), []dyn.Location{rightLocation}),
-            expected: dyn.NewValue(time.UnixMilli(10001), []dyn.Location{rightLocation}),
+            left:     dyn.NewValue(dyn.FromTime(time.UnixMilli(10000)), []dyn.Location{leftLocation}),
+            right:    dyn.NewValue(dyn.FromTime(time.UnixMilli(10001)), []dyn.Location{rightLocation}),
+            expected: dyn.NewValue(dyn.FromTime(time.UnixMilli(10001)), []dyn.Location{rightLocation}),
         },
         {
             name:  "time (not updated)",
             state: visitorState{},
-            left:     dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}),
-            right:    dyn.NewValue(time.UnixMilli(10000), []dyn.Location{rightLocation}),
-            expected: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}),
+            left:     dyn.NewValue(dyn.FromTime(time.UnixMilli(10000)), []dyn.Location{leftLocation}),
+            right:    dyn.NewValue(dyn.FromTime(time.UnixMilli(10000)), []dyn.Location{rightLocation}),
+            expected: dyn.NewValue(dyn.FromTime(time.UnixMilli(10000)), []dyn.Location{leftLocation}),
         },
         {
             name: "different types (updated)",
@@ -0,0 +1,62 @@
+package dyn
+
+import (
+    "fmt"
+    "time"
+)
+
+// Time represents a time-like primitive value.
+//
+// It represents a timestamp and includes the original string value
+// that was parsed to create the timestamp. This makes it possible
+// to coalesce a value that YAML interprets as a timestamp back into
+// a string without losing information.
+type Time struct {
+    t time.Time
+    s string
+}
+
+// NewTime creates a new Time from the given string.
+func NewTime(str string) (Time, error) {
+    // Try a couple of layouts
+    for _, layout := range []string{
+        "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+        "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+        "2006-1-2 15:4:5.999999999",       // space separated with no time zone
+        "2006-1-2",                        // date only
+    } {
+        t, terr := time.Parse(layout, str)
+        if terr == nil {
+            return Time{t: t, s: str}, nil
+        }
+    }
+
+    return Time{}, fmt.Errorf("invalid time value: %q", str)
+}
+
+// MustTime creates a new Time from the given string.
+// It panics if the string cannot be parsed.
+func MustTime(str string) Time {
+    t, err := NewTime(str)
+    if err != nil {
+        panic(err)
+    }
+    return t
+}
+
+// FromTime creates a new Time from the given time.Time.
+// It uses the RFC3339Nano format for its string representation.
+// This guarantees that it can roundtrip into a string without losing information.
+func FromTime(t time.Time) Time {
+    return Time{t: t, s: t.Format(time.RFC3339Nano)}
+}
+
+// Time returns the time.Time value.
+func (t Time) Time() time.Time {
+    return t.t
+}
+
+// String returns the original string value that was parsed to create the timestamp.
+func (t Time) String() string {
+    return t.s
+}
@@ -0,0 +1,41 @@
+package dyn_test
+
+import (
+    "testing"
+    "time"
+
+    "github.com/databricks/cli/libs/dyn"
+    assert "github.com/databricks/cli/libs/dyn/dynassert"
+)
+
+func TestTimeValid(t *testing.T) {
+    for _, tc := range []string{
+        "2024-08-29",
+        "2024-01-15T12:34:56.789012345Z",
+    } {
+        tm, err := dyn.NewTime(tc)
+        if assert.NoError(t, err) {
+            assert.NotEqual(t, time.Time{}, tm.Time())
+            assert.Equal(t, tc, tm.String())
+        }
+    }
+}
+
+func TestTimeInvalid(t *testing.T) {
+    tm, err := dyn.NewTime("invalid")
+    assert.Error(t, err)
+    assert.Equal(t, dyn.Time{}, tm)
+}
+
+func TestTimeFromTime(t *testing.T) {
+    tref := time.Now()
+    t1 := dyn.FromTime(tref)
+
+    // Verify that the underlying value is the same.
+    assert.Equal(t, tref, t1.Time())
+
+    // Verify that the string representation can be used to construct the same.
+    t2, err := dyn.NewTime(t1.String())
+    assert.NoError(t, err)
+    assert.True(t, t1.Time().Equal(t2.Time()))
+}
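Note: the point of the new dyn.Time type, per the implementation and tests above, is that a parsed timestamp remembers its original spelling, so a YAML value like "2024-08-29" survives a load/save roundtrip unchanged. A short sketch contrasting it with a bare time.Time roundtrip (assumes the github.com/databricks/cli/libs/dyn module is available):

package main

import (
    "fmt"
    "time"

    "github.com/databricks/cli/libs/dyn"
)

func main() {
    // dyn.Time keeps the exact input string.
    tm := dyn.MustTime("2024-08-29")
    fmt.Println(tm.String()) // 2024-08-29

    // A bare time.Time loses the spelling: formatting it back
    // produces the canonical form instead of the original text.
    t, _ := time.Parse("2006-1-2", "2024-08-29")
    fmt.Println(t.Format(time.RFC3339)) // 2024-08-29T00:00:00Z
}

This is why the yamlsaver change further below can emit v.MustTime().String() directly instead of the "1970-01-01 00:00:00 +0000 UTC" style output it produced before.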
@@ -127,7 +127,8 @@ func (v Value) AsAny() any {
     case KindFloat:
         return v.v
     case KindTime:
-        return v.v
+        t := v.v.(Time)
+        return t.Time()
     default:
         // Panic because we only want to deal with known types.
         panic(fmt.Sprintf("invalid kind: %d", v.k))
@@ -2,7 +2,6 @@ package dyn
 
 import (
     "fmt"
-    "time"
 )
 
 // AsMap returns the underlying mapping if this value is a map,
@@ -123,14 +122,14 @@ func (v Value) MustFloat() float64 {
 
 // AsTime returns the underlying time if this value is a time,
 // the zero value and false otherwise.
-func (v Value) AsTime() (time.Time, bool) {
-    vv, ok := v.v.(time.Time)
+func (v Value) AsTime() (Time, bool) {
+    vv, ok := v.v.(Time)
     return vv, ok
 }
 
 // MustTime returns the underlying time if this value is a time,
 // panics otherwise.
-func (v Value) MustTime() time.Time {
+func (v Value) MustTime() Time {
     vv, ok := v.AsTime()
     if !ok || v.k != KindTime {
         panic(fmt.Sprintf("expected kind %s, got %s", KindTime, v.k))
@@ -143,7 +143,7 @@ func TestValueUnderlyingFloat(t *testing.T) {
 }
 
 func TestValueUnderlyingTime(t *testing.T) {
-    v := dyn.V(time.Now())
+    v := dyn.V(dyn.FromTime(time.Now()))
 
     vv1, ok := v.AsTime()
     assert.True(t, ok)
@@ -5,7 +5,6 @@ import (
     "math"
     "strconv"
     "strings"
-    "time"
 
     "github.com/databricks/cli/libs/dyn"
     "gopkg.in/yaml.v3"
@@ -207,17 +206,9 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error
     case "!!null":
         return dyn.NewValue(nil, []dyn.Location{loc}), nil
     case "!!timestamp":
-        // Try a couple of layouts
-        for _, layout := range []string{
-            "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
-            "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
-            "2006-1-2 15:4:5.999999999",       // space separated with no time zone
-            "2006-1-2",                        // date only
-        } {
-            t, terr := time.Parse(layout, node.Value)
-            if terr == nil {
-                return dyn.NewValue(t, []dyn.Location{loc}), nil
-            }
+        t, err := dyn.NewTime(node.Value)
+        if err == nil {
+            return dyn.NewValue(t, []dyn.Location{loc}), nil
         }
         return dyn.InvalidValue, errorf(loc, "invalid timestamp value: %v", node.Value)
     default:
@@ -129,7 +129,7 @@ func (s *saver) toYamlNodeWithStyle(v dyn.Value, style yaml.Style) (*yaml.Node,
     case dyn.KindFloat:
         return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustFloat()), Style: style}, nil
     case dyn.KindTime:
-        return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustTime().UTC().String(), Style: style}, nil
+        return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustTime().String(), Style: style}, nil
     default:
         // Panic because we only want to deal with known types.
         panic(fmt.Sprintf("invalid kind: %d", v.Kind()))
|
@ -2,10 +2,10 @@ package yamlsaver
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/databricks/cli/libs/dyn"
|
"github.com/databricks/cli/libs/dyn"
|
||||||
assert "github.com/databricks/cli/libs/dyn/dynassert"
|
assert "github.com/databricks/cli/libs/dyn/dynassert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
"gopkg.in/yaml.v3"
|
"gopkg.in/yaml.v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -45,11 +45,14 @@ func TestMarshalBoolValue(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMarshalTimeValue(t *testing.T) {
|
func TestMarshalTimeValue(t *testing.T) {
|
||||||
|
tm, err := dyn.NewTime("1970-01-01")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
s := NewSaver()
|
s := NewSaver()
|
||||||
var timeValue = dyn.V(time.Unix(0, 0))
|
var timeValue = dyn.V(tm)
|
||||||
v, err := s.toYamlNode(timeValue)
|
v, err := s.toYamlNode(timeValue)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, "1970-01-01 00:00:00 +0000 UTC", v.Value)
|
assert.Equal(t, "1970-01-01", v.Value)
|
||||||
assert.Equal(t, yaml.ScalarNode, v.Kind)
|
assert.Equal(t, yaml.ScalarNode, v.Kind)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -1,7 +1,7 @@
 {{define "latest_lts_dbr_version" -}}
-    13.3.x-scala2.12
+    15.4.x-scala2.12
 {{- end}}
 
 {{define "latest_lts_db_connect_version_spec" -}}
-    >=13.3,<13.4
+    >=15.4,<15.5
 {{- end}}
@@ -12,8 +12,10 @@ include:
 targets:
   dev:
     default: true
-    # We use 'mode: development' to indicate this is a personal development copy.
-    # Any job schedules and triggers are paused by default.
+    # The default target uses 'mode: development' to create a development copy.
+    # - Deployed resources get prefixed with '[dev my_user_name]'
+    # - Any job schedules and triggers are paused by default.
+    # See also https://docs.databricks.com/dev-tools/bundles/deployment-modes.html.
     mode: development
     workspace:
       host: {{workspace_host}}
@@ -22,11 +24,10 @@ targets:
     mode: production
     workspace:
       host: {{workspace_host}}
-      # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy.
+      # We explicitly specify /Users/{{user_name}} to make sure we only have a single copy.
       root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target}
-    {{- if not is_service_principal}}
+    permissions:
+      - {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
+        level: CAN_MANAGE
     run_as:
-      # This runs as {{user_name}} in production. We could also use a service principal here
-      # using service_principal_name (see the Databricks documentation).
-      user_name: {{user_name}}
-    {{- end}}
+      {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
@@ -1,7 +1,7 @@
 {{define "latest_lts_dbr_version" -}}
-    13.3.x-scala2.12
+    15.4.x-scala2.12
 {{- end}}
 
 {{define "latest_lts_db_connect_version_spec" -}}
-    >=13.3,<13.4
+    >=15.4,<15.5
 {{- end}}
@@ -7,44 +7,24 @@ include:
   - resources/*.yml
 
 targets:
-  # The 'dev' target, for development purposes. This target is the default.
   dev:
-    # We use 'mode: development' to indicate this is a personal development copy:
+    # The default target uses 'mode: development' to create a development copy.
     # - Deployed resources get prefixed with '[dev my_user_name]'
-    # - Any job schedules and triggers are paused by default
-    # - The 'development' mode is used for Delta Live Tables pipelines
+    # - Any job schedules and triggers are paused by default.
+    # See also https://docs.databricks.com/dev-tools/bundles/deployment-modes.html.
     mode: development
     default: true
     workspace:
       host: {{workspace_host}}
 
-  ## Optionally, there could be a 'staging' target here.
-  ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.)
-  #
-  # staging:
-  #   workspace:
-  #     host: {{workspace_host}}
-
-  # The 'prod' target, used for production deployment.
   prod:
-    # We use 'mode: production' to indicate this is a production deployment.
-    # Doing so enables strict verification of the settings below.
     mode: production
     workspace:
       host: {{workspace_host}}
-      # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy.
-      {{- /*
-      Internal note 2023-12: CLI versions v0.211.0 and before would show an error when using `mode: production`
-      with a path that doesn't say "/Shared". For now, we'll include an extra comment in the template
-      to explain that customers should update if they see this.
-      */}}
-      # If this path results in an error, please make sure you have a recent version of the CLI installed.
+      # We explicitly specify /Users/{{user_name}} to make sure we only have a single copy.
       root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target}
+    permissions:
+      - {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
+        level: CAN_MANAGE
     run_as:
-      {{- if is_service_principal}}
-      service_principal_name: {{user_name}}
-      {{- else}}
-      # This runs as {{user_name}} in production. We could also use a service principal here,
-      # see https://docs.databricks.com/dev-tools/bundles/permissions.html.
-      user_name: {{user_name}}
-      {{- end}}
+      {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
@@ -1,7 +1,7 @@
 {{define "latest_lts_dbr_version" -}}
-    13.3.x-scala2.12
+    15.4.x-scala2.12
 {{- end}}
 
 {{define "latest_lts_db_connect_version_spec" -}}
-    >=13.3,<13.4
+    >=15.4,<15.5
 {{- end}}
@@ -18,16 +18,16 @@ variables:
 {{- $dev_schema := .shared_schema }}
 {{- $prod_schema := .shared_schema }}
 {{- if (regexp "^yes").MatchString .personal_schemas}}
 {{- $dev_schema = "${workspace.current_user.short_name}"}}
 {{- $prod_schema = "default"}}
 {{- end}}
 
-# Deployment targets.
 targets:
-  # The 'dev' target, for development purposes. This target is the default.
   dev:
-    # We use 'mode: development' to indicate this is a personal development copy.
-    # Any job schedules and triggers are paused by default
+    # The default target uses 'mode: development' to create a development copy.
+    # - Deployed resources get prefixed with '[dev my_user_name]'
+    # - Any job schedules and triggers are paused by default.
+    # See also https://docs.databricks.com/dev-tools/bundles/deployment-modes.html.
     mode: development
     default: true
     workspace:
@@ -37,35 +37,18 @@ targets:
       catalog: {{.default_catalog}}
       schema: {{$dev_schema}}
 
-  ## Optionally, there could be a 'staging' target here.
-  ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.)
-  #
-  # staging:
-  #   workspace:
-  #     host: {{workspace_host}}
-
-  # The 'prod' target, used for production deployment.
   prod:
-    # We use 'mode: production' to indicate this is a production deployment.
-    # Doing so enables strict verification of the settings below.
     mode: production
     workspace:
       host: {{workspace_host}}
-      # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy.
-      {{- /*
-      Internal note 2023-12: CLI versions v0.211.0 and before would show an error when using `mode: production`
-      with a path that doesn't say "/Shared". For now, we'll include an extra comment in the template
-      to explain that customers should update if they see this.
-      */}}
-      # If this path results in an error, please make sure you have a recent version of the CLI installed.
+      # We explicitly specify /Users/{{user_name}} to make sure we only have a single copy.
       root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target}
     variables:
       warehouse_id: {{index ((regexp "[^/]+$").FindStringSubmatch .http_path) 0}}
       catalog: {{.default_catalog}}
       schema: {{$prod_schema}}
-    {{- if not is_service_principal}}
+    permissions:
+      - {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
+        level: CAN_MANAGE
     run_as:
-      # This runs as {{user_name}} in production. We could also use a service principal here
-      # using service_principal_name (see https://docs.databricks.com/en/dev-tools/bundles/permissions.html).
-      user_name: {{user_name}}
-    {{end -}}
+      {{if is_service_principal}}service_principal{{else}}user{{end}}_name: {{user_name}}
@@ -1,10 +1,9 @@
 -- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml)
-{{- /* We can't use a materialized view here since they don't support 'create or refresh' yet.*/}}
 
 USE CATALOG {{"{{"}}catalog{{"}}"}};
 USE IDENTIFIER({{"{{"}}schema{{"}}"}});
 
-CREATE OR REPLACE VIEW
+CREATE OR REPLACE MATERIALIZED VIEW
   orders_daily
 AS SELECT
   order_date, count(*) AS number_of_orders