mirror of https://github.com/databricks/cli.git

Merge remote-tracking branch 'databricks/main' into cp-better-errors

This commit is contained in commit 5ed6fc4b46.

@@ -1 +1 @@
-7437dabb9dadee402c1fc060df4c1ce8cc5369f0
+f98c07f9c71f579de65d2587bb0292f83d10e55d
@@ -116,12 +116,12 @@ func allResolvers() *resolvers {
 	{{range .Services -}}
 	{{- if in $allowlist .KebabName -}}
 	r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
-		entity, err := w.{{.PascalName}}.GetBy{{range .List.NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
+		entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
 		if err != nil {
 			return "", err
 		}

-		return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .List.NamedIdMap.IdPath 0).PascalName) }}), nil
+		return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .NamedIdMap.IdPath 0).PascalName) }}), nil
 	}
 	{{end -}}
 	{{- end}}
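To make the template change concrete, here is a hedged sketch of what the rendered resolver plausibly looks like for one service after the switch to the service-level NamedIdMap (the service, method, and field names below are illustrative assumptions, not copied from the generated file):

    r.Cluster = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
        entity, err := w.Clusters.GetByClusterName(ctx, name)
        if err != nil {
            return "", err
        }
        return fmt.Sprint(entity.ClusterId), nil
    }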
@@ -24,10 +24,12 @@ cmd/account/service-principals/service-principals.go linguist-generated=true
 cmd/account/settings/settings.go linguist-generated=true
 cmd/account/storage-credentials/storage-credentials.go linguist-generated=true
 cmd/account/storage/storage.go linguist-generated=true
+cmd/account/usage-dashboards/usage-dashboards.go linguist-generated=true
 cmd/account/users/users.go linguist-generated=true
 cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
 cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
 cmd/account/workspaces/workspaces.go linguist-generated=true
+cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true
 cmd/workspace/alerts/alerts.go linguist-generated=true
 cmd/workspace/apps/apps.go linguist-generated=true
 cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true

@@ -54,6 +56,7 @@ cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
 cmd/workspace/experiments/experiments.go linguist-generated=true
 cmd/workspace/external-locations/external-locations.go linguist-generated=true
 cmd/workspace/functions/functions.go linguist-generated=true
+cmd/workspace/genie/genie.go linguist-generated=true
 cmd/workspace/git-credentials/git-credentials.go linguist-generated=true
 cmd/workspace/global-init-scripts/global-init-scripts.go linguist-generated=true
 cmd/workspace/grants/grants.go linguist-generated=true

@@ -67,6 +70,7 @@ cmd/workspace/libraries/libraries.go linguist-generated=true
 cmd/workspace/metastores/metastores.go linguist-generated=true
 cmd/workspace/model-registry/model-registry.go linguist-generated=true
 cmd/workspace/model-versions/model-versions.go linguist-generated=true
+cmd/workspace/notification-destinations/notification-destinations.go linguist-generated=true
 cmd/workspace/online-tables/online-tables.go linguist-generated=true
 cmd/workspace/permission-migration/permission-migration.go linguist-generated=true
 cmd/workspace/permissions/permissions.go linguist-generated=true

@@ -81,8 +85,10 @@ cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true
 cmd/workspace/provider-providers/provider-providers.go linguist-generated=true
 cmd/workspace/providers/providers.go linguist-generated=true
 cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true
+cmd/workspace/queries-legacy/queries-legacy.go linguist-generated=true
 cmd/workspace/queries/queries.go linguist-generated=true
 cmd/workspace/query-history/query-history.go linguist-generated=true
+cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go linguist-generated=true
 cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true
 cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true
 cmd/workspace/recipients/recipients.go linguist-generated=true
@@ -6,7 +6,7 @@
 	"files.trimTrailingWhitespace": true,
 	"files.insertFinalNewline": true,
 	"files.trimFinalNewlines": true,
-	"python.envFile": "${workspaceRoot}/.env",
+	"python.envFile": "${workspaceFolder}/.databricks/.databricks.env",
+	"databricks.python.envFile": "${workspaceFolder}/.env",
 	"python.analysis.stubPath": ".vscode",
 	"jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\<codecell\\>|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])",
CHANGELOG.md (187 changed lines)
@@ -1,5 +1,192 @@
 # Version changelog

+## [Release] Release v0.227.0
+
+CLI:
+* Added filtering flags for cluster list commands ([#1703](https://github.com/databricks/cli/pull/1703)).
+
+Bundles:
+* Allow users to configure paths (including outside of the bundle root) to synchronize to the workspace. ([#1694](https://github.com/databricks/cli/pull/1694)).
+* Add configurable presets for name prefixes, tags, etc. ([#1490](https://github.com/databricks/cli/pull/1490)).
+* Add support for requirements libraries in Job Tasks ([#1543](https://github.com/databricks/cli/pull/1543)).
+* Remove reference to "dbt" in the default-sql template ([#1696](https://github.com/databricks/cli/pull/1696)).
+* Pause continuous pipelines when 'mode: development' is used ([#1590](https://github.com/databricks/cli/pull/1590)).
+* Report all empty resources present in error diagnostic ([#1685](https://github.com/databricks/cli/pull/1685)).
+* Improves detection of PyPI package names in environment dependencies ([#1699](https://github.com/databricks/cli/pull/1699)).
+
+Internal:
+* Add `import` option for PyDABs ([#1693](https://github.com/databricks/cli/pull/1693)).
+* Make fileset take optional list of paths to list ([#1684](https://github.com/databricks/cli/pull/1684)).
+* Pass through paths argument to libs/sync ([#1689](https://github.com/databricks/cli/pull/1689)).
+* Correctly mark package names with versions as remote libraries ([#1697](https://github.com/databricks/cli/pull/1697)).
+* Share test initializer in common helper function ([#1695](https://github.com/databricks/cli/pull/1695)).
+* Make `pydabs/venv_path` optional ([#1687](https://github.com/databricks/cli/pull/1687)).
+* Use API mocks for duplicate path errors in workspace files extensions client ([#1690](https://github.com/databricks/cli/pull/1690)).
+* Fix prefix preset used for UC schemas ([#1704](https://github.com/databricks/cli/pull/1704)).
+
+
+## [Release] Release v0.226.0
+
+CLI:
+* Add command line autocomplete to the fs commands ([#1622](https://github.com/databricks/cli/pull/1622)).
+* Add trailing slash to directory to produce completions for ([#1666](https://github.com/databricks/cli/pull/1666)).
+* Fix ability to import the CLI repository as module ([#1671](https://github.com/databricks/cli/pull/1671)).
+* Fix host resolution order in `auth login` ([#1370](https://github.com/databricks/cli/pull/1370)).
+* Print text logs in `import-dir` and `export-dir` commands ([#1682](https://github.com/databricks/cli/pull/1682)).
+
+Bundles:
+* Expand and upload local wheel libraries for all task types ([#1649](https://github.com/databricks/cli/pull/1649)).
+* Clarify file format required for the `config-file` flag in `bundle init` ([#1651](https://github.com/databricks/cli/pull/1651)).
+* Fixed incorrectly cleaning up python wheel dist folder ([#1656](https://github.com/databricks/cli/pull/1656)).
+* Merge job parameters based on their name ([#1659](https://github.com/databricks/cli/pull/1659)).
+* Fix glob expansion after running a generic build command ([#1662](https://github.com/databricks/cli/pull/1662)).
+* Upload local libraries even if they don't have artifact defined ([#1664](https://github.com/databricks/cli/pull/1664)).
+
+Internal:
+* Fix python wheel task integration tests ([#1648](https://github.com/databricks/cli/pull/1648)).
+* Skip pushing Terraform state after destroy ([#1667](https://github.com/databricks/cli/pull/1667)).
+* Enable Spark JAR task test ([#1658](https://github.com/databricks/cli/pull/1658)).
+* Run Spark JAR task test on multiple DBR versions ([#1665](https://github.com/databricks/cli/pull/1665)).
+* Stop tracking file path locations in bundle resources ([#1673](https://github.com/databricks/cli/pull/1673)).
+* Update VS Code settings to match latest value from IDE plugin ([#1677](https://github.com/databricks/cli/pull/1677)).
+* Use `service.NamedIdMap` to make lookup generation deterministic ([#1678](https://github.com/databricks/cli/pull/1678)).
+* [Internal] Remove dependency to the `openapi` package of the Go SDK ([#1676](https://github.com/databricks/cli/pull/1676)).
+* Upgrade TF provider to 1.50.0 ([#1681](https://github.com/databricks/cli/pull/1681)).
+* Upgrade Go SDK to 0.44.0 ([#1679](https://github.com/databricks/cli/pull/1679)).
+
+API Changes:
+* Changed `databricks account budgets create` command . New request type is .
+* Changed `databricks account budgets create` command to return .
+* Changed `databricks account budgets delete` command . New request type is .
+* Changed `databricks account budgets delete` command to return .
+* Changed `databricks account budgets get` command . New request type is .
+* Changed `databricks account budgets get` command to return .
+* Changed `databricks account budgets list` command to require request of .
+* Changed `databricks account budgets list` command to return .
+* Changed `databricks account budgets update` command . New request type is .
+* Changed `databricks account budgets update` command to return .
+* Added `databricks account usage-dashboards` command group.
+* Changed `databricks model-versions get` command to return .
+* Changed `databricks cluster-policies create` command with new required argument order.
+* Changed `databricks cluster-policies edit` command with new required argument order.
+* Added `databricks clusters update` command.
+* Added `databricks genie` command group.
+* Changed `databricks permission-migration migrate-permissions` command . New request type is .
+* Changed `databricks permission-migration migrate-permissions` command to return .
+* Changed `databricks account workspace-assignment delete` command to return .
+* Changed `databricks account workspace-assignment update` command with new required argument order.
+* Changed `databricks account custom-app-integration create` command with new required argument order.
+* Changed `databricks account custom-app-integration list` command to require request of .
+* Changed `databricks account published-app-integration list` command to require request of .
+* Removed `databricks apps` command group.
+* Added `databricks notification-destinations` command group.
+* Changed `databricks shares list` command to require request of .
+* Changed `databricks alerts create` command . New request type is .
+* Changed `databricks alerts delete` command . New request type is .
+* Changed `databricks alerts delete` command to return .
+* Changed `databricks alerts get` command with new required argument order.
+* Changed `databricks alerts list` command to require request of .
+* Changed `databricks alerts list` command to return .
+* Changed `databricks alerts update` command . New request type is .
+* Changed `databricks alerts update` command to return .
+* Changed `databricks queries create` command . New request type is .
+* Changed `databricks queries delete` command . New request type is .
+* Changed `databricks queries delete` command to return .
+* Changed `databricks queries get` command with new required argument order.
+* Changed `databricks queries list` command to return .
+* Removed `databricks queries restore` command.
+* Changed `databricks queries update` command . New request type is .
+* Added `databricks queries list-visualizations` command.
+* Changed `databricks query-visualizations create` command . New request type is .
+* Changed `databricks query-visualizations delete` command . New request type is .
+* Changed `databricks query-visualizations delete` command to return .
+* Changed `databricks query-visualizations update` command . New request type is .
+* Changed `databricks statement-execution execute-statement` command to return .
+* Changed `databricks statement-execution get-statement` command to return .
+* Added `databricks alerts-legacy` command group.
+* Added `databricks queries-legacy` command group.
+* Added `databricks query-visualizations-legacy` command group.
+
+OpenAPI commit f98c07f9c71f579de65d2587bb0292f83d10e55d (2024-08-12)
+
+Dependency updates:
+* Bump github.com/hashicorp/hc-install from 0.7.0 to 0.8.0 ([#1652](https://github.com/databricks/cli/pull/1652)).
+* Bump golang.org/x/sync from 0.7.0 to 0.8.0 ([#1655](https://github.com/databricks/cli/pull/1655)).
+* Bump golang.org/x/mod from 0.19.0 to 0.20.0 ([#1654](https://github.com/databricks/cli/pull/1654)).
+* Bump golang.org/x/oauth2 from 0.21.0 to 0.22.0 ([#1653](https://github.com/databricks/cli/pull/1653)).
+* Bump golang.org/x/text from 0.16.0 to 0.17.0 ([#1670](https://github.com/databricks/cli/pull/1670)).
+* Bump golang.org/x/term from 0.22.0 to 0.23.0 ([#1669](https://github.com/databricks/cli/pull/1669)).
+
+## 0.225.0
+
+Bundles:
+* Add resource for UC schemas to DABs ([#1413](https://github.com/databricks/cli/pull/1413)).
+
+Internal:
+* Use dynamic walking to validate unique resource keys ([#1614](https://github.com/databricks/cli/pull/1614)).
+* Regenerate TF schema ([#1635](https://github.com/databricks/cli/pull/1635)).
+* Add upgrade and upgrade eager flags to pip install call ([#1636](https://github.com/databricks/cli/pull/1636)).
+* Added test for negation pattern in sync include/exclude section ([#1637](https://github.com/databricks/cli/pull/1637)).
+* Use precomputed terraform plan for `bundle deploy` ([#1640](https://github.com/databricks/cli/pull/1640)).
+
+## 0.224.1
+
+Bundles:
+* Add UUID function to bundle template functions ([#1612](https://github.com/databricks/cli/pull/1612)).
+* Upgrade TF provider to 1.49.0 ([#1617](https://github.com/databricks/cli/pull/1617)).
+* Upgrade TF provider to 1.49.1 ([#1626](https://github.com/databricks/cli/pull/1626)).
+* Support multiple locations for diagnostics ([#1610](https://github.com/databricks/cli/pull/1610)).
+* Split artifact cleanup into prepare step before build ([#1618](https://github.com/databricks/cli/pull/1618)).
+* Move to a single prompt during bundle destroy ([#1583](https://github.com/databricks/cli/pull/1583)).
+
+Internal:
+* Add tests for the Workspace API readahead cache ([#1605](https://github.com/databricks/cli/pull/1605)).
+* Update Python dependencies before install when upgrading a labs project ([#1624](https://github.com/databricks/cli/pull/1624)).
+
+
+## 0.224.0
+
+CLI:
+* Do not buffer files in memory when downloading ([#1599](https://github.com/databricks/cli/pull/1599)).
+
+Bundles:
+* Allow artifacts (JARs, wheels) to be uploaded to UC Volumes ([#1591](https://github.com/databricks/cli/pull/1591)).
+* Upgrade TF provider to 1.48.3 ([#1600](https://github.com/databricks/cli/pull/1600)).
+* Fixed job name normalisation for bundle generate ([#1601](https://github.com/databricks/cli/pull/1601)).
+
+Internal:
+* Add UUID to uniquely identify a deployment state ([#1595](https://github.com/databricks/cli/pull/1595)).
+* Track multiple locations associated with a `dyn.Value` ([#1510](https://github.com/databricks/cli/pull/1510)).
+* Attribute Terraform API requests to the CLI ([#1598](https://github.com/databricks/cli/pull/1598)).
+* Implement readahead cache for Workspace API calls ([#1582](https://github.com/databricks/cli/pull/1582)).
+* Add read-only mode for extension aware workspace filer ([#1609](https://github.com/databricks/cli/pull/1609)).
+
+Dependency updates:
+* Bump github.com/databricks/databricks-sdk-go from 0.43.0 to 0.43.2 ([#1594](https://github.com/databricks/cli/pull/1594)).
+
+## 0.223.2
+
+Bundles:
+* Override complex variables with target overrides instead of merging ([#1567](https://github.com/databricks/cli/pull/1567)).
+* Rewrite local path for libraries in foreach tasks ([#1569](https://github.com/databricks/cli/pull/1569)).
+* Change SetVariables mutator to mutate dynamic configuration instead ([#1573](https://github.com/databricks/cli/pull/1573)).
+* Return early in bundle destroy if no deployment exists ([#1581](https://github.com/databricks/cli/pull/1581)).
+* Let notebook detection code use underlying metadata if available ([#1574](https://github.com/databricks/cli/pull/1574)).
+* Remove schema override for variable default value ([#1536](https://github.com/databricks/cli/pull/1536)).
+* Print diagnostics in 'bundle deploy' ([#1579](https://github.com/databricks/cli/pull/1579)).
+
+Internal:
+* Update actions/upload-artifact to v4 ([#1559](https://github.com/databricks/cli/pull/1559)).
+* Use Go 1.22 to build and test ([#1562](https://github.com/databricks/cli/pull/1562)).
+* Move bespoke status call to main workspace files filer ([#1570](https://github.com/databricks/cli/pull/1570)).
+* Add new template ([#1578](https://github.com/databricks/cli/pull/1578)).
+* Add regression tests for CLI error output ([#1566](https://github.com/databricks/cli/pull/1566)).
+
+Dependency updates:
+* Bump golang.org/x/mod from 0.18.0 to 0.19.0 ([#1576](https://github.com/databricks/cli/pull/1576)).
+* Bump golang.org/x/term from 0.21.0 to 0.22.0 ([#1577](https://github.com/databricks/cli/pull/1577)).
+
 ## 0.223.1

 This bugfix release fixes missing error messages in v0.223.0.
@@ -1,21 +1,15 @@
 package artifacts

 import (
-	"bytes"
 	"context"
-	"errors"
 	"fmt"
-	"os"
-	"path"
-	"path/filepath"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/artifacts/whl"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/config/mutator"
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/diag"
-	"github.com/databricks/cli/libs/filer"
-	"github.com/databricks/cli/libs/log"
 )

@@ -25,7 +19,9 @@ var buildMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{
 	config.ArtifactPythonWheel: whl.Build,
 }

-var uploadMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{}
+var prepareMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{
+	config.ArtifactPythonWheel: whl.Prepare,
+}

 func getBuildMutator(t config.ArtifactType, name string) bundle.Mutator {
 	mutatorFactory, ok := buildMutators[t]

@@ -36,10 +32,12 @@ func getBuildMutator(t config.ArtifactType, name string) bundle.Mutator {
 	return mutatorFactory(name)
 }

-func getUploadMutator(t config.ArtifactType, name string) bundle.Mutator {
-	mutatorFactory, ok := uploadMutators[t]
+func getPrepareMutator(t config.ArtifactType, name string) bundle.Mutator {
+	mutatorFactory, ok := prepareMutators[t]
 	if !ok {
-		mutatorFactory = BasicUpload
+		mutatorFactory = func(_ string) bundle.Mutator {
+			return mutator.NoOp()
+		}
 	}

 	return mutatorFactory(name)

@@ -74,162 +72,3 @@ func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {

 	return nil
 }
-
-// Basic Upload defines a general upload mutator which uploads artifact as a library to workspace
-type basicUpload struct {
-	name string
-}
-
-func BasicUpload(name string) bundle.Mutator {
-	return &basicUpload{name: name}
-}
-
-func (m *basicUpload) Name() string {
-	return fmt.Sprintf("artifacts.Upload(%s)", m.name)
-}
-
-func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	artifact, ok := b.Config.Artifacts[m.name]
-	if !ok {
-		return diag.Errorf("artifact doesn't exist: %s", m.name)
-	}
-
-	if len(artifact.Files) == 0 {
-		return diag.Errorf("artifact source is not configured: %s", m.name)
-	}
-
-	uploadPath, err := getUploadBasePath(b)
-	if err != nil {
-		return diag.FromErr(err)
-	}
-
-	client, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath)
-	if err != nil {
-		return diag.FromErr(err)
-	}
-
-	err = uploadArtifact(ctx, b, artifact, uploadPath, client)
-	if err != nil {
-		return diag.Errorf("upload for %s failed, error: %v", m.name, err)
-	}
-
-	return nil
-}
-
-func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error {
-	for i := range a.Files {
-		f := &a.Files[i]
-
-		filename := filepath.Base(f.Source)
-		cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename))
-
-		err := uploadArtifactFile(ctx, f.Source, client)
-		if err != nil {
-			return err
-		}
-
-		log.Infof(ctx, "Upload succeeded")
-		f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source))
-
-		// TODO: confirm if we still need to update the remote path to start with /Workspace
-		wsfsBase := "/Workspace"
-		remotePath := path.Join(wsfsBase, f.RemotePath)
-
-		for _, job := range b.Config.Resources.Jobs {
-			rewriteArtifactPath(b, f, job, remotePath)
-		}
-	}
-
-	return nil
-}
-
-func rewriteArtifactPath(b *bundle.Bundle, f *config.ArtifactFile, job *resources.Job, remotePath string) {
-	// Rewrite artifact path in job task libraries
-	for i := range job.Tasks {
-		task := &job.Tasks[i]
-		for j := range task.Libraries {
-			lib := &task.Libraries[j]
-			if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) {
-				lib.Whl = remotePath
-			}
-			if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) {
-				lib.Jar = remotePath
-			}
-		}
-
-		// Rewrite artifact path in job task libraries for ForEachTask
-		if task.ForEachTask != nil {
-			forEachTask := task.ForEachTask
-			for j := range forEachTask.Task.Libraries {
-				lib := &forEachTask.Task.Libraries[j]
-				if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) {
-					lib.Whl = remotePath
-				}
-				if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) {
-					lib.Jar = remotePath
-				}
-			}
-		}
-	}
-
-	// Rewrite artifact path in job environments
-	for i := range job.Environments {
-		env := &job.Environments[i]
-		if env.Spec == nil {
-			continue
-		}
-
-		for j := range env.Spec.Dependencies {
-			lib := env.Spec.Dependencies[j]
-			if isArtifactMatchLibrary(f, lib, b) {
-				env.Spec.Dependencies[j] = remotePath
-			}
-		}
-	}
-}
-
-func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool {
-	if !filepath.IsAbs(libPath) {
-		libPath = filepath.Join(b.RootPath, libPath)
-	}
-
-	// libPath can be a glob pattern, so do the match first
-	matches, err := filepath.Glob(libPath)
-	if err != nil {
-		return false
-	}
-
-	for _, m := range matches {
-		if m == f.Source {
-			return true
-		}
-	}
-
-	return false
-}
-
-// Function to upload artifact file to Workspace
-func uploadArtifactFile(ctx context.Context, file string, client filer.Filer) error {
-	raw, err := os.ReadFile(file)
-	if err != nil {
-		return fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err))
-	}
-
-	filename := filepath.Base(file)
-	err = client.Write(ctx, filename, bytes.NewReader(raw), filer.OverwriteIfExists, filer.CreateParentDirectories)
-	if err != nil {
-		return fmt.Errorf("unable to import %s: %w", filename, err)
-	}
-
-	return nil
-}
-
-func getUploadBasePath(b *bundle.Bundle) (string, error) {
-	artifactPath := b.Config.Workspace.ArtifactPath
-	if artifactPath == "" {
-		return "", fmt.Errorf("remote artifact path not configured")
-	}
-
-	return path.Join(artifactPath, ".internal"), nil
-}
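Taken together, this refactor replaces the monolithic upload-time logic with a prepare/build/expand pipeline. A hedged sketch of how the pieces now compose (the sequencing is illustrative; the real wiring lives in the bundle phases, and BuildAll is assumed to exist alongside the PrepareAll added below):

    // PrepareAll makes artifact paths absolute and cleans stale dist folders;
    // BuildAll runs each artifact's build command and, per artifact, appends
    // the expandGlobs mutator so files produced by the build are picked up.
    diags := bundle.Apply(ctx, b, bundle.Seq(
        artifacts.PrepareAll(),
        artifacts.BuildAll(),
    ))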
@@ -1,107 +0,0 @@
-package artifacts
-
-import (
-	"context"
-	"path/filepath"
-	"testing"
-
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/resources"
-	mockfiler "github.com/databricks/cli/internal/mocks/libs/filer"
-	"github.com/databricks/cli/internal/testutil"
-	"github.com/databricks/cli/libs/filer"
-	"github.com/databricks/databricks-sdk-go/service/compute"
-	"github.com/databricks/databricks-sdk-go/service/jobs"
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/require"
-)
-
-func TestArtifactUpload(t *testing.T) {
-	tmpDir := t.TempDir()
-	whlFolder := filepath.Join(tmpDir, "whl")
-	testutil.Touch(t, whlFolder, "source.whl")
-	whlLocalPath := filepath.Join(whlFolder, "source.whl")
-
-	b := &bundle.Bundle{
-		RootPath: tmpDir,
-		Config: config.Root{
-			Workspace: config.Workspace{
-				ArtifactPath: "/foo/bar/artifacts",
-			},
-			Artifacts: config.Artifacts{
-				"whl": {
-					Type: config.ArtifactPythonWheel,
-					Files: []config.ArtifactFile{
-						{Source: whlLocalPath},
-					},
-				},
-			},
-			Resources: config.Resources{
-				Jobs: map[string]*resources.Job{
-					"job": {
-						JobSettings: &jobs.JobSettings{
-							Tasks: []jobs.Task{
-								{
-									Libraries: []compute.Library{
-										{
-											Whl: filepath.Join("whl", "*.whl"),
-										},
-										{
-											Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
-										},
-									},
-								},
-								{
-									ForEachTask: &jobs.ForEachTask{
-										Task: jobs.Task{
-											Libraries: []compute.Library{
-												{
-													Whl: filepath.Join("whl", "*.whl"),
-												},
-												{
-													Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
-												},
-											},
-										},
-									},
-								},
-							},
-							Environments: []jobs.JobEnvironment{
-								{
-									Spec: &compute.Environment{
-										Dependencies: []string{
-											filepath.Join("whl", "source.whl"),
-											"/Workspace/Users/foo@bar.com/mywheel.whl",
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	artifact := b.Config.Artifacts["whl"]
-	mockFiler := mockfiler.NewMockFiler(t)
-	mockFiler.EXPECT().Write(
-		mock.Anything,
-		filepath.Join("source.whl"),
-		mock.AnythingOfType("*bytes.Reader"),
-		filer.OverwriteIfExists,
-		filer.CreateParentDirectories,
-	).Return(nil)
-
-	err := uploadArtifact(context.Background(), b, artifact, "/foo/bar/artifacts", mockFiler)
-	require.NoError(t, err)
-
-	// Test that libraries path is updated
-	require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl)
-	require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl)
-	require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
-	require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
-	require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl)
-	require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl)
-}
@@ -29,6 +29,5 @@ func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {

 	return bundle.Apply(ctx, b, bundle.Seq(
 		whl.DetectPackage(),
-		whl.DefineArtifactsFromLibraries(),
 	))
 }
@@ -3,10 +3,8 @@ package artifacts
 import (
 	"context"
 	"fmt"
-	"path/filepath"

 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/diag"
 )

@@ -35,35 +33,7 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}

-	// Check if source paths are absolute, if not, make them absolute
-	for k := range artifact.Files {
-		f := &artifact.Files[k]
-		if !filepath.IsAbs(f.Source) {
-			dirPath := filepath.Dir(artifact.ConfigFilePath)
-			f.Source = filepath.Join(dirPath, f.Source)
-		}
-	}
-
-	// Expand any glob reference in files source path
-	files := make([]config.ArtifactFile, 0, len(artifact.Files))
-	for _, f := range artifact.Files {
-		matches, err := filepath.Glob(f.Source)
-		if err != nil {
-			return diag.Errorf("unable to find files for %s: %v", f.Source, err)
-		}
-
-		if len(matches) == 0 {
-			return diag.Errorf("no files found for %s", f.Source)
-		}
-
-		for _, match := range matches {
-			files = append(files, config.ArtifactFile{
-				Source: match,
-			})
-		}
-	}
-
-	artifact.Files = files
+	var mutators []bundle.Mutator

 	// Skip building if the build command is not specified or inferred
 	if artifact.BuildCommand == "" {

@@ -72,18 +42,16 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		if len(artifact.Files) == 0 {
 			return diag.Errorf("misconfigured artifact: please specify 'build' or 'files' property")
 		}
-		return nil
-
-	// If artifact path is not provided, use bundle root dir
-	if artifact.Path == "" {
-		artifact.Path = b.RootPath
-	}
-
-	if !filepath.IsAbs(artifact.Path) {
-		dirPath := filepath.Dir(artifact.ConfigFilePath)
-		artifact.Path = filepath.Join(dirPath, artifact.Path)
-	}
-
-	return bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name))
+		// We can skip calling the build mutator if there is no build command,
+		// but we still need to expand glob references in the files source path.
+	} else {
+		mutators = append(mutators, getBuildMutator(artifact.Type, m.name))
+	}
+
+	// We need to expand glob references after the build mutator is applied because
+	// if we do it before, any files that are generated by the build command will
+	// not be included in artifact.Files and thus will not be uploaded.
+	mutators = append(mutators, &expandGlobs{name: m.name})
+	return bundle.Apply(ctx, b, bundle.Seq(mutators...))
 }
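The new comment above is the crux of the fix: expanding globs before the build would miss files the build produces. A self-contained illustration of the ordering issue using only the standard library (paths and file names are invented for the demo):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        dir, _ := os.MkdirTemp("", "demo")
        pattern := filepath.Join(dir, "dist", "*.whl")

        before, _ := filepath.Glob(pattern)
        fmt.Println("before build:", len(before)) // 0: nothing matches yet

        // Stand-in for the artifact's build command writing its output.
        os.MkdirAll(filepath.Join(dir, "dist"), 0o755)
        os.WriteFile(filepath.Join(dir, "dist", "pkg-0.1-py3-none-any.whl"), nil, 0o644)

        after, _ := filepath.Glob(pattern)
        fmt.Println("after build:", len(after)) // 1: the built wheel is found
    }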
@@ -0,0 +1,110 @@
+package artifacts
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type expandGlobs struct {
+	name string
+}
+
+func (m *expandGlobs) Name() string {
+	return fmt.Sprintf("artifacts.ExpandGlobs(%s)", m.name)
+}
+
+func createGlobError(v dyn.Value, p dyn.Path, message string) diag.Diagnostic {
+	// The pattern contained in v is an absolute path.
+	// Make it relative to the value's location to make it more readable.
+	source := v.MustString()
+	if l := v.Location(); l.File != "" {
+		rel, err := filepath.Rel(filepath.Dir(l.File), source)
+		if err == nil {
+			source = rel
+		}
+	}
+
+	return diag.Diagnostic{
+		Severity:  diag.Error,
+		Summary:   fmt.Sprintf("%s: %s", source, message),
+		Locations: []dyn.Location{v.Location()},
+
+		Paths: []dyn.Path{
+			// Hack to clone the path. This path copy is mutable.
+			// To be addressed in a later PR.
+			p.Append(),
+		},
+	}
+}
+
+func (m *expandGlobs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	// Base path for this mutator.
+	// This path is set with the list of expanded globs when done.
+	base := dyn.NewPath(
+		dyn.Key("artifacts"),
+		dyn.Key(m.name),
+		dyn.Key("files"),
+	)
+
+	// Pattern to match the source key in the files sequence.
+	pattern := dyn.NewPatternFromPath(base).Append(
+		dyn.AnyIndex(),
+		dyn.Key("source"),
+	)
+
+	var diags diag.Diagnostics
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		var output []dyn.Value
+		_, err := dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			if v.Kind() != dyn.KindString {
+				return v, nil
+			}
+
+			source := v.MustString()
+
+			// Expand any glob reference in files source path
+			matches, err := filepath.Glob(source)
+			if err != nil {
+				diags = diags.Append(createGlobError(v, p, err.Error()))
+
+				// Continue processing and leave this value unchanged.
+				return v, nil
+			}
+
+			if len(matches) == 0 {
+				diags = diags.Append(createGlobError(v, p, "no matching files"))
+
+				// Continue processing and leave this value unchanged.
+				return v, nil
+			}
+
+			for _, match := range matches {
+				output = append(output, dyn.V(
+					map[string]dyn.Value{
+						"source": dyn.NewValue(match, v.Locations()),
+					},
+				))
+			}
+
+			return v, nil
+		})
+
+		if err != nil || diags.HasError() {
+			return v, err
+		}
+
+		// Set the expanded globs back into the configuration.
+		return dyn.SetByPath(v, base, dyn.V(output))
+	})
+
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	return diags
+}
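The mutator's two diagnostic paths mirror filepath.Glob's contract: it returns an error only for a malformed pattern, and a nil error with an empty result when a valid pattern matches nothing. A small standalone sketch:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        _, err := filepath.Glob("a[.txt") // malformed: unclosed character class
        fmt.Println(err)                  // "syntax error in pattern"

        matches, err := filepath.Glob("no-such-prefix-*.txt")
        fmt.Println(len(matches), err) // 0 <nil>: valid pattern, no matches
    }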
@@ -0,0 +1,156 @@
+package artifacts
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/databricks/cli/internal/testutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestExpandGlobs_Nominal(t *testing.T) {
+	tmpDir := t.TempDir()
+
+	testutil.Touch(t, tmpDir, "aa1.txt")
+	testutil.Touch(t, tmpDir, "aa2.txt")
+	testutil.Touch(t, tmpDir, "bb.txt")
+	testutil.Touch(t, tmpDir, "bc.txt")
+
+	b := &bundle.Bundle{
+		RootPath: tmpDir,
+		Config: config.Root{
+			Artifacts: config.Artifacts{
+				"test": {
+					Files: []config.ArtifactFile{
+						{Source: "./aa*.txt"},
+						{Source: "./b[bc].txt"},
+					},
+				},
+			},
+		},
+	}
+
+	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, bundle.Seq(
+		// Run prepare first to make paths absolute.
+		&prepare{"test"},
+		&expandGlobs{"test"},
+	))
+	require.NoError(t, diags.Error())
+
+	// Assert that the expanded paths are correct.
+	a, ok := b.Config.Artifacts["test"]
+	if !assert.True(t, ok) {
+		return
+	}
+	assert.Len(t, a.Files, 4)
+	assert.Equal(t, filepath.Join(tmpDir, "aa1.txt"), a.Files[0].Source)
+	assert.Equal(t, filepath.Join(tmpDir, "aa2.txt"), a.Files[1].Source)
+	assert.Equal(t, filepath.Join(tmpDir, "bb.txt"), a.Files[2].Source)
+	assert.Equal(t, filepath.Join(tmpDir, "bc.txt"), a.Files[3].Source)
+}
+
+func TestExpandGlobs_InvalidPattern(t *testing.T) {
+	tmpDir := t.TempDir()
+
+	b := &bundle.Bundle{
+		RootPath: tmpDir,
+		Config: config.Root{
+			Artifacts: config.Artifacts{
+				"test": {
+					Files: []config.ArtifactFile{
+						{Source: "a[.txt"},
+						{Source: "./a[.txt"},
+						{Source: "../a[.txt"},
+						{Source: "subdir/a[.txt"},
+					},
+				},
+			},
+		},
+	}
+
+	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, bundle.Seq(
+		// Run prepare first to make paths absolute.
+		&prepare{"test"},
+		&expandGlobs{"test"},
+	))
+
+	assert.Len(t, diags, 4)
+	assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[0].Summary)
+	assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[0].Locations[0].File)
+	assert.Equal(t, "artifacts.test.files[0].source", diags[0].Paths[0].String())
+	assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[1].Summary)
+	assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[1].Locations[0].File)
+	assert.Equal(t, "artifacts.test.files[1].source", diags[1].Paths[0].String())
+	assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("../a[.txt")), diags[2].Summary)
+	assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[2].Locations[0].File)
+	assert.Equal(t, "artifacts.test.files[2].source", diags[2].Paths[0].String())
+	assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("subdir/a[.txt")), diags[3].Summary)
+	assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[3].Locations[0].File)
+	assert.Equal(t, "artifacts.test.files[3].source", diags[3].Paths[0].String())
+}
+
+func TestExpandGlobs_NoMatches(t *testing.T) {
+	tmpDir := t.TempDir()
+
+	testutil.Touch(t, tmpDir, "a1.txt")
+	testutil.Touch(t, tmpDir, "a2.txt")
+	testutil.Touch(t, tmpDir, "b1.txt")
+	testutil.Touch(t, tmpDir, "b2.txt")
+
+	b := &bundle.Bundle{
+		RootPath: tmpDir,
+		Config: config.Root{
+			Artifacts: config.Artifacts{
+				"test": {
+					Files: []config.ArtifactFile{
+						{Source: "a*.txt"},
+						{Source: "b*.txt"},
+						{Source: "c*.txt"},
+						{Source: "d*.txt"},
+					},
+				},
+			},
+		},
+	}
+
+	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, bundle.Seq(
+		// Run prepare first to make paths absolute.
+		&prepare{"test"},
+		&expandGlobs{"test"},
+	))
+
+	assert.Len(t, diags, 2)
+	assert.Equal(t, "c*.txt: no matching files", diags[0].Summary)
+	assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[0].Locations[0].File)
+	assert.Equal(t, "artifacts.test.files[2].source", diags[0].Paths[0].String())
+	assert.Equal(t, "d*.txt: no matching files", diags[1].Summary)
+	assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[1].Locations[0].File)
+	assert.Equal(t, "artifacts.test.files[3].source", diags[1].Paths[0].String())
+
+	// Assert that the original paths are unchanged.
+	a, ok := b.Config.Artifacts["test"]
+	if !assert.True(t, ok) {
+		return
+	}
+
+	assert.Len(t, a.Files, 4)
+	assert.Equal(t, "a*.txt", filepath.Base(a.Files[0].Source))
+	assert.Equal(t, "b*.txt", filepath.Base(a.Files[1].Source))
+	assert.Equal(t, "c*.txt", filepath.Base(a.Files[2].Source))
+	assert.Equal(t, "d*.txt", filepath.Base(a.Files[3].Source))
+}
@@ -0,0 +1,58 @@
+package artifacts
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+)
+
+func PrepareAll() bundle.Mutator {
+	return &all{
+		name: "Prepare",
+		fn:   prepareArtifactByName,
+	}
+}
+
+type prepare struct {
+	name string
+}
+
+func prepareArtifactByName(name string) (bundle.Mutator, error) {
+	return &prepare{name}, nil
+}
+
+func (m *prepare) Name() string {
+	return fmt.Sprintf("artifacts.Prepare(%s)", m.name)
+}
+
+func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	artifact, ok := b.Config.Artifacts[m.name]
+	if !ok {
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
+	}
+
+	l := b.Config.GetLocation("artifacts." + m.name)
+	dirPath := filepath.Dir(l.File)
+
+	// Check if source paths are absolute, if not, make them absolute
+	for k := range artifact.Files {
+		f := &artifact.Files[k]
+		if !filepath.IsAbs(f.Source) {
+			f.Source = filepath.Join(dirPath, f.Source)
+		}
+	}
+
+	// If artifact path is not provided, use bundle root dir
+	if artifact.Path == "" {
+		artifact.Path = b.RootPath
+	}
+
+	if !filepath.IsAbs(artifact.Path) {
+		artifact.Path = filepath.Join(dirPath, artifact.Path)
+	}
+
+	return bundle.Apply(ctx, b, getPrepareMutator(artifact.Type, m.name))
+}
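A minimal sketch of the path normalization prepare performs, assuming the artifact is declared in a config file at /root/resources/artifacts.yml (paths invented; Unix separators for brevity):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Directory of the config file that declares the artifact.
        dirPath := filepath.Dir("/root/resources/artifacts.yml")

        // Relative sources are resolved against that directory;
        // filepath.Join also cleans the result.
        source := "../dist/*.whl"
        if !filepath.IsAbs(source) {
            source = filepath.Join(dirPath, source)
        }
        fmt.Println(source) // /root/dist/*.whl
    }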
@@ -2,49 +2,18 @@ package artifacts

 import (
 	"context"
 	"fmt"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/libraries"
 	"github.com/databricks/cli/libs/diag"
-	"github.com/databricks/databricks-sdk-go/service/workspace"
+	"github.com/databricks/cli/libs/filer"
+	"github.com/databricks/cli/libs/log"
 )

-func UploadAll() bundle.Mutator {
-	return &all{
-		name: "Upload",
-		fn:   uploadArtifactByName,
-	}
-}
-
 func CleanUp() bundle.Mutator {
 	return &cleanUp{}
 }

-type upload struct {
-	name string
-}
-
-func uploadArtifactByName(name string) (bundle.Mutator, error) {
-	return &upload{name}, nil
-}
-
-func (m *upload) Name() string {
-	return fmt.Sprintf("artifacts.Upload(%s)", m.name)
-}
-
-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	artifact, ok := b.Config.Artifacts[m.name]
-	if !ok {
-		return diag.Errorf("artifact doesn't exist: %s", m.name)
-	}
-
-	if len(artifact.Files) == 0 {
-		return diag.Errorf("artifact source is not configured: %s", m.name)
-	}
-
-	return bundle.Apply(ctx, b, getUploadMutator(artifact.Type, m.name))
-}
-
 type cleanUp struct{}

 func (m *cleanUp) Name() string {

@@ -52,17 +21,23 @@ func (m *cleanUp) Name() string {
 }

 func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	uploadPath, err := getUploadBasePath(b)
+	uploadPath, err := libraries.GetUploadBasePath(b)
 	if err != nil {
 		return diag.FromErr(err)
 	}

-	b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{
-		Path:      uploadPath,
-		Recursive: true,
-	})
+	client, err := libraries.GetFilerForLibraries(b.WorkspaceClient(), uploadPath)
+	if err != nil {
+		return diag.FromErr(err)
+	}

-	err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, uploadPath)
+	// We intentionally ignore the error because it is not critical to the deployment
+	err = client.Delete(ctx, ".", filer.DeleteRecursively)
+	if err != nil {
+		log.Errorf(ctx, "failed to delete %s: %v", uploadPath, err)
+	}
+
+	err = client.Mkdir(ctx, ".")
+	if err != nil {
+		return diag.Errorf("unable to create directory for %s: %v", uploadPath, err)
+	}
@@ -1,109 +0,0 @@
-package artifacts
-
-import (
-	"context"
-	"os"
-	"path/filepath"
-	"testing"
-
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/internal/bundletest"
-	"github.com/databricks/cli/libs/diag"
-	"github.com/databricks/cli/libs/testfile"
-	"github.com/stretchr/testify/require"
-)
-
-type noop struct{}
-
-func (n *noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics {
-	return nil
-}
-
-func (n *noop) Name() string {
-	return "noop"
-}
-
-func TestExpandGlobFilesSource(t *testing.T) {
-	rootPath := t.TempDir()
-	err := os.Mkdir(filepath.Join(rootPath, "test"), 0755)
-	require.NoError(t, err)
-
-	t1 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar1.jar"))
-	t1.Close(t)
-
-	t2 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar2.jar"))
-	t2.Close(t)
-
-	b := &bundle.Bundle{
-		RootPath: rootPath,
-		Config: config.Root{
-			Artifacts: map[string]*config.Artifact{
-				"test": {
-					Type: "custom",
-					Files: []config.ArtifactFile{
-						{
-							Source: filepath.Join("..", "test", "*.jar"),
-						},
-					},
-				},
-			},
-		},
-	}
-
-	bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml"))
-
-	u := &upload{"test"}
-	uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
-		return &noop{}
-	}
-
-	bm := &build{"test"}
-	buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
-		return &noop{}
-	}
-
-	diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u))
-	require.NoError(t, diags.Error())
-
-	require.Equal(t, 2, len(b.Config.Artifacts["test"].Files))
-	require.Equal(t, filepath.Join(rootPath, "test", "myjar1.jar"), b.Config.Artifacts["test"].Files[0].Source)
-	require.Equal(t, filepath.Join(rootPath, "test", "myjar2.jar"), b.Config.Artifacts["test"].Files[1].Source)
-}
-
-func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) {
-	rootPath := t.TempDir()
-	err := os.Mkdir(filepath.Join(rootPath, "test"), 0755)
-	require.NoError(t, err)
-
-	b := &bundle.Bundle{
-		RootPath: rootPath,
-		Config: config.Root{
-			Artifacts: map[string]*config.Artifact{
-				"test": {
-					Type: "custom",
-					Files: []config.ArtifactFile{
-						{
-							Source: filepath.Join("..", "test", "myjar.jar"),
-						},
-					},
-				},
-			},
-		},
-	}
-
-	bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml"))
-
-	u := &upload{"test"}
-	uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
-		return &noop{}
-	}
-
-	bm := &build{"test"}
-	buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
-		return &noop{}
-	}
-
-	diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u))
-	require.ErrorContains(t, diags.Error(), "no files found for")
-}
@@ -27,9 +27,9 @@ func (m *detectPkg) Name() string {
 }

 func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
-	if len(wheelTasks) == 0 {
-		log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect")
+	tasks := libraries.FindTasksWithLocalLibraries(b)
+	if len(tasks) == 0 {
+		log.Infof(ctx, "No local tasks in databricks.yml config, skipping auto detect")
 		return nil
 	}
 	log.Infof(ctx, "Detecting Python wheel project...")
@@ -3,7 +3,6 @@ package whl
 import (
 	"context"
 	"fmt"
-	"os"
 	"path/filepath"

 	"github.com/databricks/cli/bundle"

@@ -36,18 +35,14 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {

 	cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name))

-	dir := artifact.Path
-
-	distPath := filepath.Join(dir, "dist")
-	os.RemoveAll(distPath)
-	python.CleanupWheelFolder(dir)
-
 	out, err := artifact.Build(ctx)
 	if err != nil {
 		return diag.Errorf("build failed %s, error: %v, output: %s", m.name, err, out)
 	}
 	log.Infof(ctx, "Build succeeded")

+	dir := artifact.Path
+	distPath := filepath.Join(artifact.Path, "dist")
 	wheels := python.FindFilesWithSuffixInPath(distPath, ".whl")
 	if len(wheels) == 0 {
 		return diag.Errorf("cannot find built wheel in %s for package %s", dir, m.name)
@@ -1,74 +0,0 @@
-package whl
-
-import (
-	"context"
-	"path/filepath"
-
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/libraries"
-	"github.com/databricks/cli/libs/diag"
-	"github.com/databricks/cli/libs/log"
-)
-
-type fromLibraries struct{}
-
-func DefineArtifactsFromLibraries() bundle.Mutator {
-	return &fromLibraries{}
-}
-
-func (m *fromLibraries) Name() string {
-	return "artifacts.whl.DefineArtifactsFromLibraries"
-}
-
-func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	if len(b.Config.Artifacts) != 0 {
-		log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined")
-		return nil
-	}
-
-	tasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
-	for _, task := range tasks {
-		for _, lib := range task.Libraries {
-			matchAndAdd(ctx, lib.Whl, b)
-		}
-	}
-
-	envs := libraries.FindAllEnvironments(b)
-	for _, jobEnvs := range envs {
-		for _, env := range jobEnvs {
-			if env.Spec != nil {
-				for _, dep := range env.Spec.Dependencies {
-					if libraries.IsEnvironmentDependencyLocal(dep) {
-						matchAndAdd(ctx, dep, b)
-					}
-				}
-			}
-		}
-	}
-
-	return nil
-}
-
-func matchAndAdd(ctx context.Context, lib string, b *bundle.Bundle) {
-	matches, err := filepath.Glob(filepath.Join(b.RootPath, lib))
-	// File referenced from libraries section does not exists, skipping
-	if err != nil {
-		return
-	}
-
-	for _, match := range matches {
-		name := filepath.Base(match)
-		if b.Config.Artifacts == nil {
-			b.Config.Artifacts = make(map[string]*config.Artifact)
-		}
-
-		log.Debugf(ctx, "Adding an artifact block for %s", match)
-		b.Config.Artifacts[name] = &config.Artifact{
-			Files: []config.ArtifactFile{
-				{Source: match},
-			},
-			Type: config.ArtifactPythonWheel,
-		}
-	}
-}
@@ -15,6 +15,8 @@ type infer struct {

 func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact := b.Config.Artifacts[m.name]
+
+	// TODO use python.DetectVEnvExecutable once bundle has a way to specify venv path
 	py, err := python.DetectExecutable(ctx)
 	if err != nil {
 		return diag.FromErr(err)
@@ -0,0 +1,53 @@
+package whl
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/cli/libs/python"
+)
+
+type prepare struct {
+	name string
+}
+
+func Prepare(name string) bundle.Mutator {
+	return &prepare{
+		name: name,
+	}
+}
+
+func (m *prepare) Name() string {
+	return fmt.Sprintf("artifacts.whl.Prepare(%s)", m.name)
+}
+
+func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	artifact, ok := b.Config.Artifacts[m.name]
+	if !ok {
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
+	}
+
+	// If there is no build command for the artifact, we don't need to clean up the dist folder before the build.
+	if artifact.BuildCommand == "" {
+		return nil
+	}
+
+	dir := artifact.Path
+
+	distPath := filepath.Join(dir, "dist")
+
+	// If we have multiple artifacts configured, prepare will be called multiple times.
+	// The first time we will remove the folders, other times will be no-op.
+	err := os.RemoveAll(distPath)
+	if err != nil {
+		log.Infof(ctx, "Failed to remove dist folder: %v", err)
+	}
+	python.CleanupWheelFolder(dir)
+
+	return nil
+}
@@ -39,6 +39,14 @@ type Bundle struct {
 	// Exclusively use this field for filesystem operations.
 	BundleRoot vfs.Path

+	// SyncRoot is a virtual filesystem path to the root directory of the files that are synchronized to the workspace.
+	// It can be an ancestor to [BundleRoot], but not a descendant; that is, [SyncRoot] must contain [BundleRoot].
+	SyncRoot vfs.Path
+
+	// SyncRootPath is the local path to the root directory of files that are synchronized to the workspace.
+	// It is equal to `SyncRoot.Native()` and included as a dedicated field for convenient access.
+	SyncRootPath string
+
 	Config config.Root

 	// Metadata about the bundle deployment. This is the interface Databricks services
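A hedged sketch of the invariant stated in the new doc comment, checked with plain strings rather than vfs.Path (the helper name is made up for this illustration):

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // contains reports whether descendant is at or below ancestor.
    func contains(ancestor, descendant string) bool {
        rel, err := filepath.Rel(ancestor, descendant)
        return err == nil && rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
    }

    func main() {
        fmt.Println(contains("/repo", "/repo/bundle")) // true: valid SyncRoot/BundleRoot pair
        fmt.Println(contains("/repo/bundle", "/repo")) // false: SyncRoot may not be a descendant
    }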
@@ -28,6 +28,10 @@ func (r ReadOnlyBundle) BundleRoot() vfs.Path {
 	return r.b.BundleRoot
 }

+func (r ReadOnlyBundle) SyncRoot() vfs.Path {
+	return r.b.SyncRoot
+}
+
 func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient {
 	return r.b.WorkspaceClient()
 }
@@ -4,18 +4,11 @@ import (
 	"context"
 	"fmt"

-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/exec"
 )

 type Artifacts map[string]*Artifact

-func (artifacts Artifacts) ConfigureConfigFilePath() {
-	for _, artifact := range artifacts {
-		artifact.ConfigureConfigFilePath()
-	}
-}
-
 type ArtifactType string

 const ArtifactPythonWheel ArtifactType = `whl`

@@ -40,8 +33,6 @@ type Artifact struct {
 	BuildCommand string `json:"build,omitempty"`

 	Executable exec.ExecutableType `json:"executable,omitempty"`
-
-	paths.Paths
 }

 func (a *Artifact) Build(ctx context.Context) ([]byte, error) {
@@ -36,9 +36,15 @@ type PyDABs struct {

 	// VEnvPath is path to the virtual environment.
 	//
-	// Required if PyDABs is enabled. PyDABs will load the code in the specified
-	// environment.
+	// If enabled, PyDABs will execute code within this environment. If disabled,
+	// it defaults to using the Python interpreter available in the current shell.
 	VEnvPath string `json:"venv_path,omitempty"`

+	// Import contains a list of Python packages with PyDABs code.
+	//
+	// These packages are imported to discover resources, resource generators, and mutators.
+	// This list can include namespace packages, which causes the import of nested packages.
+	Import []string `json:"import,omitempty"`
 }

 type Command string
@ -22,7 +22,7 @@ func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) {
        tasks = append(tasks, v)
    }
    // We're using location lines to define the order of keys in exported YAML.
    value["tasks"] = dyn.NewValue(tasks, dyn.Location{Line: jobOrder.Get("tasks")})
    value["tasks"] = dyn.NewValue(tasks, []dyn.Location{{Line: jobOrder.Get("tasks")}})
}

return yamlsaver.ConvertToMapValue(job.Settings, jobOrder, []string{"format", "new_cluster", "existing_cluster_id"}, value)
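An illustrative sketch of the "location lines define key order" idea in the comment above; jobOrder and the yamlsaver API are not shown here, so a plain map stands in for them.

package main

import (
    "fmt"
    "sort"
)

func main() {
    // Synthetic "line numbers" that encode the desired key order in the exported YAML.
    order := map[string]int{"name": 1, "tasks": 2, "tags": 3}
    keys := []string{"tags", "tasks", "name"}
    sort.Slice(keys, func(i, j int) bool { return order[keys[i]] < order[keys[j]] })
    fmt.Println(keys) // [name tasks tags]
}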
@ -0,0 +1,208 @@
package mutator

import (
    "context"
    "path"
    "slices"
    "sort"
    "strings"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/textutil"
    "github.com/databricks/databricks-sdk-go/service/catalog"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/databricks/databricks-sdk-go/service/ml"
)

type applyPresets struct{}

// Apply all presets, e.g. the prefix preset that
// adds a prefix to the names of all resources.
func ApplyPresets() *applyPresets {
    return &applyPresets{}
}

type Tag struct {
    Key   string
    Value string
}

func (m *applyPresets) Name() string {
    return "ApplyPresets"
}

func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    if d := validatePauseStatus(b); d != nil {
        return d
    }

    r := b.Config.Resources
    t := b.Config.Presets
    prefix := t.NamePrefix
    tags := toTagArray(t.Tags)

    // Jobs presets: Prefix, Tags, JobsMaxConcurrentRuns, TriggerPauseStatus
    for _, j := range r.Jobs {
        j.Name = prefix + j.Name
        if j.Tags == nil {
            j.Tags = make(map[string]string)
        }
        for _, tag := range tags {
            if j.Tags[tag.Key] == "" {
                j.Tags[tag.Key] = tag.Value
            }
        }
        if j.MaxConcurrentRuns == 0 {
            j.MaxConcurrentRuns = t.JobsMaxConcurrentRuns
        }
        if t.TriggerPauseStatus != "" {
            paused := jobs.PauseStatusPaused
            if t.TriggerPauseStatus == config.Unpaused {
                paused = jobs.PauseStatusUnpaused
            }

            if j.Schedule != nil && j.Schedule.PauseStatus == "" {
                j.Schedule.PauseStatus = paused
            }
            if j.Continuous != nil && j.Continuous.PauseStatus == "" {
                j.Continuous.PauseStatus = paused
            }
            if j.Trigger != nil && j.Trigger.PauseStatus == "" {
                j.Trigger.PauseStatus = paused
            }
        }
    }

    // Pipelines presets: Prefix, PipelinesDevelopment
    for i := range r.Pipelines {
        r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
        if config.IsExplicitlyEnabled(t.PipelinesDevelopment) {
            r.Pipelines[i].Development = true
        }
        if t.TriggerPauseStatus == config.Paused {
            r.Pipelines[i].Continuous = false
        }

        // As of 2024-06, pipelines don't yet support tags
    }

    // Models presets: Prefix, Tags
    for _, m := range r.Models {
        m.Name = prefix + m.Name
        for _, t := range tags {
            exists := slices.ContainsFunc(m.Tags, func(modelTag ml.ModelTag) bool {
                return modelTag.Key == t.Key
            })
            if !exists {
                // Only add this tag if the resource didn't include any tag that overrides its value.
                m.Tags = append(m.Tags, ml.ModelTag{Key: t.Key, Value: t.Value})
            }
        }
    }

    // Experiments presets: Prefix, Tags
    for _, e := range r.Experiments {
        filepath := e.Name
        dir := path.Dir(filepath)
        base := path.Base(filepath)
        if dir == "." {
            e.Name = prefix + base
        } else {
            e.Name = dir + "/" + prefix + base
        }
        for _, t := range tags {
            exists := false
            for _, experimentTag := range e.Tags {
                if experimentTag.Key == t.Key {
                    exists = true
                    break
                }
            }
            if !exists {
                e.Tags = append(e.Tags, ml.ExperimentTag{Key: t.Key, Value: t.Value})
            }
        }
    }

    // Model serving endpoint presets: Prefix
    for i := range r.ModelServingEndpoints {
        r.ModelServingEndpoints[i].Name = normalizePrefix(prefix) + r.ModelServingEndpoints[i].Name

        // As of 2024-06, model serving endpoints don't yet support tags
    }

    // Registered models presets: Prefix
    for i := range r.RegisteredModels {
        r.RegisteredModels[i].Name = normalizePrefix(prefix) + r.RegisteredModels[i].Name

        // As of 2024-06, registered models don't yet support tags
    }

    // Quality monitors presets: Prefix
    if t.TriggerPauseStatus == config.Paused {
        for i := range r.QualityMonitors {
            // Remove all schedules from monitors, since they don't support pausing/unpausing.
            // Quality monitors might support the "pause" property in the future, so at the
            // CLI level we do respect that property if it is set to "unpaused."
            if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
                r.QualityMonitors[i].Schedule = nil
            }
        }
    }

    // Schemas: Prefix
    for i := range r.Schemas {
        r.Schemas[i].Name = normalizePrefix(prefix) + r.Schemas[i].Name
        // HTTP API for schemas doesn't yet support tags. It's only supported in
        // the Databricks UI and via the SQL API.
    }

    return nil
}

func validatePauseStatus(b *bundle.Bundle) diag.Diagnostics {
    p := b.Config.Presets.TriggerPauseStatus
    if p == "" || p == config.Paused || p == config.Unpaused {
        return nil
    }
    return diag.Diagnostics{{
        Summary:   "Invalid value for trigger_pause_status, should be PAUSED or UNPAUSED",
        Severity:  diag.Error,
        Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
    }}
}

// toTagArray converts a map of tags to an array of tags.
// We sort tags to ensure stable ordering.
func toTagArray(tags map[string]string) []Tag {
    var tagArray []Tag
    if tags == nil {
        return tagArray
    }
    for key, value := range tags {
        tagArray = append(tagArray, Tag{Key: key, Value: value})
    }
    sort.Slice(tagArray, func(i, j int) bool {
        return tagArray[i].Key < tagArray[j].Key
    })
    return tagArray
}

// normalizePrefix normalizes prefixes like '[dev lennart] ' to 'dev_lennart_'.
// We leave Unicode letters and numbers but remove all "special characters."
func normalizePrefix(prefix string) string {
    prefix = strings.ReplaceAll(prefix, "[", "")
    prefix = strings.Trim(prefix, " ")

    // If the prefix ends with a ']', we add an underscore to the end.
    // This makes sure that we get names like "dev_user_endpoint" instead of "dev_userendpoint"
    suffix := ""
    if strings.HasSuffix(prefix, "]") {
        suffix = "_"
    }

    return textutil.NormalizeString(prefix) + suffix
}
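A standalone sketch of the prefix normalization above; normalizeString here is an assumption about what textutil.NormalizeString does (lowercase, keep Unicode letters and digits, collapse everything else to underscores), not the actual library code.

package main

import (
    "fmt"
    "strings"
    "unicode"
)

// normalizeString is a stand-in for textutil.NormalizeString.
func normalizeString(s string) string {
    var b strings.Builder
    pendingSep := false
    for _, r := range strings.ToLower(s) {
        if unicode.IsLetter(r) || unicode.IsDigit(r) {
            if pendingSep && b.Len() > 0 {
                b.WriteRune('_')
            }
            pendingSep = false
            b.WriteRune(r)
        } else {
            pendingSep = true
        }
    }
    return b.String()
}

func normalizePrefix(prefix string) string {
    prefix = strings.ReplaceAll(prefix, "[", "")
    prefix = strings.Trim(prefix, " ")
    suffix := ""
    if strings.HasSuffix(prefix, "]") {
        suffix = "_"
    }
    return normalizeString(prefix) + suffix
}

func main() {
    fmt.Println(normalizePrefix("[dev lennart] ")) // dev_lennart_
}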
@ -0,0 +1,253 @@
package mutator_test

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/mutator"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/databricks-sdk-go/service/catalog"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/stretchr/testify/require"
)

func TestApplyPresetsPrefix(t *testing.T) {
    tests := []struct {
        name   string
        prefix string
        job    *resources.Job
        want   string
    }{
        {
            name:   "add prefix to job",
            prefix: "prefix-",
            job: &resources.Job{
                JobSettings: &jobs.JobSettings{
                    Name: "job1",
                },
            },
            want: "prefix-job1",
        },
        {
            name:   "add empty prefix to job",
            prefix: "",
            job: &resources.Job{
                JobSettings: &jobs.JobSettings{
                    Name: "job1",
                },
            },
            want: "job1",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            b := &bundle.Bundle{
                Config: config.Root{
                    Resources: config.Resources{
                        Jobs: map[string]*resources.Job{
                            "job1": tt.job,
                        },
                    },
                    Presets: config.Presets{
                        NamePrefix: tt.prefix,
                    },
                },
            }

            ctx := context.Background()
            diag := bundle.Apply(ctx, b, mutator.ApplyPresets())

            if diag.HasError() {
                t.Fatalf("unexpected error: %v", diag)
            }

            require.Equal(t, tt.want, b.Config.Resources.Jobs["job1"].Name)
        })
    }
}

func TestApplyPresetsPrefixForUcSchema(t *testing.T) {
    tests := []struct {
        name   string
        prefix string
        schema *resources.Schema
        want   string
    }{
        {
            name:   "add prefix to schema",
            prefix: "[prefix]",
            schema: &resources.Schema{
                CreateSchema: &catalog.CreateSchema{
                    Name: "schema1",
                },
            },
            want: "prefix_schema1",
        },
        {
            name:   "add empty prefix to schema",
            prefix: "",
            schema: &resources.Schema{
                CreateSchema: &catalog.CreateSchema{
                    Name: "schema1",
                },
            },
            want: "schema1",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            b := &bundle.Bundle{
                Config: config.Root{
                    Resources: config.Resources{
                        Schemas: map[string]*resources.Schema{
                            "schema1": tt.schema,
                        },
                    },
                    Presets: config.Presets{
                        NamePrefix: tt.prefix,
                    },
                },
            }

            ctx := context.Background()
            diag := bundle.Apply(ctx, b, mutator.ApplyPresets())

            if diag.HasError() {
                t.Fatalf("unexpected error: %v", diag)
            }

            require.Equal(t, tt.want, b.Config.Resources.Schemas["schema1"].Name)
        })
    }
}

func TestApplyPresetsTags(t *testing.T) {
    tests := []struct {
        name string
        tags map[string]string
        job  *resources.Job
        want map[string]string
    }{
        {
            name: "add tags to job",
            tags: map[string]string{"env": "dev"},
            job: &resources.Job{
                JobSettings: &jobs.JobSettings{
                    Name: "job1",
                    Tags: nil,
                },
            },
            want: map[string]string{"env": "dev"},
        },
        {
            name: "merge tags with existing job tags",
            tags: map[string]string{"env": "dev"},
            job: &resources.Job{
                JobSettings: &jobs.JobSettings{
                    Name: "job1",
                    Tags: map[string]string{"team": "data"},
                },
            },
            want: map[string]string{"env": "dev", "team": "data"},
        },
        {
            name: "don't override existing job tags",
            tags: map[string]string{"env": "dev"},
            job: &resources.Job{
                JobSettings: &jobs.JobSettings{
                    Name: "job1",
                    Tags: map[string]string{"env": "prod"},
                },
            },
            want: map[string]string{"env": "prod"},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            b := &bundle.Bundle{
                Config: config.Root{
                    Resources: config.Resources{
                        Jobs: map[string]*resources.Job{
                            "job1": tt.job,
                        },
                    },
                    Presets: config.Presets{
                        Tags: tt.tags,
                    },
                },
            }

            ctx := context.Background()
            diag := bundle.Apply(ctx, b, mutator.ApplyPresets())

            if diag.HasError() {
                t.Fatalf("unexpected error: %v", diag)
            }

            tags := b.Config.Resources.Jobs["job1"].Tags
            require.Equal(t, tt.want, tags)
        })
    }
}

func TestApplyPresetsJobsMaxConcurrentRuns(t *testing.T) {
    tests := []struct {
        name    string
        job     *resources.Job
        setting int
        want    int
    }{
        {
            name: "set max concurrent runs",
            job: &resources.Job{
                JobSettings: &jobs.JobSettings{
                    Name:              "job1",
                    MaxConcurrentRuns: 0,
                },
            },
            setting: 5,
            want:    5,
        },
        {
            name: "do not override existing max concurrent runs",
            job: &resources.Job{
                JobSettings: &jobs.JobSettings{
                    Name:              "job1",
                    MaxConcurrentRuns: 3,
                },
            },
            setting: 5,
            want:    3,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            b := &bundle.Bundle{
                Config: config.Root{
                    Resources: config.Resources{
                        Jobs: map[string]*resources.Job{
                            "job1": tt.job,
                        },
                    },
                    Presets: config.Presets{
                        JobsMaxConcurrentRuns: tt.setting,
                    },
                },
            }
            ctx := context.Background()
            diag := bundle.Apply(ctx, b, mutator.ApplyPresets())

            if diag.HasError() {
                t.Fatalf("unexpected error: %v", diag)
            }

            require.Equal(t, tt.want, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns)
        })
    }
}
@ -24,7 +24,7 @@ func (m *configureWSFS) Name() string {
}

func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    root := b.BundleRoot.Native()
    root := b.SyncRoot.Native()

    // The bundle root must be located in /Workspace/
    if !strings.HasPrefix(root, "/Workspace/") {

@ -39,12 +39,12 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
    // If so, swap out vfs.Path instance of the sync root with one that
    // makes all Workspace File System interactions extension aware.
    p, err := vfs.NewFilerPath(ctx, root, func(path string) (filer.Filer, error) {
        return filer.NewWorkspaceFilesExtensionsClient(b.WorkspaceClient(), path)
        return filer.NewReadOnlyWorkspaceFilesExtensionsClient(b.WorkspaceClient(), path)
    })
    if err != nil {
        return diag.FromErr(err)
    }

    b.BundleRoot = p
    b.SyncRoot = p
    return nil
}

@ -59,7 +59,7 @@ func (m *expandPipelineGlobPaths) expandLibrary(v dyn.Value) ([]dyn.Value, error
    if err != nil {
        return nil, err
    }
    nv, err := dyn.SetByPath(v, p, dyn.NewValue(m, pv.Location()))
    nv, err := dyn.SetByPath(v, p, dyn.NewValue(m, pv.Locations()))
    if err != nil {
        return nil, err
    }

@ -90,7 +90,7 @@ func (m *expandPipelineGlobPaths) expandSequence(p dyn.Path, v dyn.Value) (dyn.V
        vs = append(vs, v...)
    }

    return dyn.NewValue(vs, v.Location()), nil
    return dyn.NewValue(vs, v.Locations()), nil
}

func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {

@ -1,36 +0,0 @@
package mutator

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
)

type ifMutator struct {
    condition      func(*bundle.Bundle) bool
    onTrueMutator  bundle.Mutator
    onFalseMutator bundle.Mutator
}

func If(
    condition func(*bundle.Bundle) bool,
    onTrueMutator bundle.Mutator,
    onFalseMutator bundle.Mutator,
) bundle.Mutator {
    return &ifMutator{
        condition, onTrueMutator, onFalseMutator,
    }
}

func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    if m.condition(b) {
        return bundle.Apply(ctx, b, m.onTrueMutator)
    } else {
        return bundle.Apply(ctx, b, m.onFalseMutator)
    }
}

func (m *ifMutator) Name() string {
    return "If"
}
@ -0,0 +1,45 @@
package mutator

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/dyn/merge"
)

type mergeJobParameters struct{}

func MergeJobParameters() bundle.Mutator {
    return &mergeJobParameters{}
}

func (m *mergeJobParameters) Name() string {
    return "MergeJobParameters"
}

func (m *mergeJobParameters) parameterNameString(v dyn.Value) string {
    switch v.Kind() {
    case dyn.KindInvalid, dyn.KindNil:
        return ""
    case dyn.KindString:
        return v.MustString()
    default:
        panic("task key must be a string")
    }
}

func (m *mergeJobParameters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
        if v.Kind() == dyn.KindNil {
            return v, nil
        }

        return dyn.Map(v, "resources.jobs", dyn.Foreach(func(_ dyn.Path, job dyn.Value) (dyn.Value, error) {
            return dyn.Map(job, "parameters", merge.ElementsByKey("name", m.parameterNameString))
        }))
    })

    return diag.FromErr(err)
}
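A minimal sketch of the merge semantics that merge.ElementsByKey gives the parameters list above: elements are keyed by name and a later definition overrides an earlier one. This illustrates the behavior the tests below expect; it is not the library's implementation.

package main

import "fmt"

type param struct{ Name, Default string }

// mergeByName keeps one element per name, letting later entries win.
func mergeByName(in []param) []param {
    index := map[string]int{}
    var out []param
    for _, p := range in {
        if i, ok := index[p.Name]; ok {
            out[i] = p // later definition overrides the earlier one
            continue
        }
        index[p.Name] = len(out)
        out = append(out, p)
    }
    return out
}

func main() {
    merged := mergeByName([]param{{"foo", "v1"}, {"bar", "v1"}, {"foo", "v2"}})
    fmt.Println(merged) // [{foo v2} {bar v1}]
}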
@ -0,0 +1,80 @@
package mutator_test

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/mutator"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/stretchr/testify/assert"
)

func TestMergeJobParameters(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Resources: config.Resources{
                Jobs: map[string]*resources.Job{
                    "foo": {
                        JobSettings: &jobs.JobSettings{
                            Parameters: []jobs.JobParameterDefinition{
                                {
                                    Name:    "foo",
                                    Default: "v1",
                                },
                                {
                                    Name:    "bar",
                                    Default: "v1",
                                },
                                {
                                    Name:    "foo",
                                    Default: "v2",
                                },
                            },
                        },
                    },
                },
            },
        },
    }

    diags := bundle.Apply(context.Background(), b, mutator.MergeJobParameters())
    assert.NoError(t, diags.Error())

    j := b.Config.Resources.Jobs["foo"]

    assert.Len(t, j.Parameters, 2)
    assert.Equal(t, "foo", j.Parameters[0].Name)
    assert.Equal(t, "v2", j.Parameters[0].Default)
    assert.Equal(t, "bar", j.Parameters[1].Name)
    assert.Equal(t, "v1", j.Parameters[1].Default)
}

func TestMergeJobParametersWithNilKey(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Resources: config.Resources{
                Jobs: map[string]*resources.Job{
                    "foo": {
                        JobSettings: &jobs.JobSettings{
                            Parameters: []jobs.JobParameterDefinition{
                                {
                                    Default: "v1",
                                },
                                {
                                    Default: "v2",
                                },
                            },
                        },
                    },
                },
            },
        },
    }

    diags := bundle.Apply(context.Background(), b, mutator.MergeJobParameters())
    assert.NoError(t, diags.Error())
    assert.Len(t, b.Config.Resources.Jobs["foo"].Parameters, 1)
}

@ -5,6 +5,7 @@ import (
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/loader"
    pythonmutator "github.com/databricks/cli/bundle/config/mutator/python"
    "github.com/databricks/cli/bundle/config/validate"
    "github.com/databricks/cli/bundle/scripts"
)

@ -26,5 +27,9 @@ func DefaultMutators() []bundle.Mutator {
        DefineDefaultTarget(),
        LoadGitDetails(),
        pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoad),

        // Note: This mutator must run before the target overrides are merged.
        // See the mutator for more details.
        validate.UniqueResourceKeys(),
    }
}

@ -2,17 +2,14 @@ package mutator

import (
    "context"
    "path"
    "strings"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/libs/auth"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/log"
    "github.com/databricks/databricks-sdk-go/service/catalog"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/databricks/databricks-sdk-go/service/ml"
)

type processTargetMode struct{}
@ -30,95 +27,75 @@ func (m *processTargetMode) Name() string {
// Mark all resources as being for 'development' purposes, i.e.
// changing their name, adding tags, and (in the future)
// marking them as 'hidden' in the UI.
func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) {
    if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() {
        log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true")
        disabled := false
        b.Config.Bundle.Deployment.Lock.Enabled = &disabled
    }

    r := b.Config.Resources
    t := &b.Config.Presets
    shortName := b.Config.Workspace.CurrentUser.ShortName
    prefix := "[dev " + shortName + "] "

    // Generate a normalized version of the short name that can be used as a tag value.
    tagValue := b.Tagging.NormalizeValue(shortName)

    for i := range r.Jobs {
        r.Jobs[i].Name = prefix + r.Jobs[i].Name
        if r.Jobs[i].Tags == nil {
            r.Jobs[i].Tags = make(map[string]string)
        }
        r.Jobs[i].Tags["dev"] = tagValue
        if r.Jobs[i].MaxConcurrentRuns == 0 {
            r.Jobs[i].MaxConcurrentRuns = developmentConcurrentRuns
        }

        // Pause each job. As an exception, we don't pause jobs that are explicitly
        // marked as "unpaused". This allows users to override the default behavior
        // of the development mode.
        if r.Jobs[i].Schedule != nil && r.Jobs[i].Schedule.PauseStatus != jobs.PauseStatusUnpaused {
            r.Jobs[i].Schedule.PauseStatus = jobs.PauseStatusPaused
        }
        if r.Jobs[i].Continuous != nil && r.Jobs[i].Continuous.PauseStatus != jobs.PauseStatusUnpaused {
            r.Jobs[i].Continuous.PauseStatus = jobs.PauseStatusPaused
        }
        if r.Jobs[i].Trigger != nil && r.Jobs[i].Trigger.PauseStatus != jobs.PauseStatusUnpaused {
            r.Jobs[i].Trigger.PauseStatus = jobs.PauseStatusPaused
        }
    if t.NamePrefix == "" {
        t.NamePrefix = "[dev " + shortName + "] "
    }

    for i := range r.Pipelines {
        r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
        r.Pipelines[i].Development = true
        // (pipelines don't yet support tags)
    if t.Tags == nil {
        t.Tags = map[string]string{}
    }
    _, exists := t.Tags["dev"]
    if !exists {
        t.Tags["dev"] = b.Tagging.NormalizeValue(shortName)
    }

    for i := range r.Models {
        r.Models[i].Name = prefix + r.Models[i].Name
        r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: tagValue})
    if t.JobsMaxConcurrentRuns == 0 {
        t.JobsMaxConcurrentRuns = developmentConcurrentRuns
    }

    for i := range r.Experiments {
        filepath := r.Experiments[i].Name
        dir := path.Dir(filepath)
        base := path.Base(filepath)
        if dir == "." {
            r.Experiments[i].Name = prefix + base
        } else {
            r.Experiments[i].Name = dir + "/" + prefix + base
        }
        r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: tagValue})
    if t.TriggerPauseStatus == "" {
        t.TriggerPauseStatus = config.Paused
    }

    for i := range r.ModelServingEndpoints {
        prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
        r.ModelServingEndpoints[i].Name = prefix + r.ModelServingEndpoints[i].Name
        // (model serving doesn't yet support tags)
    if !config.IsExplicitlyDisabled(t.PipelinesDevelopment) {
        enabled := true
        t.PipelinesDevelopment = &enabled
    }

    for i := range r.RegisteredModels {
        prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
        r.RegisteredModels[i].Name = prefix + r.RegisteredModels[i].Name
        // (registered models in Unity Catalog don't yet support tags)
    }

    for i := range r.QualityMonitors {
        // Remove all schedules from monitors, since they don't support pausing/unpausing.
        // Quality monitors might support the "pause" property in the future, so at the
        // CLI level we do respect that property if it is set to "unpaused".
        if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
            r.QualityMonitors[i].Schedule = nil
        }
    }

    return nil
}

func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
    p := b.Config.Presets
    u := b.Config.Workspace.CurrentUser

    // Make sure presets don't set the trigger status to UNPAUSED;
    // this could be surprising since most users (and tools) expect triggers
    // to be paused in development.
    // (Note that there still is an exceptional case where users set the trigger
    // status to UNPAUSED at the level of an individual object, which was
    // historically allowed.)
    if p.TriggerPauseStatus == config.Unpaused {
        return diag.Diagnostics{{
            Severity:  diag.Error,
            Summary:   "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default",
            Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
        }}
    }

    // Make sure this development copy has unique names and paths to avoid conflicts
    if path := findNonUserPath(b); path != "" {
        return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
    }
    if p.NamePrefix != "" && !strings.Contains(p.NamePrefix, u.ShortName) && !strings.Contains(p.NamePrefix, u.UserName) {
        // Resources such as pipelines require a unique name, e.g. '[dev steve] my_pipeline'.
        // For this reason we require the name prefix to contain the current username;
        // it's a pitfall for users if they don't include it and later find out that
        // only a single user can do development deployments.
        return diag.Diagnostics{{
            Severity:  diag.Error,
            Summary:   "prefix should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'",
            Locations: []dyn.Location{b.Config.GetLocation("presets.name_prefix")},
        }}
    }
    return nil
}
@ -175,10 +152,11 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Di
    switch b.Config.Bundle.Mode {
    case config.Development:
        diags := validateDevelopmentMode(b)
        if diags != nil {
        if diags.HasError() {
            return diags
        }
        return transformDevelopmentMode(ctx, b)
        transformDevelopmentMode(ctx, b)
        return diags
    case config.Production:
        isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
        return validateProductionMode(ctx, b, isPrincipal)

@ -9,6 +9,7 @@ import (
    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/tags"
    sdkconfig "github.com/databricks/databricks-sdk-go/config"
    "github.com/databricks/databricks-sdk-go/service/catalog"

@ -51,6 +52,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
                Schedule: &jobs.CronSchedule{
                    QuartzCronExpression: "* * * * *",
                },
                Tags: map[string]string{"existing": "tag"},
            },
        },
        "job2": {

@ -82,7 +84,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
        },
    },
    Pipelines: map[string]*resources.Pipeline{
        "pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}},
        "pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1", Continuous: true}},
    },
    Experiments: map[string]*resources.MlflowExperiment{
        "experiment1": {Experiment: &ml.Experiment{Name: "/Users/lennart.kats@databricks.com/experiment1"}},

@ -114,6 +116,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
                },
            },
        },
        Schemas: map[string]*resources.Schema{
            "schema1": {CreateSchema: &catalog.CreateSchema{Name: "schema1"}},
        },
    },
},
// Use AWS implementation for testing.

@ -126,12 +131,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
func TestProcessTargetModeDevelopment(t *testing.T) {
    b := mockBundle(config.Development)

    m := ProcessTargetMode()
    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    // Job 1
    assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name)
    assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["existing"], "tag")
    assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["dev"], "lennart")
    assert.Equal(t, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused)

@ -142,6 +148,7 @@ func TestProcessTargetModeDevelopment(t *testing.T) {

    // Pipeline 1
    assert.Equal(t, "[dev lennart] pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
    assert.Equal(t, false, b.Config.Resources.Pipelines["pipeline1"].Continuous)
    assert.True(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)

    // Experiment 1

@ -167,6 +174,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
    assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
    assert.Nil(t, b.Config.Resources.QualityMonitors["qualityMonitor2"].Schedule)
    assert.Equal(t, catalog.MonitorCronSchedulePauseStatusUnpaused, b.Config.Resources.QualityMonitors["qualityMonitor3"].Schedule.PauseStatus)

    // Schema 1
    assert.Equal(t, "dev_lennart_schema1", b.Config.Resources.Schemas["schema1"].Name)
}

func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {

@ -176,7 +186,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
    })

    b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
    diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    // Assert that tag normalization took place.

@ -190,7 +201,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) {
    })

    b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
    diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    // Assert that tag normalization took place (Azure allows more characters than AWS).

@ -204,17 +216,53 @@ func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) {
    })

    b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
    diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    // Assert that tag normalization took place.
    assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"])
}

func TestValidateDevelopmentMode(t *testing.T) {
    // Test with a valid development mode bundle
    b := mockBundle(config.Development)
    diags := validateDevelopmentMode(b)
    require.NoError(t, diags.Error())

    // Test with a bundle that has a non-user path
    b.Config.Workspace.RootPath = "/Shared/.bundle/x/y/state"
    diags = validateDevelopmentMode(b)
    require.ErrorContains(t, diags.Error(), "root_path")

    // Test with a bundle that has an unpaused trigger pause status
    b = mockBundle(config.Development)
    b.Config.Presets.TriggerPauseStatus = config.Unpaused
    diags = validateDevelopmentMode(b)
    require.ErrorContains(t, diags.Error(), "UNPAUSED")

    // Test with a bundle that has a prefix not containing the username or short name
    b = mockBundle(config.Development)
    b.Config.Presets.NamePrefix = "[prod]"
    diags = validateDevelopmentMode(b)
    require.Len(t, diags, 1)
    assert.Equal(t, diag.Error, diags[0].Severity)
    assert.Contains(t, diags[0].Summary, "")

    // Test with a bundle that has valid user paths
    b = mockBundle(config.Development)
    b.Config.Workspace.RootPath = "/Users/lennart@company.com/.bundle/x/y/state"
    b.Config.Workspace.StatePath = "/Users/lennart@company.com/.bundle/x/y/state"
    b.Config.Workspace.FilePath = "/Users/lennart@company.com/.bundle/x/y/files"
    b.Config.Workspace.ArtifactPath = "/Users/lennart@company.com/.bundle/x/y/artifacts"
    diags = validateDevelopmentMode(b)
    require.NoError(t, diags.Error())
}

func TestProcessTargetModeDefault(t *testing.T) {
    b := mockBundle("")

    m := ProcessTargetMode()
    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())
    assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)

@ -300,7 +348,7 @@ func TestAllResourcesMocked(t *testing.T) {
func TestAllResourcesRenamed(t *testing.T) {
    b := mockBundle(config.Development)

    m := ProcessTargetMode()
    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

@ -330,8 +378,7 @@ func TestDisableLocking(t *testing.T) {
    ctx := context.Background()
    b := mockBundle(config.Development)

    err := bundle.Apply(ctx, b, ProcessTargetMode())
    require.Nil(t, err)
    transformDevelopmentMode(ctx, b)
    assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled())
}

@ -341,7 +388,97 @@ func TestDisableLockingDisabled(t *testing.T) {
    explicitlyEnabled := true
    b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled

    err := bundle.Apply(ctx, b, ProcessTargetMode())
    require.Nil(t, err)
    transformDevelopmentMode(ctx, b)
    assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled")
}

func TestPrefixAlreadySet(t *testing.T) {
    b := mockBundle(config.Development)
    b.Config.Presets.NamePrefix = "custom_lennart_deploy_"

    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    assert.Equal(t, "custom_lennart_deploy_job1", b.Config.Resources.Jobs["job1"].Name)
}

func TestTagsAlreadySet(t *testing.T) {
    b := mockBundle(config.Development)
    b.Config.Presets.Tags = map[string]string{
        "custom": "tag",
        "dev":    "foo",
    }

    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    assert.Equal(t, "tag", b.Config.Resources.Jobs["job1"].Tags["custom"])
    assert.Equal(t, "foo", b.Config.Resources.Jobs["job1"].Tags["dev"])
}

func TestTagsNil(t *testing.T) {
    b := mockBundle(config.Development)
    b.Config.Presets.Tags = nil

    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    assert.Equal(t, "lennart", b.Config.Resources.Jobs["job2"].Tags["dev"])
}

func TestTagsEmptySet(t *testing.T) {
    b := mockBundle(config.Development)
    b.Config.Presets.Tags = map[string]string{}

    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    assert.Equal(t, "lennart", b.Config.Resources.Jobs["job2"].Tags["dev"])
}

func TestJobsMaxConcurrentRunsAlreadySet(t *testing.T) {
    b := mockBundle(config.Development)
    b.Config.Presets.JobsMaxConcurrentRuns = 10

    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    assert.Equal(t, 10, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns)
}

func TestJobsMaxConcurrentRunsDisabled(t *testing.T) {
    b := mockBundle(config.Development)
    b.Config.Presets.JobsMaxConcurrentRuns = 1

    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns)
}

func TestTriggerPauseStatusWhenUnpaused(t *testing.T) {
    b := mockBundle(config.Development)
    b.Config.Presets.TriggerPauseStatus = config.Unpaused

    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.ErrorContains(t, diags.Error(), "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default")
}

func TestPipelinesDevelopmentDisabled(t *testing.T) {
    b := mockBundle(config.Development)
    notEnabled := false
    b.Config.Presets.PipelinesDevelopment = &notEnabled

    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())

    assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
}
@ -54,13 +54,23 @@ func parsePythonDiagnostics(input io.Reader) (diag.Diagnostics, error) {
    if err != nil {
        return nil, fmt.Errorf("failed to parse path: %s", err)
    }
    var paths []dyn.Path
    if path != nil {
        paths = []dyn.Path{path}
    }

    var locations []dyn.Location
    location := convertPythonLocation(parsedLine.Location)
    if location != (dyn.Location{}) {
        locations = append(locations, location)
    }

    diag := diag.Diagnostic{
        Severity: severity,
        Summary:  parsedLine.Summary,
        Detail:   parsedLine.Detail,
        Location: convertPythonLocation(parsedLine.Location),
        Path:     path,
        Severity:  severity,
        Summary:   parsedLine.Summary,
        Detail:    parsedLine.Detail,
        Locations: locations,
        Paths:     paths,
    }

    diags = diags.Append(diag)

@ -39,10 +39,12 @@ func TestParsePythonDiagnostics(t *testing.T) {
    {
        Severity: diag.Error,
        Summary:  "error summary",
        Location: dyn.Location{
            File:   "src/examples/file.py",
            Line:   1,
            Column: 2,
        Locations: []dyn.Location{
            {
                File:   "src/examples/file.py",
                Line:   1,
                Column: 2,
            },
        },
    },
},

@ -54,7 +56,7 @@ func TestParsePythonDiagnostics(t *testing.T) {
    {
        Severity: diag.Error,
        Summary:  "error summary",
        Path: dyn.MustPathFromString("resources.jobs.job0.name"),
        Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.job0.name")},
    },
},
},

@ -7,8 +7,8 @@ import (
    "fmt"
    "os"
    "path/filepath"
    "runtime"

    "github.com/databricks/cli/libs/python"
    "github.com/databricks/databricks-sdk-go/logger"

    "github.com/databricks/cli/bundle/env"

@ -86,23 +86,15 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
        return nil
    }

    if experimental.PyDABs.VEnvPath == "" {
        return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set")
    }

    // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics'
    var mutateDiags diag.Diagnostics
    var mutateDiagsHasError = errors.New("unexpected error")

    err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) {
        pythonPath := interpreterPath(experimental.PyDABs.VEnvPath)
        pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath)

        if _, err := os.Stat(pythonPath); err != nil {
            if os.IsNotExist(err) {
                return dyn.InvalidValue, fmt.Errorf("can't find %q, check if venv is created", pythonPath)
            } else {
                return dyn.InvalidValue, fmt.Errorf("can't find %q: %w", pythonPath, err)
            }
        if err != nil {
            return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err)
        }

        cacheDir, err := createCacheDir(ctx)
@ -423,11 +415,16 @@ func isOmitemptyDelete(left dyn.Value) bool {
    }
}

// interpreterPath returns the platform-specific path to the Python interpreter in the virtual environment.
func interpreterPath(venvPath string) string {
    if runtime.GOOS == "windows" {
        return filepath.Join(venvPath, "Scripts", "python3.exe")
    } else {
        return filepath.Join(venvPath, "bin", "python3")
// detectExecutable looks up the Python interpreter in the virtual environment or, if venvPath is not set, in PATH.
func detectExecutable(ctx context.Context, venvPath string) (string, error) {
    if venvPath == "" {
        interpreter, err := python.DetectExecutable(ctx)
        if err != nil {
            return "", err
        }

        return interpreter, nil
    }

    return python.DetectVEnvExecutable(venvPath)
}
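A self-contained sketch of the venv-or-PATH fallback above, under the assumption that the venv interpreter lives at bin/python3 on POSIX and Scripts\python3.exe on Windows; exec.LookPath stands in for python.DetectExecutable's PATH search.

package main

import (
    "fmt"
    "os/exec"
    "path/filepath"
    "runtime"
)

func detectExecutable(venvPath string) (string, error) {
    if venvPath == "" {
        // Fall back to whatever interpreter is first on PATH.
        return exec.LookPath("python3")
    }
    if runtime.GOOS == "windows" {
        return filepath.Join(venvPath, "Scripts", "python3.exe"), nil
    }
    return filepath.Join(venvPath, "bin", "python3"), nil
}

func main() {
    p, err := detectExecutable("")
    fmt.Println(p, err)
}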
@ -97,11 +97,14 @@ func TestPythonMutator_load(t *testing.T) {

    assert.Equal(t, 1, len(diags))
    assert.Equal(t, "job doesn't have any tasks", diags[0].Summary)
    assert.Equal(t, dyn.Location{
        File:   "src/examples/file.py",
        Line:   10,
        Column: 5,
    }, diags[0].Location)
    assert.Equal(t, []dyn.Location{
        {
            File:   "src/examples/file.py",
            Line:   10,
            Column: 5,
        },
    }, diags[0].Locations)

}

func TestPythonMutator_load_disallowed(t *testing.T) {

@ -279,7 +282,7 @@ func TestPythonMutator_venvRequired(t *testing.T) {
}

func TestPythonMutator_venvNotFound(t *testing.T) {
    expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path"))
    expectedError := fmt.Sprintf("failed to get Python interpreter path: can't find %q, check if virtualenv is created", interpreterPath("bad_path"))

    b := loadYaml("databricks.yml", `
experimental:

@ -305,8 +308,8 @@ type createOverrideVisitorTestCase struct {
}

func TestCreateOverrideVisitor(t *testing.T) {
    left := dyn.NewValue(42, dyn.Location{})
    right := dyn.NewValue(1337, dyn.Location{})
    left := dyn.V(42)
    right := dyn.V(1337)

    testCases := []createOverrideVisitorTestCase{
        {

@ -470,21 +473,21 @@ func TestCreateOverrideVisitor_omitempty(t *testing.T) {
    // this is not happening, but adding for completeness
    name:        "undo delete of empty variables",
    path:        dyn.MustPathFromString("variables"),
    left:        dyn.NewValue([]dyn.Value{}, location),
    left:        dyn.NewValue([]dyn.Value{}, []dyn.Location{location}),
    expectedErr: merge.ErrOverrideUndoDelete,
    phases:      allPhases,
},
{
    name:        "undo delete of empty job clusters",
    path:        dyn.MustPathFromString("resources.jobs.job0.job_clusters"),
    left:        dyn.NewValue([]dyn.Value{}, location),
    left:        dyn.NewValue([]dyn.Value{}, []dyn.Location{location}),
    expectedErr: merge.ErrOverrideUndoDelete,
    phases:      allPhases,
},
{
    name:        "allow delete of non-empty job clusters",
    path:        dyn.MustPathFromString("resources.jobs.job0.job_clusters"),
    left:        dyn.NewValue([]dyn.Value{dyn.NewValue("abc", location)}, location),
    left:        dyn.NewValue([]dyn.Value{dyn.NewValue("abc", []dyn.Location{location})}, []dyn.Location{location}),
    expectedErr: nil,
    // deletions aren't allowed in 'load' phase
    phases:      []phase{PythonMutatorPhaseInit},

@ -492,17 +495,15 @@ func TestCreateOverrideVisitor_omitempty(t *testing.T) {
{
    name:        "undo delete of empty tags",
    path:        dyn.MustPathFromString("resources.jobs.job0.tags"),
    left:        dyn.NewValue(map[string]dyn.Value{}, location),
    left:        dyn.NewValue(map[string]dyn.Value{}, []dyn.Location{location}),
    expectedErr: merge.ErrOverrideUndoDelete,
    phases:      allPhases,
},
{
    name: "allow delete of non-empty tags",
    path: dyn.MustPathFromString("resources.jobs.job0.tags"),
    left: dyn.NewValue(
        map[string]dyn.Value{"dev": dyn.NewValue("true", location)},
        location,
    ),
    left: dyn.NewValue(map[string]dyn.Value{"dev": dyn.NewValue("true", []dyn.Location{location})}, []dyn.Location{location}),

    expectedErr: nil,
    // deletions aren't allowed in 'load' phase
    phases:      []phase{PythonMutatorPhaseInit},

@ -510,7 +511,7 @@ func TestCreateOverrideVisitor_omitempty(t *testing.T) {
{
    name:        "undo delete of nil",
    path:        dyn.MustPathFromString("resources.jobs.job0.tags"),
    left:        dyn.NilValue.WithLocation(location),
    left:        dyn.NilValue.WithLocations([]dyn.Location{location}),
    expectedErr: merge.ErrOverrideUndoDelete,
    phases:      allPhases,
},

@ -595,9 +596,7 @@ func loadYaml(name string, content string) *bundle.Bundle {
    }
}

func withFakeVEnv(t *testing.T, path string) {
    interpreterPath := interpreterPath(path)

func withFakeVEnv(t *testing.T, venvPath string) {
    cwd, err := os.Getwd()
    if err != nil {
        panic(err)

@ -607,6 +606,8 @@ func withFakeVEnv(t *testing.T, path string) {
        panic(err)
    }

    interpreterPath := interpreterPath(venvPath)

    err = os.MkdirAll(filepath.Dir(interpreterPath), 0755)
    if err != nil {
        panic(err)

@ -617,9 +618,22 @@ func withFakeVEnv(t *testing.T, path string) {
        panic(err)
    }

    err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0755)
    if err != nil {
        panic(err)
    }

    t.Cleanup(func() {
        if err := os.Chdir(cwd); err != nil {
            panic(err)
        }
    })
}

func interpreterPath(venvPath string) string {
    if runtime.GOOS == "windows" {
        return filepath.Join(venvPath, "Scripts", "python3.exe")
    } else {
        return filepath.Join(venvPath, "bin", "python3")
    }
}
@ -38,13 +38,17 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
            return dyn.InvalidValue, err
        }

        return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Location()), nil
        return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Locations()), nil
    }
}

func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
        return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
            v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
            if err != nil {
                return dyn.InvalidValue, err
            }
            v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
            if err != nil {
                return dyn.InvalidValue, err
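A small sketch of what makeRelativeTo does with each sync value, assuming the value's location file sits in a subdirectory of the bundle root; the paths here are illustrative.

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    root := "/tmp/dir"  // bundle root
    dir := "/tmp/dir/a" // directory of the YAML file that defined the value
    rel, err := filepath.Rel(root, dir)
    if err != nil {
        panic(err)
    }
    // "bar" defined in /tmp/dir/a/file.yml becomes "a/bar" relative to the root,
    // matching the expectations in the tests that follow.
    fmt.Println(filepath.Join(rel, "bar")) // a/bar
}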
@ -17,6 +17,10 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
    RootPath: ".",
    Config: config.Root{
        Sync: config.Sync{
            Paths: []string{
                ".",
                "../common",
            },
            Include: []string{
                "foo",
                "bar",

@ -29,6 +33,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
        },
    }

    bundletest.SetLocation(b, "sync.paths[0]", "./databricks.yml")
    bundletest.SetLocation(b, "sync.paths[1]", "./databricks.yml")
    bundletest.SetLocation(b, "sync.include[0]", "./file.yml")
    bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml")
    bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")

@ -37,6 +43,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
    diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
    assert.NoError(t, diags.Error())

    assert.Equal(t, filepath.Clean("."), b.Config.Sync.Paths[0])
    assert.Equal(t, filepath.Clean("../common"), b.Config.Sync.Paths[1])
    assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
    assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
    assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0])

@ -48,6 +56,10 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
    RootPath: "/tmp/dir",
    Config: config.Root{
        Sync: config.Sync{
            Paths: []string{
                ".",
                "../common",
            },
            Include: []string{
                "foo",
                "bar",

@ -60,6 +72,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
        },
    }

    bundletest.SetLocation(b, "sync.paths[0]", "/tmp/dir/databricks.yml")
    bundletest.SetLocation(b, "sync.paths[1]", "/tmp/dir/databricks.yml")
    bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml")
    bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml")
    bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")

@ -68,6 +82,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
    diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
    assert.NoError(t, diags.Error())

    assert.Equal(t, filepath.Clean("."), b.Config.Sync.Paths[0])
    assert.Equal(t, filepath.Clean("../common"), b.Config.Sync.Paths[1])
    assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
    assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
    assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0])
@ -35,8 +35,8 @@ func reportRunAsNotSupported(resourceType string, location dyn.Location, current
|
|||
Summary: fmt.Sprintf("%s do not support a setting a run_as user that is different from the owner.\n"+
|
||||
"Current identity: %s. Run as identity: %s.\n"+
|
||||
"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", resourceType, currentUser, runAsUser),
|
||||
Location: location,
|
||||
Severity: diag.Error,
|
||||
Locations: []dyn.Location{location},
|
||||
Severity: diag.Error,
|
||||
}}
|
||||
}
|
||||
|
||||
|
@ -44,9 +44,9 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
|
|||
diags := diag.Diagnostics{}
|
||||
|
||||
neitherSpecifiedErr := diag.Diagnostics{{
|
||||
Summary: "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
|
||||
Location: b.Config.GetLocation("run_as"),
|
||||
Severity: diag.Error,
|
||||
Summary: "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
|
||||
Locations: []dyn.Location{b.Config.GetLocation("run_as")},
|
||||
Severity: diag.Error,
|
||||
}}
|
||||
|
||||
// Fail fast if neither service_principal_name nor user_name are specified, but the
|
||||
|
@ -64,9 +64,9 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
|
|||
|
||||
if runAs.UserName != "" && runAs.ServicePrincipalName != "" {
|
||||
diags = diags.Extend(diag.Diagnostics{{
|
||||
Summary: "run_as section cannot specify both user_name and service_principal_name",
|
||||
Location: b.Config.GetLocation("run_as"),
|
||||
Severity: diag.Error,
|
||||
Summary: "run_as section cannot specify both user_name and service_principal_name",
|
||||
Locations: []dyn.Location{b.Config.GetLocation("run_as")},
|
||||
Severity: diag.Error,
|
||||
}})
|
||||
}
|
||||
|
||||
|
@ -172,10 +172,10 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
|
|||
setRunAsForJobs(b)
|
||||
return diag.Diagnostics{
|
||||
{
|
||||
Severity: diag.Warning,
|
||||
Summary: "You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC.",
|
||||
Path: dyn.MustPathFromString("experimental.use_legacy_run_as"),
|
||||
Location: b.Config.GetLocation("experimental.use_legacy_run_as"),
|
||||
Severity: diag.Warning,
|
||||
Summary: "You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC.",
|
||||
Paths: []dyn.Path{dyn.MustPathFromString("experimental.use_legacy_run_as")},
|
||||
Locations: b.Config.GetLocations("experimental.use_legacy_run_as"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -39,6 +39,7 @@ func allResourceTypes(t *testing.T) []string {
 			"pipelines",
 			"quality_monitors",
 			"registered_models",
+			"schemas",
 		},
 		resourceTypes,
 	)
@@ -136,6 +137,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
 		"models",
 		"registered_models",
 		"experiments",
+		"schemas",
 	}
 
 	base := config.Root{
@@ -2,10 +2,12 @@ package mutator
 
 import (
 	"context"
+	"fmt"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config/variable"
 	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/cli/libs/env"
 )
@@ -21,52 +23,63 @@ func (m *setVariables) Name() string {
 	return "SetVariables"
 }
 
-func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Diagnostics {
+func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable, name string) (dyn.Value, error) {
 	// case: variable already has value initialized, so skip
-	if v.HasValue() {
-		return nil
+	if variable.HasValue() {
+		return v, nil
 	}
 
 	// case: read and set variable value from process environment
 	envVarName := bundleVarPrefix + name
 	if val, ok := env.Lookup(ctx, envVarName); ok {
-		if v.IsComplex() {
-			return diag.Errorf(`setting via environment variables (%s) is not supported for complex variable %s`, envVarName, name)
+		if variable.IsComplex() {
+			return dyn.InvalidValue, fmt.Errorf(`setting via environment variables (%s) is not supported for complex variable %s`, envVarName, name)
 		}
 
-		err := v.Set(val)
+		v, err := dyn.Set(v, "value", dyn.V(val))
 		if err != nil {
-			return diag.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err)
+			return dyn.InvalidValue, fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err)
 		}
-		return nil
+		return v, nil
 	}
 
 	// case: Defined a variable for named lookup for a resource
 	// It will be resolved later in ResolveResourceReferences mutator
-	if v.Lookup != nil {
-		return nil
+	if variable.Lookup != nil {
+		return v, nil
 	}
 
 	// case: Set the variable to its default value
-	if v.HasDefault() {
-		err := v.Set(v.Default)
+	if variable.HasDefault() {
+		vDefault, err := dyn.Get(v, "default")
 		if err != nil {
-			return diag.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, v.Default, name, err)
+			return dyn.InvalidValue, fmt.Errorf(`failed to get default value from config "%s" for variable %s with error: %v`, variable.Default, name, err)
 		}
-		return nil
+
+		v, err := dyn.Set(v, "value", vDefault)
+		if err != nil {
+			return dyn.InvalidValue, fmt.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, variable.Default, name, err)
+		}
+		return v, nil
 	}
 
 	// We should have had a value to set for the variable at this point.
-	return diag.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
+	return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
 }
 
 func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	var diags diag.Diagnostics
-	for name, variable := range b.Config.Variables {
-		diags = diags.Extend(setVariable(ctx, variable, name))
-		if diags.HasError() {
-			return diags
-		}
-	}
-	return diags
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		return dyn.Map(v, "variables", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) {
+			name := p[1].Key()
+			v, ok := b.Config.Variables[name]
+			if !ok {
+				return dyn.InvalidValue, fmt.Errorf(`variable "%s" is not defined`, name)
+			}
+
+			return setVariable(ctx, variable, v, name)
+		}))
+	})
+
+	return diag.FromErr(err)
 }
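The rewritten mutator walks the dynamic configuration with dyn.Map and dyn.Foreach instead of mutating the typed structs directly. The following standalone sketch exercises the same traversal on a toy value: it copies each variable's "default" into "value", which is roughly what setVariable does in the default-value case. The keys and values are invented, and only dyn APIs that appear in this diff are used:

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// Build a toy config: {variables: {foo: {default: "a"}}}.
	v := dyn.V(dyn.NewMapping())
	v, _ = dyn.Set(v, "variables", dyn.V(dyn.NewMapping()))
	v, _ = dyn.Set(v, "variables.foo", dyn.V(dyn.NewMapping()))
	v, _ = dyn.Set(v, "variables.foo.default", dyn.V("a"))

	// Visit every entry under "variables" and copy its default into value.
	out, err := dyn.Map(v, "variables", dyn.Foreach(func(p dyn.Path, vv dyn.Value) (dyn.Value, error) {
		def, err := dyn.Get(vv, "default")
		if err != nil {
			return dyn.InvalidValue, err
		}
		return dyn.Set(vv, "value", def)
	}))
	if err != nil {
		panic(err)
	}

	val, err := dyn.Get(out, "variables.foo.value")
	if err != nil {
		panic(err)
	}
	fmt.Println(val.MustString()) // "a"
}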
@@ -7,6 +7,8 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/convert"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -20,9 +22,14 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) {
 
 	// set value for variable as an environment variable
 	t.Setenv("BUNDLE_VAR_foo", "process-env")
+	v, err := convert.FromTyped(variable, dyn.NilValue)
+	require.NoError(t, err)
 
-	diags := setVariable(context.Background(), &variable, "foo")
-	require.NoError(t, diags.Error())
+	v, err = setVariable(context.Background(), v, &variable, "foo")
+	require.NoError(t, err)
+
+	err = convert.ToTyped(&variable, v)
+	require.NoError(t, err)
 	assert.Equal(t, variable.Value, "process-env")
 }
@@ -33,8 +40,14 @@ func TestSetVariableUsingDefaultValue(t *testing.T) {
 		Default: defaultVal,
 	}
 
-	diags := setVariable(context.Background(), &variable, "foo")
-	require.NoError(t, diags.Error())
+	v, err := convert.FromTyped(variable, dyn.NilValue)
+	require.NoError(t, err)
+
+	v, err = setVariable(context.Background(), v, &variable, "foo")
+	require.NoError(t, err)
+
+	err = convert.ToTyped(&variable, v)
+	require.NoError(t, err)
 	assert.Equal(t, variable.Value, "default")
 }
@@ -49,8 +62,14 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) {
 
 	// since a value is already assigned to the variable, it would not be overridden
 	// by the default value
-	diags := setVariable(context.Background(), &variable, "foo")
-	require.NoError(t, diags.Error())
+	v, err := convert.FromTyped(variable, dyn.NilValue)
+	require.NoError(t, err)
+
+	v, err = setVariable(context.Background(), v, &variable, "foo")
+	require.NoError(t, err)
+
+	err = convert.ToTyped(&variable, v)
+	require.NoError(t, err)
 	assert.Equal(t, variable.Value, "assigned-value")
 }
@@ -68,8 +87,14 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) {
 
 	// since a value is already assigned to the variable, it would not be overridden
 	// by the value from environment
-	diags := setVariable(context.Background(), &variable, "foo")
-	require.NoError(t, diags.Error())
+	v, err := convert.FromTyped(variable, dyn.NilValue)
+	require.NoError(t, err)
+
+	v, err = setVariable(context.Background(), v, &variable, "foo")
+	require.NoError(t, err)
+
+	err = convert.ToTyped(&variable, v)
+	require.NoError(t, err)
 	assert.Equal(t, variable.Value, "assigned-value")
 }
@@ -79,8 +104,11 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) {
 	}
 
 	// fails because we could not resolve a value for the variable
-	diags := setVariable(context.Background(), &variable, "foo")
-	assert.ErrorContains(t, diags.Error(), "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable")
+	v, err := convert.FromTyped(variable, dyn.NilValue)
+	require.NoError(t, err)
+
+	_, err = setVariable(context.Background(), v, &variable, "foo")
+	assert.ErrorContains(t, err, "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable")
 }
 
 func TestSetVariablesMutator(t *testing.T) {
@@ -126,6 +154,9 @@ func TestSetComplexVariablesViaEnvVariablesIsNotAllowed(t *testing.T) {
 	// set value for variable as an environment variable
 	t.Setenv("BUNDLE_VAR_foo", "process-env")
 
-	diags := setVariable(context.Background(), &variable, "foo")
-	assert.ErrorContains(t, diags.Error(), "setting via environment variables (BUNDLE_VAR_foo) is not supported for complex variable foo")
+	v, err := convert.FromTyped(variable, dyn.NilValue)
+	require.NoError(t, err)
+
+	_, err = setVariable(context.Background(), v, &variable, "foo")
+	assert.ErrorContains(t, err, "setting via environment variables (BUNDLE_VAR_foo) is not supported for complex variable foo")
 }
@@ -0,0 +1,48 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type syncDefaultPath struct{}
+
+// SyncDefaultPath configures the default sync path to be equal to the bundle root.
+func SyncDefaultPath() bundle.Mutator {
+	return &syncDefaultPath{}
+}
+
+func (m *syncDefaultPath) Name() string {
+	return "SyncDefaultPath"
+}
+
+func (m *syncDefaultPath) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	isset := false
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		pv, _ := dyn.Get(v, "sync.paths")
+
+		// If the sync paths field is already set, do nothing.
+		// We know it is set if its value is either a nil or a sequence (empty or not).
+		switch pv.Kind() {
+		case dyn.KindNil, dyn.KindSequence:
+			isset = true
+		}
+
+		return v, nil
+	})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	// If the sync paths field is already set, do nothing.
+	if isset {
+		return nil
+	}
+
+	// Set the sync paths to the default value.
+	b.Config.Sync.Paths = []string{"."}
+	return nil
+}
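The detour through b.Config.Mutate exists because the typed []string field cannot distinguish an explicitly empty sync.paths from one that was never set, while the dynamic value can: an absent key yields an invalid value, and an explicit null or sequence means the user set it. A small sketch of that distinction, using only the dyn calls that appear above:

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// Absent key: dyn.Get fails and the returned value has KindInvalid.
	root := dyn.V(dyn.NewMapping())
	pv, _ := dyn.Get(root, "sync.paths")
	fmt.Println(pv.Kind() == dyn.KindInvalid) // true: treated as "not set"

	// Explicit empty sequence: KindSequence, so the mutator leaves it alone.
	root, _ = dyn.Set(root, "sync", dyn.V(dyn.NewMapping()))
	root, _ = dyn.Set(root, "sync.paths", dyn.V([]dyn.Value{}))
	pv, _ = dyn.Get(root, "sync.paths")
	fmt.Println(pv.Kind() == dyn.KindSequence) // true: treated as "set"
}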
@@ -0,0 +1,82 @@
+package mutator_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSyncDefaultPath_DefaultIfUnset(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir",
+		Config:   config.Root{},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncDefaultPath())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
+}
+
+func TestSyncDefaultPath_SkipIfSet(t *testing.T) {
+	tcases := []struct {
+		name   string
+		paths  dyn.Value
+		expect []string
+	}{
+		{
+			name:   "nil",
+			paths:  dyn.V(nil),
+			expect: nil,
+		},
+		{
+			name:   "empty sequence",
+			paths:  dyn.V([]dyn.Value{}),
+			expect: []string{},
+		},
+		{
+			name:   "non-empty sequence",
+			paths:  dyn.V([]dyn.Value{dyn.V("something")}),
+			expect: []string{"something"},
+		},
+	}
+
+	for _, tcase := range tcases {
+		t.Run(tcase.name, func(t *testing.T) {
+			b := &bundle.Bundle{
+				RootPath: "/tmp/some/dir",
+				Config:   config.Root{},
+			}
+
+			diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+				err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+					v, err := dyn.Set(v, "sync", dyn.V(dyn.NewMapping()))
+					if err != nil {
+						return dyn.InvalidValue, err
+					}
+					v, err = dyn.Set(v, "sync.paths", tcase.paths)
+					if err != nil {
+						return dyn.InvalidValue, err
+					}
+					return v, nil
+				})
+				return diag.FromErr(err)
+			})
+			require.NoError(t, diags.Error())
+
+			ctx := context.Background()
+			diags = bundle.Apply(ctx, b, mutator.SyncDefaultPath())
+			require.NoError(t, diags.Error())
+
+			// If the sync paths field is already set, do nothing.
+			assert.Equal(t, tcase.expect, b.Config.Sync.Paths)
+		})
+	}
+}
@@ -0,0 +1,120 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/vfs"
+)
+
+type syncInferRoot struct{}
+
+// SyncInferRoot is a mutator that infers the root path of all files to synchronize by looking at the
+// paths in the sync configuration. The sync root may be different from the bundle root
+// when the user intends to synchronize files outside the bundle root.
+//
+// The sync root can be equivalent to or an ancestor of the bundle root, but not a descendant.
+// That is, the sync root must contain the bundle root.
+//
+// This mutator requires all sync-related paths and patterns to be relative to the bundle root path.
+// This is done by the [RewriteSyncPaths] mutator, which must run before this mutator.
+func SyncInferRoot() bundle.Mutator {
+	return &syncInferRoot{}
+}
+
+func (m *syncInferRoot) Name() string {
+	return "SyncInferRoot"
+}
+
+// computeRoot finds the innermost path that contains the specified path.
+// It traverses up the root path until it finds the innermost path.
+// If the path does not exist, it returns an empty string.
+//
+// See "sync_infer_root_internal_test.go" for examples.
+func (m *syncInferRoot) computeRoot(path string, root string) string {
+	for !filepath.IsLocal(path) {
+		// Break if we have reached the root of the filesystem.
+		dir := filepath.Dir(root)
+		if dir == root {
+			return ""
+		}
+
+		// Update the sync path as we navigate up the directory tree.
+		path = filepath.Join(filepath.Base(root), path)
+
+		// Move up the directory tree.
+		root = dir
+	}
+
+	return filepath.Clean(root)
+}
+
+func (m *syncInferRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	var diags diag.Diagnostics
+
+	// Use the bundle root path as the starting point for inferring the sync root path.
+	bundleRootPath := filepath.Clean(b.RootPath)
+
+	// Infer the sync root path by looking at each one of the sync paths.
+	// Every sync path must be a descendant of the final sync root path.
+	syncRootPath := bundleRootPath
+	for _, path := range b.Config.Sync.Paths {
+		computedPath := m.computeRoot(path, bundleRootPath)
+		if computedPath == "" {
+			continue
+		}
+
+		// Update sync root path if the computed root path is an ancestor of the current sync root path.
+		if len(computedPath) < len(syncRootPath) {
+			syncRootPath = computedPath
+		}
+	}
+
+	// The new sync root path can only be an ancestor of the previous root path.
+	// Compute the relative path from the sync root to the bundle root.
+	rel, err := filepath.Rel(syncRootPath, bundleRootPath)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	// If during computation of the sync root path we hit the root of the filesystem,
+	// then one or more of the sync paths are outside the filesystem.
+	// Check if this happened by verifying that none of the paths escape the root
+	// when joined with the sync root path.
+	for i, path := range b.Config.Sync.Paths {
+		if filepath.IsLocal(filepath.Join(rel, path)) {
+			continue
+		}
+
+		diags = append(diags, diag.Diagnostic{
+			Severity:  diag.Error,
+			Summary:   fmt.Sprintf("invalid sync path %q", path),
+			Locations: b.Config.GetLocations(fmt.Sprintf("sync.paths[%d]", i)),
+			Paths:     []dyn.Path{dyn.NewPath(dyn.Key("sync"), dyn.Key("paths"), dyn.Index(i))},
+		})
+	}
+
+	if diags.HasError() {
+		return diags
+	}
+
+	// Update all paths in the sync configuration to be relative to the sync root.
+	for i, p := range b.Config.Sync.Paths {
+		b.Config.Sync.Paths[i] = filepath.Join(rel, p)
+	}
+	for i, p := range b.Config.Sync.Include {
+		b.Config.Sync.Include[i] = filepath.Join(rel, p)
+	}
+	for i, p := range b.Config.Sync.Exclude {
+		b.Config.Sync.Exclude[i] = filepath.Join(rel, p)
+	}
+
+	// Configure the sync root path.
+	b.SyncRoot = vfs.MustNew(syncRootPath)
+	b.SyncRootPath = syncRootPath
+	return nil
+}
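computeRoot is the core of the inference: while the candidate path still escapes the candidate root (filepath.IsLocal reports false), it rebases the path onto the parent directory and moves the root up one level. The standalone sketch below replays the loop on two of the cases covered by the internal test that follows, so the expected outputs are taken from this diff rather than invented:

package main

import (
	"fmt"
	"path/filepath"
)

// computeRoot mirrors the loop above: walk root upward until path
// no longer escapes it, returning "" if the filesystem root is hit.
func computeRoot(path, root string) string {
	for !filepath.IsLocal(path) {
		dir := filepath.Dir(root)
		if dir == root {
			return "" // reached the filesystem root
		}
		path = filepath.Join(filepath.Base(root), path)
		root = dir
	}
	return filepath.Clean(root)
}

func main() {
	// "../common" relative to /tmp/some/dir resolves under /tmp/some.
	fmt.Println(computeRoot("../common", "/tmp/some/dir")) // /tmp/some
	// A path that escapes the filesystem root yields "".
	fmt.Println(computeRoot("../common", "/")) // ""
}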
@@ -0,0 +1,72 @@
+package mutator
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSyncInferRootInternal_ComputeRoot(t *testing.T) {
+	s := syncInferRoot{}
+
+	tcases := []struct {
+		path string
+		root string
+		out  string
+	}{
+		{
+			// Test that "." doesn't change the root.
+			path: ".",
+			root: "/tmp/some/dir",
+			out:  "/tmp/some/dir",
+		},
+		{
+			// Test that a subdirectory doesn't change the root.
+			path: "sub",
+			root: "/tmp/some/dir",
+			out:  "/tmp/some/dir",
+		},
+		{
+			// Test that a parent directory changes the root.
+			path: "../common",
+			root: "/tmp/some/dir",
+			out:  "/tmp/some",
+		},
+		{
+			// Test that a deeply nested parent directory changes the root.
+			path: "../../../../../../common",
+			root: "/tmp/some/dir/that/is/very/deeply/nested",
+			out:  "/tmp/some",
+		},
+		{
+			// Test that a parent directory changes the root at the filesystem root boundary.
+			path: "../common",
+			root: "/tmp",
+			out:  "/",
+		},
+		{
+			// Test that an invalid parent directory doesn't change the root and returns an empty string.
+			path: "../common",
+			root: "/",
+			out:  "",
+		},
+		{
+			// Test that the returned path is cleaned even if the root doesn't change.
+			path: "sub",
+			root: "/tmp/some/../dir",
+			out:  "/tmp/dir",
+		},
+		{
+			// Test that a relative root path also works.
+			path: "../common",
+			root: "foo/bar",
+			out:  "foo",
+		},
+	}
+
+	for _, tc := range tcases {
+		out := s.computeRoot(tc.path, tc.root)
+		assert.Equal(t, tc.out, filepath.ToSlash(out))
+	}
+}
@@ -0,0 +1,198 @@
+package mutator_test
+
+import (
+	"context"
+	"path/filepath"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					".",
+				},
+				Include: []string{
+					"foo",
+					"bar",
+				},
+				Exclude: []string{
+					"baz",
+					"qux",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("/tmp/some/dir"), b.SyncRootPath)
+
+	// Check that the paths are unchanged.
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
+	assert.Equal(t, []string{"foo", "bar"}, b.Config.Sync.Include)
+	assert.Equal(t, []string{"baz", "qux"}, b.Config.Sync.Exclude)
+}
+
+func TestSyncInferRoot_NominalRelative(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "./some/dir",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					".",
+				},
+				Include: []string{
+					"foo",
+					"bar",
+				},
+				Exclude: []string{
+					"baz",
+					"qux",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("some/dir"), b.SyncRootPath)
+
+	// Check that the paths are unchanged.
+	assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
+	assert.Equal(t, []string{"foo", "bar"}, b.Config.Sync.Include)
+	assert.Equal(t, []string{"baz", "qux"}, b.Config.Sync.Exclude)
+}
+
+func TestSyncInferRoot_ParentDirectory(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					"../common",
+				},
+				Include: []string{
+					"foo",
+					"bar",
+				},
+				Exclude: []string{
+					"baz",
+					"qux",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
+
+	// Check that the paths are updated.
+	assert.Equal(t, []string{"common"}, b.Config.Sync.Paths)
+	assert.Equal(t, []string{filepath.FromSlash("dir/foo"), filepath.FromSlash("dir/bar")}, b.Config.Sync.Include)
+	assert.Equal(t, []string{filepath.FromSlash("dir/baz"), filepath.FromSlash("dir/qux")}, b.Config.Sync.Exclude)
+}
+
+func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir/that/is/very/deeply/nested",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					"../../../../../../common",
+				},
+				Include: []string{
+					"foo",
+					"bar",
+				},
+				Exclude: []string{
+					"baz",
+					"qux",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
+
+	// Check that the paths are updated.
+	assert.Equal(t, []string{"common"}, b.Config.Sync.Paths)
+	assert.Equal(t, []string{
+		filepath.FromSlash("dir/that/is/very/deeply/nested/foo"),
+		filepath.FromSlash("dir/that/is/very/deeply/nested/bar"),
+	}, b.Config.Sync.Include)
+	assert.Equal(t, []string{
+		filepath.FromSlash("dir/that/is/very/deeply/nested/baz"),
+		filepath.FromSlash("dir/that/is/very/deeply/nested/qux"),
+	}, b.Config.Sync.Exclude)
+}
+
+func TestSyncInferRoot_MultiplePaths(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/bundle/root",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					"./foo",
+					"../common",
+					"./bar",
+					"../../baz",
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
+
+	// Check that the paths are updated.
+	assert.Equal(t, filepath.FromSlash("bundle/root/foo"), b.Config.Sync.Paths[0])
+	assert.Equal(t, filepath.FromSlash("bundle/common"), b.Config.Sync.Paths[1])
+	assert.Equal(t, filepath.FromSlash("bundle/root/bar"), b.Config.Sync.Paths[2])
+	assert.Equal(t, filepath.FromSlash("baz"), b.Config.Sync.Paths[3])
+}
+
+func TestSyncInferRoot_Error(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "/tmp/some/dir",
+		Config: config.Root{
+			Sync: config.Sync{
+				Paths: []string{
+					"../../../../error",
+					"../../../thisworks",
+					"../../../../../error",
+				},
+			},
+		},
+	}
+
+	bundletest.SetLocation(b, "sync.paths", "databricks.yml")
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
+	require.Len(t, diags, 2)
+	assert.Equal(t, `invalid sync path "../../../../error"`, diags[0].Summary)
+	assert.Equal(t, "databricks.yml:0:0", diags[0].Locations[0].String())
+	assert.Equal(t, "sync.paths[0]", diags[0].Paths[0].String())
+	assert.Equal(t, `invalid sync path "../../../../../error"`, diags[1].Summary)
+	assert.Equal(t, "databricks.yml:0:0", diags[1].Locations[0].String())
+	assert.Equal(t, "sync.paths[2]", diags[1].Paths[0].String())
+}
@@ -82,7 +82,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund
 		return err
 	}
 
-	internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
+	internalDirRel, err := filepath.Rel(b.SyncRootPath, internalDir)
 	if err != nil {
 		return err
 	}
@@ -9,7 +9,6 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/stretchr/testify/require"
@@ -57,17 +56,18 @@ func TestGenerateTrampoline(t *testing.T) {
 	}
 
 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		RootPath:     filepath.Join(tmpDir, "parent", "my_bundle"),
+		SyncRootPath: filepath.Join(tmpDir, "parent"),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/Workspace/files",
 			},
 			Bundle: config.Bundle{
 				Target: "development",
 			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"test": {
-						Paths: paths.Paths{
-							ConfigFilePath: tmpDir,
-						},
 						JobSettings: &jobs.JobSettings{
 							Tasks: tasks,
 						},
@@ -93,6 +93,6 @@ func TestGenerateTrampoline(t *testing.T) {
 	require.Equal(t, "Hello from Trampoline", string(bytes))
 
 	task := b.Config.Resources.Jobs["test"].Tasks[0]
-	require.Equal(t, task.NotebookTask.NotebookPath, ".databricks/bundle/development/.internal/notebook_test_to_trampoline")
+	require.Equal(t, "/Workspace/files/my_bundle/.databricks/bundle/development/.internal/notebook_test_to_trampoline", task.NotebookTask.NotebookPath)
 	require.Nil(t, task.PythonWheelTask)
 }
@@ -93,14 +93,14 @@ func (t *translateContext) rewritePath(
 		return nil
 	}
 
-	// Local path must be contained in the bundle root.
+	// Local path must be contained in the sync root.
 	// If it isn't, it won't be synchronized into the workspace.
-	localRelPath, err := filepath.Rel(t.b.RootPath, localPath)
+	localRelPath, err := filepath.Rel(t.b.SyncRootPath, localPath)
 	if err != nil {
 		return err
 	}
 	if strings.HasPrefix(localRelPath, "..") {
-		return fmt.Errorf("path %s is not contained in bundle root path", localPath)
+		return fmt.Errorf("path %s is not contained in sync root path", localPath)
 	}
 
 	// Prefix remote path with its remote root path.
@@ -118,7 +118,7 @@ func (t *translateContext) rewritePath(
 }
 
 func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
-	nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath))
+	nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
 	if errors.Is(err, fs.ErrNotExist) {
 		return "", fmt.Errorf("notebook %s not found", literal)
 	}
@@ -134,7 +134,7 @@ func (t *translateContext) translateNotebookPath(literal, localFullPath, localRe
 }
 
 func (t *translateContext) translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
-	nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath))
+	nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
 	if errors.Is(err, fs.ErrNotExist) {
 		return "", fmt.Errorf("file %s not found", literal)
 	}
@@ -148,7 +148,7 @@ func (t *translateContext) translateFilePath(literal, localFullPath, localRelPat
 }
 
 func (t *translateContext) translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
-	info, err := t.b.BundleRoot.Stat(filepath.ToSlash(localRelPath))
+	info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath))
 	if err != nil {
 		return "", err
 	}
@@ -182,7 +182,7 @@ func (t *translateContext) rewriteValue(p dyn.Path, v dyn.Value, fn rewriteFunc,
 		return dyn.InvalidValue, err
 	}
 
-	return dyn.NewValue(out, v.Location()), nil
+	return dyn.NewValue(out, v.Locations()), nil
 }
 
 func (t *translateContext) rewriteRelativeTo(p dyn.Path, v dyn.Value, fn rewriteFunc, dir, fallback string) (dyn.Value, error) {
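The containment check in rewritePath is the usual filepath.Rel idiom: a path lies inside a root exactly when the relative form does not start with "..". A quick self-contained illustration of the same check, with invented paths:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// contained reports whether path lies inside root, using the same
// filepath.Rel + ".." prefix idiom as rewritePath above.
func contained(root, path string) bool {
	rel, err := filepath.Rel(root, path)
	if err != nil {
		return false
	}
	return !strings.HasPrefix(rel, "..")
}

func main() {
	fmt.Println(contained("/sync", "/sync/src/nb.py")) // true
	fmt.Println(contained("/sync", "/other/nb.py"))    // false
}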
@@ -50,6 +50,11 @@ func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern
 			t.translateNoOp,
 			noSkipRewrite,
 		},
+		{
+			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
+			t.translateFilePath,
+			noSkipRewrite,
+		},
 	}
 }
@@ -78,7 +83,7 @@ func (t *translateContext) jobRewritePatterns() []jobRewritePattern {
 			),
 			t.translateNoOpWithPrefix,
 			func(s string) bool {
-				return !libraries.IsEnvironmentDependencyLocal(s)
+				return !libraries.IsLibraryLocal(s)
 			},
 		},
 	}
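IsLibraryLocal replaces IsEnvironmentDependencyLocal as the predicate deciding whether a dependency string refers to a local file that needs path translation. Its exact rules are not shown in this diff; judging from the test assertions further down (relative paths are rewritten, PyPI names and /Workspace paths are left alone), something like the following holds — treat it as an assumption, not the library's documented contract:

package example

import (
	"fmt"

	"github.com/databricks/cli/bundle/libraries"
)

// Assumed behavior: relative file paths count as local and get rewritten,
// while PyPI package names and workspace paths are left untouched.
func demo() {
	for _, s := range []string{"./dist/task.whl", "simplejson", "/Workspace/Users/foo@bar.com/test.whl"} {
		fmt.Println(s, libraries.IsLibraryLocal(s))
	}
}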
@@ -11,7 +11,10 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/config/variable"
 	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/cli/libs/vfs"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
@@ -38,8 +41,8 @@ func touchEmptyFile(t *testing.T, path string) {
 func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
 	dir := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -107,10 +110,11 @@ func TestTranslatePaths(t *testing.T) {
 	touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py"))
 	touchEmptyFile(t, filepath.Join(dir, "my_python_file.py"))
 	touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar"))
+	touchEmptyFile(t, filepath.Join(dir, "requirements.txt"))
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -137,6 +141,9 @@ func TestTranslatePaths(t *testing.T) {
 					NotebookTask: &jobs.NotebookTask{
 						NotebookPath: "./my_job_notebook.py",
 					},
+					Libraries: []compute.Library{
+						{Requirements: "./requirements.txt"},
+					},
 				},
 				{
 					PythonWheelTask: &jobs.PythonWheelTask{
@@ -229,6 +236,11 @@ func TestTranslatePaths(t *testing.T) {
 		"/bundle/my_job_notebook",
 		b.Config.Resources.Jobs["job"].Tasks[2].NotebookTask.NotebookPath,
 	)
+	assert.Equal(
+		t,
+		"/bundle/requirements.txt",
+		b.Config.Resources.Jobs["job"].Tasks[2].Libraries[0].Requirements,
+	)
 	assert.Equal(
 		t,
 		"/bundle/my_python_file.py",
@@ -277,8 +289,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml"))
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -368,12 +380,12 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 	)
 }
 
-func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
+func TestTranslatePathsOutsideSyncRoot(t *testing.T) {
 	dir := t.TempDir()
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -399,15 +411,15 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
 	bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml"))
 
 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
-	assert.ErrorContains(t, diags.Error(), "is not contained in bundle root")
+	assert.ErrorContains(t, diags.Error(), "is not contained in sync root path")
 }
 
 func TestJobNotebookDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
@@ -437,8 +449,8 @@ func TestJobFileDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
@@ -468,8 +480,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
@@ -499,8 +511,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
@@ -531,8 +543,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
 	touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -566,8 +578,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -601,8 +613,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -636,8 +648,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
 	touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Workspace: config.Workspace{
 				FilePath: "/bundle",
@@ -672,8 +684,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "env2.py"))
 
 	b := &bundle.Bundle{
-		RootPath:   dir,
-		BundleRoot: vfs.MustNew(dir),
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
 		Config: config.Root{
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
@@ -708,3 +720,64 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
 	assert.Equal(t, "simplejson", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[2])
 	assert.Equal(t, "/Workspace/Users/foo@bar.com/test.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[3])
 }
+
+func TestTranslatePathWithComplexVariables(t *testing.T) {
+	dir := t.TempDir()
+	b := &bundle.Bundle{
+		SyncRootPath: dir,
+		SyncRoot:     vfs.MustNew(dir),
+		Config: config.Root{
+			Variables: map[string]*variable.Variable{
+				"cluster_libraries": {
+					Type: variable.VariableTypeComplex,
+					Default: [](map[string]string){
+						{
+							"whl": "./local/whl.whl",
+						},
+					},
+				},
+			},
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job": {
+						JobSettings: &jobs.JobSettings{
+							Tasks: []jobs.Task{
+								{
+									TaskKey: "test",
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	bundletest.SetLocation(b, "variables", filepath.Join(dir, "variables/variables.yml"))
+	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
+
+	ctx := context.Background()
+	// Assign the variables to the dynamic configuration.
+	diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+		err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+			p := dyn.MustPathFromString("resources.jobs.job.tasks[0]")
+			return dyn.SetByPath(v, p.Append(dyn.Key("libraries")), dyn.V("${var.cluster_libraries}"))
+		})
+		return diag.FromErr(err)
+	})
+	require.NoError(t, diags.Error())
+
+	diags = bundle.Apply(ctx, b,
+		bundle.Seq(
+			mutator.SetVariables(),
+			mutator.ResolveVariableReferences("variables"),
+			mutator.TranslatePaths(),
+		))
+	require.NoError(t, diags.Error())
+
+	assert.Equal(
+		t,
+		filepath.Join("variables", "local", "whl.whl"),
+		b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl,
+	)
+}
@@ -1,22 +0,0 @@
-package paths
-
-import (
-	"github.com/databricks/cli/libs/dyn"
-)
-
-type Paths struct {
-	// Absolute path on the local file system to the configuration file that holds
-	// the definition of this resource.
-	ConfigFilePath string `json:"-" bundle:"readonly"`
-
-	// DynamicValue stores the [dyn.Value] of the containing struct.
-	// This assumes that this struct is always embedded.
-	DynamicValue dyn.Value `json:"-"`
-}
-
-func (p *Paths) ConfigureConfigFilePath() {
-	if !p.DynamicValue.IsValid() {
-		panic("DynamicValue not set")
-	}
-	p.ConfigFilePath = p.DynamicValue.Location().File
-}
@@ -0,0 +1,32 @@
+package config
+
+const Paused = "PAUSED"
+const Unpaused = "UNPAUSED"
+
+type Presets struct {
+	// NamePrefix to prepend to all resource names.
+	NamePrefix string `json:"name_prefix,omitempty"`
+
+	// PipelinesDevelopment is the default value for the development field of pipelines.
+	PipelinesDevelopment *bool `json:"pipelines_development,omitempty"`
+
+	// TriggerPauseStatus is the default value for the pause status of all triggers and schedules.
+	// Either config.Paused, config.Unpaused, or empty.
+	TriggerPauseStatus string `json:"trigger_pause_status,omitempty"`
+
+	// JobsMaxConcurrentRuns is the default value for the max concurrent runs of jobs.
+	JobsMaxConcurrentRuns int `json:"jobs_max_concurrent_runs,omitempty"`
+
+	// Tags to add to all resources.
+	Tags map[string]string `json:"tags,omitempty"`
+}
+
+// IsExplicitlyEnabled tests whether this feature is explicitly enabled.
+func IsExplicitlyEnabled(feature *bool) bool {
+	return feature != nil && *feature
+}
+
+// IsExplicitlyDisabled tests whether this feature is explicitly disabled.
+func IsExplicitlyDisabled(feature *bool) bool {
+	return feature != nil && !*feature
+}
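Presets deliberately uses *bool rather than bool for PipelinesDevelopment: a pointer gives three states (unset, explicitly true, explicitly false), which IsExplicitlyEnabled and IsExplicitlyDisabled decode. A small illustration, with the two helpers copied from the hunk above so it runs standalone:

package main

import "fmt"

func IsExplicitlyEnabled(feature *bool) bool  { return feature != nil && *feature }
func IsExplicitlyDisabled(feature *bool) bool { return feature != nil && !*feature }

func main() {
	tr, fa := true, false
	var unset *bool

	// Three distinct states: enabled, disabled, and not set at all.
	fmt.Println(IsExplicitlyEnabled(&tr), IsExplicitlyDisabled(&tr))     // true false
	fmt.Println(IsExplicitlyEnabled(&fa), IsExplicitlyDisabled(&fa))     // false true
	fmt.Println(IsExplicitlyEnabled(unset), IsExplicitlyDisabled(unset)) // false false
}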
@@ -2,7 +2,6 @@ package config
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 
 	"github.com/databricks/cli/bundle/config/resources"
@@ -19,206 +18,17 @@ type Resources struct {
 	ModelServingEndpoints map[string]*resources.ModelServingEndpoint `json:"model_serving_endpoints,omitempty"`
 	RegisteredModels      map[string]*resources.RegisteredModel      `json:"registered_models,omitempty"`
 	QualityMonitors       map[string]*resources.QualityMonitor       `json:"quality_monitors,omitempty"`
-}
-
-type UniqueResourceIdTracker struct {
-	Type       map[string]string
-	ConfigPath map[string]string
+	Schemas               map[string]*resources.Schema               `json:"schemas,omitempty"`
 }
 
 type ConfigResource interface {
 	// Function to assert if the resource exists in the workspace configured in
 	// the input workspace client.
 	Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error)
 
 	// Terraform equivalent name of the resource. For example "databricks_job"
 	// for jobs and "databricks_pipeline" for pipelines.
 	TerraformResourceName() string
-	Validate() error
-
-	json.Marshaler
-	json.Unmarshaler
-}
-
-// verifies merging is safe by checking no duplicate identifiers exist
-func (r *Resources) VerifySafeMerge(other *Resources) error {
-	rootTracker, err := r.VerifyUniqueResourceIdentifiers()
-	if err != nil {
-		return err
-	}
-	otherTracker, err := other.VerifyUniqueResourceIdentifiers()
-	if err != nil {
-		return err
-	}
-	for k := range otherTracker.Type {
-		if _, ok := rootTracker.Type[k]; ok {
-			return fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
-				k,
-				rootTracker.Type[k],
-				rootTracker.ConfigPath[k],
-				otherTracker.Type[k],
-				otherTracker.ConfigPath[k],
-			)
-		}
-	}
-	return nil
-}
-
-// This function verifies there are no duplicate names used for the resource definations
-func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker, error) {
-	tracker := &UniqueResourceIdTracker{
-		Type:       make(map[string]string),
-		ConfigPath: make(map[string]string),
-	}
-	for k := range r.Jobs {
-		tracker.Type[k] = "job"
-		tracker.ConfigPath[k] = r.Jobs[k].ConfigFilePath
-	}
-	for k := range r.Pipelines {
-		if _, ok := tracker.Type[k]; ok {
-			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
-				k,
-				tracker.Type[k],
-				tracker.ConfigPath[k],
-				"pipeline",
-				r.Pipelines[k].ConfigFilePath,
-			)
-		}
-		tracker.Type[k] = "pipeline"
-		tracker.ConfigPath[k] = r.Pipelines[k].ConfigFilePath
-	}
-	for k := range r.Models {
-		if _, ok := tracker.Type[k]; ok {
-			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
-				k,
-				tracker.Type[k],
-				tracker.ConfigPath[k],
-				"mlflow_model",
-				r.Models[k].ConfigFilePath,
-			)
-		}
-		tracker.Type[k] = "mlflow_model"
-		tracker.ConfigPath[k] = r.Models[k].ConfigFilePath
-	}
-	for k := range r.Experiments {
-		if _, ok := tracker.Type[k]; ok {
-			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
-				k,
-				tracker.Type[k],
-				tracker.ConfigPath[k],
-				"mlflow_experiment",
-				r.Experiments[k].ConfigFilePath,
-			)
-		}
-		tracker.Type[k] = "mlflow_experiment"
-		tracker.ConfigPath[k] = r.Experiments[k].ConfigFilePath
-	}
-	for k := range r.ModelServingEndpoints {
-		if _, ok := tracker.Type[k]; ok {
-			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
-				k,
-				tracker.Type[k],
-				tracker.ConfigPath[k],
-				"model_serving_endpoint",
-				r.ModelServingEndpoints[k].ConfigFilePath,
-			)
-		}
-		tracker.Type[k] = "model_serving_endpoint"
-		tracker.ConfigPath[k] = r.ModelServingEndpoints[k].ConfigFilePath
-	}
-	for k := range r.RegisteredModels {
-		if _, ok := tracker.Type[k]; ok {
-			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
-				k,
-				tracker.Type[k],
-				tracker.ConfigPath[k],
-				"registered_model",
-				r.RegisteredModels[k].ConfigFilePath,
-			)
-		}
-		tracker.Type[k] = "registered_model"
-		tracker.ConfigPath[k] = r.RegisteredModels[k].ConfigFilePath
-	}
-	for k := range r.QualityMonitors {
-		if _, ok := tracker.Type[k]; ok {
-			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
-				k,
-				tracker.Type[k],
-				tracker.ConfigPath[k],
-				"quality_monitor",
-				r.QualityMonitors[k].ConfigFilePath,
-			)
-		}
-		tracker.Type[k] = "quality_monitor"
-		tracker.ConfigPath[k] = r.QualityMonitors[k].ConfigFilePath
-	}
-	return tracker, nil
-}
-
-type resource struct {
-	resource      ConfigResource
-	resource_type string
-	key           string
-}
-
-func (r *Resources) allResources() []resource {
-	all := make([]resource, 0)
-	for k, e := range r.Jobs {
-		all = append(all, resource{resource_type: "job", resource: e, key: k})
-	}
-	for k, e := range r.Pipelines {
-		all = append(all, resource{resource_type: "pipeline", resource: e, key: k})
-	}
-	for k, e := range r.Models {
-		all = append(all, resource{resource_type: "model", resource: e, key: k})
-	}
-	for k, e := range r.Experiments {
-		all = append(all, resource{resource_type: "experiment", resource: e, key: k})
-	}
-	for k, e := range r.ModelServingEndpoints {
-		all = append(all, resource{resource_type: "serving endpoint", resource: e, key: k})
-	}
-	for k, e := range r.RegisteredModels {
-		all = append(all, resource{resource_type: "registered model", resource: e, key: k})
-	}
-	for k, e := range r.QualityMonitors {
-		all = append(all, resource{resource_type: "quality monitor", resource: e, key: k})
-	}
-	return all
-}
-
-func (r *Resources) VerifyAllResourcesDefined() error {
-	all := r.allResources()
-	for _, e := range all {
-		err := e.resource.Validate()
-		if err != nil {
-			return fmt.Errorf("%s %s is not defined", e.resource_type, e.key)
-		}
-	}
-
-	return nil
-}
-
-// ConfigureConfigFilePath sets the specified path for all resources contained in this instance.
-// This property is used to correctly resolve paths relative to the path
-// of the configuration file they were defined in.
-func (r *Resources) ConfigureConfigFilePath() {
-	for _, e := range r.Jobs {
-		e.ConfigureConfigFilePath()
-	}
-	for _, e := range r.Pipelines {
-		e.ConfigureConfigFilePath()
-	}
-	for _, e := range r.Models {
-		e.ConfigureConfigFilePath()
-	}
-	for _, e := range r.Experiments {
-		e.ConfigureConfigFilePath()
-	}
-	for _, e := range r.ModelServingEndpoints {
-		e.ConfigureConfigFilePath()
-	}
-	for _, e := range r.RegisteredModels {
-		e.ConfigureConfigFilePath()
-	}
-	for _, e := range r.QualityMonitors {
-		e.ConfigureConfigFilePath()
-	}
-}
-
 func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) {
|
@@ -2,10 +2,8 @@ package resources
 
 import (
 	"context"
-	"fmt"
 	"strconv"
 
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/marshal"
@@ -17,8 +15,6 @@ type Job struct {
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
 
-	paths.Paths
-
 	*jobs.JobSettings
 }
@@ -48,11 +44,3 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri
 func (j *Job) TerraformResourceName() string {
 	return "databricks_job"
 }
-
-func (j *Job) Validate() error {
-	if j == nil || !j.DynamicValue.IsValid() || j.JobSettings == nil {
-		return fmt.Errorf("job is not defined")
-	}
-
-	return nil
-}
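The same three-part removal repeats for every resource type below: the fmt and paths imports go away, the embedded paths.Paths field is dropped from each struct, and the per-resource Validate method is deleted. Where the defining file is still needed, callers can consult the dynamic configuration instead. A hedged sketch of that lookup — GetLocation is the accessor used elsewhere in this diff, while the resource key here is invented:

package example

import (
	"fmt"

	"github.com/databricks/cli/bundle"
)

// printDefinition reports where a resource was defined, replacing the
// removed ConfigFilePath field. "resources.jobs.my_job" is a made-up key.
func printDefinition(b *bundle.Bundle) {
	loc := b.Config.GetLocation("resources.jobs.my_job")
	fmt.Printf("defined at %s\n", loc)
}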
@@ -2,9 +2,7 @@ package resources
 
 import (
 	"context"
-	"fmt"
 
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/marshal"
@@ -16,8 +14,6 @@ type MlflowExperiment struct {
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
 
-	paths.Paths
-
 	*ml.Experiment
 }
@@ -43,11 +39,3 @@ func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceCl
 func (s *MlflowExperiment) TerraformResourceName() string {
 	return "databricks_mlflow_experiment"
 }
-
-func (s *MlflowExperiment) Validate() error {
-	if s == nil || !s.DynamicValue.IsValid() {
-		return fmt.Errorf("experiment is not defined")
-	}
-
-	return nil
-}
@@ -2,9 +2,7 @@ package resources
 
 import (
 	"context"
-	"fmt"
 
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/marshal"
@@ -16,8 +14,6 @@ type MlflowModel struct {
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
 
-	paths.Paths
-
 	*ml.Model
 }
@@ -43,11 +39,3 @@ func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient,
 func (s *MlflowModel) TerraformResourceName() string {
 	return "databricks_mlflow_model"
 }
-
-func (s *MlflowModel) Validate() error {
-	if s == nil || !s.DynamicValue.IsValid() {
-		return fmt.Errorf("model is not defined")
-	}
-
-	return nil
-}
@@ -2,9 +2,7 @@ package resources
 
 import (
 	"context"
-	"fmt"
 
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/marshal"
@@ -20,10 +18,6 @@ type ModelServingEndpoint struct {
 	// as a reference in other resources. This value is returned by terraform.
 	ID string `json:"id,omitempty" bundle:"readonly"`
 
-	// Path to config file where the resource is defined. All bundle resources
-	// include this for interpolation purposes.
-	paths.Paths
-
 	// This is a resource agnostic implementation of permissions for ACLs.
 	// Implementation could be different based on the resource type.
 	Permissions []Permission `json:"permissions,omitempty"`
@@ -53,11 +47,3 @@ func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.Workspa
 func (s *ModelServingEndpoint) TerraformResourceName() string {
 	return "databricks_model_serving"
 }
-
-func (s *ModelServingEndpoint) Validate() error {
-	if s == nil || !s.DynamicValue.IsValid() {
-		return fmt.Errorf("serving endpoint is not defined")
-	}
-
-	return nil
-}
@@ -2,9 +2,7 @@ package resources
 
 import (
 	"context"
-	"fmt"
 
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/marshal"
@@ -16,8 +14,6 @@ type Pipeline struct {
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
 
-	paths.Paths
-
 	*pipelines.PipelineSpec
 }
@@ -43,11 +39,3 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
 func (p *Pipeline) TerraformResourceName() string {
 	return "databricks_pipeline"
 }
-
-func (p *Pipeline) Validate() error {
-	if p == nil || !p.DynamicValue.IsValid() {
-		return fmt.Errorf("pipeline is not defined")
-	}
-
-	return nil
-}
@@ -2,9 +2,7 @@ package resources
 
 import (
 	"context"
-	"fmt"
 
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/marshal"
@@ -21,10 +19,6 @@ type QualityMonitor struct {
 	// as a reference in other resources. This value is returned by terraform.
 	ID string `json:"id,omitempty" bundle:"readonly"`
 
-	// Path to config file where the resource is defined. All bundle resources
-	// include this for interpolation purposes.
-	paths.Paths
-
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
 }
@@ -50,11 +44,3 @@ func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClie
 func (s *QualityMonitor) TerraformResourceName() string {
 	return "databricks_quality_monitor"
 }
-
-func (s *QualityMonitor) Validate() error {
-	if s == nil || !s.DynamicValue.IsValid() {
-		return fmt.Errorf("quality monitor is not defined")
-	}
-
-	return nil
-}
@@ -2,9 +2,7 @@ package resources
 
 import (
 	"context"
-	"fmt"
 
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/marshal"
@@ -21,10 +19,6 @@ type RegisteredModel struct {
 	// as a reference in other resources. This value is returned by terraform.
 	ID string `json:"id,omitempty" bundle:"readonly"`
 
-	// Path to config file where the resource is defined. All bundle resources
-	// include this for interpolation purposes.
-	paths.Paths
-
 	// This represents the input args for terraform, and will get converted
 	// to a HCL representation for CRUD
 	*catalog.CreateRegisteredModelRequest
@@ -54,11 +48,3 @@ func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceCli
 func (s *RegisteredModel) TerraformResourceName() string {
 	return "databricks_registered_model"
 }
-
-func (s *RegisteredModel) Validate() error {
-	if s == nil || !s.DynamicValue.IsValid() {
-		return fmt.Errorf("registered model is not defined")
-	}
-
-	return nil
-}
@ -0,0 +1,27 @@
|
|||
package resources
|
||||
|
||||
import (
|
||||
"github.com/databricks/databricks-sdk-go/marshal"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
)
|
||||
|
||||
type Schema struct {
|
||||
// List of grants to apply on this schema.
|
||||
Grants []Grant `json:"grants,omitempty"`
|
||||
|
||||
// Full name of the schema (catalog_name.schema_name). This value is read from
|
||||
// the terraform state after deployment succeeds.
|
||||
ID string `json:"id,omitempty" bundle:"readonly"`
|
||||
|
||||
*catalog.CreateSchema
|
||||
|
||||
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
|
||||
}
|
||||
|
||||
func (s *Schema) UnmarshalJSON(b []byte) error {
|
||||
return marshal.Unmarshal(b, s)
|
||||
}
|
||||
|
||||
func (s Schema) MarshalJSON() ([]byte, error) {
|
||||
return marshal.Marshal(s)
|
||||
}
|
|
@@ -5,129 +5,9 @@ import (
	"reflect"
	"testing"

	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/stretchr/testify/assert"
)

func TestVerifyUniqueResourceIdentifiers(t *testing.T) {
	r := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: paths.Paths{
					ConfigFilePath: "foo.yml",
				},
			},
		},
		Models: map[string]*resources.MlflowModel{
			"bar": {
				Paths: paths.Paths{
					ConfigFilePath: "bar.yml",
				},
			},
		},
		Experiments: map[string]*resources.MlflowExperiment{
			"foo": {
				Paths: paths.Paths{
					ConfigFilePath: "foo2.yml",
				},
			},
		},
	}
	_, err := r.VerifyUniqueResourceIdentifiers()
	assert.ErrorContains(t, err, "multiple resources named foo (job at foo.yml, mlflow_experiment at foo2.yml)")
}

func TestVerifySafeMerge(t *testing.T) {
	r := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: paths.Paths{
					ConfigFilePath: "foo.yml",
				},
			},
		},
		Models: map[string]*resources.MlflowModel{
			"bar": {
				Paths: paths.Paths{
					ConfigFilePath: "bar.yml",
				},
			},
		},
	}
	other := Resources{
		Pipelines: map[string]*resources.Pipeline{
			"foo": {
				Paths: paths.Paths{
					ConfigFilePath: "foo2.yml",
				},
			},
		},
	}
	err := r.VerifySafeMerge(&other)
	assert.ErrorContains(t, err, "multiple resources named foo (job at foo.yml, pipeline at foo2.yml)")
}

func TestVerifySafeMergeForSameResourceType(t *testing.T) {
	r := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: paths.Paths{
					ConfigFilePath: "foo.yml",
				},
			},
		},
		Models: map[string]*resources.MlflowModel{
			"bar": {
				Paths: paths.Paths{
					ConfigFilePath: "bar.yml",
				},
			},
		},
	}
	other := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: paths.Paths{
					ConfigFilePath: "foo2.yml",
				},
			},
		},
	}
	err := r.VerifySafeMerge(&other)
	assert.ErrorContains(t, err, "multiple resources named foo (job at foo.yml, job at foo2.yml)")
}

func TestVerifySafeMergeForRegisteredModels(t *testing.T) {
	r := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: paths.Paths{
					ConfigFilePath: "foo.yml",
				},
			},
		},
		RegisteredModels: map[string]*resources.RegisteredModel{
			"bar": {
				Paths: paths.Paths{
					ConfigFilePath: "bar.yml",
				},
			},
		},
	}
	other := Resources{
		RegisteredModels: map[string]*resources.RegisteredModel{
			"bar": {
				Paths: paths.Paths{
					ConfigFilePath: "bar2.yml",
				},
			},
		},
	}
	err := r.VerifySafeMerge(&other)
	assert.ErrorContains(t, err, "multiple resources named bar (registered_model at bar.yml, registered_model at bar2.yml)")
}

// This test ensures that all resources have a custom marshaller and unmarshaller.
// This is required because DABs resources map to Databricks APIs, and they do so
// by embedding the corresponding Go SDK structs.
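The comment above is why every resource type in these hunks defines an UnmarshalJSON/MarshalJSON pair that delegates to the SDK's marshal package, exactly as the new Schema resource does. A minimal sketch of the pattern, with a hypothetical stand-in for the embedded SDK struct:

	package example

	import (
		"github.com/databricks/databricks-sdk-go/marshal"
	)

	// EmbeddedSDKStruct is a hypothetical stand-in for an embedded Go SDK type
	// such as catalog.CreateSchema; real resources embed the SDK type directly.
	type EmbeddedSDKStruct struct {
		Name            string   `json:"name,omitempty"`
		ForceSendFields []string `json:"-"`
	}

	type ExampleResource struct {
		ID string `json:"id,omitempty" bundle:"readonly"`

		*EmbeddedSDKStruct
	}

	// Delegating to the SDK's marshal package keeps ForceSendFields semantics
	// (serializing selected zero values) intact for the embedded struct.
	func (e *ExampleResource) UnmarshalJSON(b []byte) error {
		return marshal.Unmarshal(b, e)
	}

	func (e ExampleResource) MarshalJSON() ([]byte, error) {
		return marshal.Marshal(e)
	}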
@@ -60,6 +60,10 @@ type Root struct {
	// RunAs section allows to define an execution identity for jobs and pipelines runs
	RunAs *jobs.JobRunAs `json:"run_as,omitempty"`

	// Presets applies preset transformations throughout the bundle, e.g.
	// adding a name prefix to deployed resources.
	Presets Presets `json:"presets,omitempty"`

	Experimental *Experimental `json:"experimental,omitempty"`

	// Permissions section allows to define permissions which will be

@@ -100,11 +104,6 @@ func LoadFromBytes(path string, raw []byte) (*Root, diag.Diagnostics) {
	if err != nil {
		return nil, diag.Errorf("failed to load %s: %v", path, err)
	}

	_, err = r.Resources.VerifyUniqueResourceIdentifiers()
	if err != nil {
		diags = diags.Extend(diag.FromErr(err))
	}
	return &r, diags
}

@@ -141,17 +140,6 @@ func (r *Root) updateWithDynamicValue(nv dyn.Value) error {

	// Assign the normalized configuration tree.
	r.value = nv

	// At the moment the check has to be done as part of updateWithDynamicValue
	// because otherwise ConfigureConfigFilePath will fail with a panic.
	// In the future, we should move this check to a separate mutator in the initialize phase.
	err = r.Resources.VerifyAllResourcesDefined()
	if err != nil {
		return err
	}

	// Assign config file paths after converting to typed configuration.
	r.ConfigureConfigFilePath()
	return nil
}

@@ -243,15 +231,6 @@ func (r *Root) MarkMutatorExit(ctx context.Context) error {
	return nil
}

// ConfigureConfigFilePath configures the path that its configuration
// was loaded from in configuration leafs that require it.
func (r *Root) ConfigureConfigFilePath() {
	r.Resources.ConfigureConfigFilePath()
	if r.Artifacts != nil {
		r.Artifacts.ConfigureConfigFilePath()
	}
}

// Initializes variables using values passed from the command line flag.
// Input has to be a string of the form `foo=bar`. In this case the variable with
// name `foo` is assigned the value `bar`.

@@ -281,12 +260,6 @@ func (r *Root) InitializeVariables(vars []string) error {
}

func (r *Root) Merge(other *Root) error {
	// Check for safe merge, protecting against duplicate resource identifiers
	err := r.Resources.VerifySafeMerge(&other.Resources)
	if err != nil {
		return err
	}

	// Merge dynamic configuration values.
	return r.Mutate(func(root dyn.Value) (dyn.Value, error) {
		return merge.Merge(root, other.value)

@@ -338,6 +311,7 @@ func (r *Root) MergeTargetOverrides(name string) error {
		"resources",
		"sync",
		"permissions",
		"presets",
	} {
		if root, err = mergeField(root, target, f); err != nil {
			return err

@@ -378,7 +352,7 @@ func (r *Root) MergeTargetOverrides(name string) error {

	// Below, we're setting fields on the bundle key, so make sure it exists.
	if root.Get("bundle").Kind() == dyn.KindInvalid {
		root, err = dyn.Set(root, "bundle", dyn.NewValue(map[string]dyn.Value{}, dyn.Location{}))
		root, err = dyn.Set(root, "bundle", dyn.V(map[string]dyn.Value{}))
		if err != nil {
			return err
		}

@@ -404,7 +378,7 @@ func (r *Root) MergeTargetOverrides(name string) error {
	if v := target.Get("git"); v.Kind() != dyn.KindInvalid {
		ref, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git")))
		if err != nil {
			ref = dyn.NewValue(map[string]dyn.Value{}, dyn.Location{})
			ref = dyn.V(map[string]dyn.Value{})
		}

		// Merge the override into the reference.

@@ -415,7 +389,7 @@ func (r *Root) MergeTargetOverrides(name string) error {

	// If the branch was overridden, we need to clear the inferred flag.
	if branch := v.Get("branch"); branch.Kind() != dyn.KindInvalid {
		out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.NewValue(false, dyn.Location{}))
		out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.V(false))
		if err != nil {
			return err
		}

@@ -456,7 +430,7 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
		// configuration will convert this to a string if necessary.
		return dyn.NewValue(map[string]dyn.Value{
			"default": variable,
		}, variable.Location()), nil
		}, variable.Locations()), nil

	case dyn.KindMap, dyn.KindSequence:
		// Check if the original definition of variable has a type field.

@@ -469,7 +443,7 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
		return dyn.NewValue(map[string]dyn.Value{
			"type":    typeV,
			"default": variable,
		}, variable.Location()), nil
		}, variable.Locations()), nil
	}

	return variable, nil

@@ -524,6 +498,17 @@ func (r Root) GetLocation(path string) dyn.Location {
	return v.Location()
}

// Get all locations of the configuration value at the specified path. We need both
// this function and its singular version (GetLocation) because some diagnostics just need
// the primary location and some need all locations associated with a configuration value.
func (r Root) GetLocations(path string) []dyn.Location {
	v, err := dyn.Get(r.value, path)
	if err != nil {
		return []dyn.Location{}
	}
	return v.Locations()
}

// Value returns the dynamic configuration value of the root object. This value
// is the source of truth and is kept in sync with values in the typed configuration.
func (r Root) Value() dyn.Value {
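A hedged sketch of how a caller might use the GetLocation/GetLocations pair added above; the path string and surrounding function are illustrative, not part of the diff:

	package config

	import "fmt"

	// Sketch only: "resources.jobs.foo" is an illustrative path.
	func exampleLocations(r Root) {
		// Primary location: the first place the value was defined.
		loc := r.GetLocation("resources.jobs.foo")
		fmt.Printf("defined at %s:%d\n", loc.File, loc.Line)

		// All locations: useful for diagnostics on values merged from multiple files.
		for _, l := range r.GetLocations("resources.jobs.foo") {
			fmt.Printf("also defined at %s:%d\n", l.File, l.Line)
		}
	}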
@@ -30,22 +30,6 @@ func TestRootLoad(t *testing.T) {
	assert.Equal(t, "basic", root.Bundle.Name)
}

func TestDuplicateIdOnLoadReturnsError(t *testing.T) {
	_, diags := Load("./testdata/duplicate_resource_names_in_root/databricks.yml")
	assert.ErrorContains(t, diags.Error(), "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)")
}

func TestDuplicateIdOnMergeReturnsError(t *testing.T) {
	root, diags := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml")
	require.NoError(t, diags.Error())

	other, diags := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml")
	require.NoError(t, diags.Error())

	err := root.Merge(other)
	assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml, pipeline at ./testdata/duplicate_resource_name_in_subconfiguration/resources.yml)")
}

func TestInitializeVariables(t *testing.T) {
	fooDefault := "abc"
	root := &Root{

@@ -1,6 +1,10 @@
package config

type Sync struct {
	// Paths contains a list of paths to synchronize relative to the bundle root path.
	// If not configured, this defaults to synchronizing everything in the bundle root path (i.e. `.`).
	Paths []string `json:"paths,omitempty"`

	// Include contains a list of globs evaluated relative to the bundle root path
	// to explicitly include files that were excluded by the user's gitignore.
	Include []string `json:"include,omitempty"`

@@ -20,6 +20,10 @@ type Target struct {
	// development purposes.
	Mode Mode `json:"mode,omitempty"`

	// Mutator configurations that e.g. change the
	// name prefix of deployed resources.
	Presets Presets `json:"presets,omitempty"`

	// Overrides the compute used for jobs and other supported assets.
	ComputeID string `json:"compute_id,omitempty"`

@@ -0,0 +1,57 @@
package validate

import (
	"context"
	"fmt"
	"slices"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

func AllResourcesHaveValues() bundle.Mutator {
	return &allResourcesHaveValues{}
}

type allResourcesHaveValues struct{}

func (m *allResourcesHaveValues) Name() string {
	return "validate:AllResourcesHaveValues"
}

func (m *allResourcesHaveValues) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	diags := diag.Diagnostics{}

	_, err := dyn.MapByPattern(
		b.Config.Value(),
		dyn.NewPattern(dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			if v.Kind() != dyn.KindNil {
				return v, nil
			}

			// Type of the resource, stripped of the trailing 's' to make it
			// singular.
			rType := strings.TrimSuffix(p[1].Key(), "s")

			// Name of the resource. Eg: "foo" in "jobs.foo".
			rName := p[2].Key()

			diags = append(diags, diag.Diagnostic{
				Severity:  diag.Error,
				Summary:   fmt.Sprintf("%s %s is not defined", rType, rName),
				Locations: v.Locations(),
				Paths:     []dyn.Path{slices.Clone(p)},
			})

			return v, nil
		},
	)
	if err != nil {
		diags = append(diags, diag.FromErr(err)...)
	}

	return diags
}
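The MapByPattern walk is the core mechanic of the validator above. A minimal, hedged sketch of the same traversal outside the mutator; the value literal and the dyn.NilValue sentinel are assumptions based on the dyn API as it appears in these hunks:

	package main

	import (
		"fmt"

		"github.com/databricks/cli/libs/dyn"
	)

	func main() {
		// A tiny configuration tree with one resource that has no value.
		v := dyn.V(map[string]dyn.Value{
			"resources": dyn.V(map[string]dyn.Value{
				"jobs": dyn.V(map[string]dyn.Value{
					"foo": dyn.NilValue, // assumed sentinel for a nil value
				}),
			}),
		})

		// Visit every resources.<type>.<name> node, as the validator does.
		pattern := dyn.NewPattern(dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey())
		_, err := dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			fmt.Printf("%s is nil: %v\n", p.String(), v.Kind() == dyn.KindNil)
			return v, nil
		})
		if err != nil {
			fmt.Println("walk failed:", err)
		}
	}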
@@ -6,6 +6,7 @@ import (
	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/deploy/files"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

func FilesToSync() bundle.ReadOnlyMutator {

@@ -45,8 +46,10 @@ func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.
	diags = diags.Append(diag.Diagnostic{
		Severity: diag.Warning,
		Summary:  "There are no files to sync, please check your .gitignore and sync.exclude configuration",
		Location: loc.Location(),
		Path:     loc.Path(),
		// Show all locations where sync.exclude is defined, since merging
		// sync.exclude is additive.
		Locations: loc.Locations(),
		Paths:     []dyn.Path{loc.Path()},
	})
}

@@ -6,6 +6,7 @@ import (

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

func JobClusterKeyDefined() bundle.ReadOnlyMutator {

@@ -41,8 +42,11 @@ func (v *jobClusterKeyDefined) Apply(ctx context.Context, rb bundle.ReadOnlyBund
		diags = diags.Append(diag.Diagnostic{
			Severity: diag.Warning,
			Summary:  fmt.Sprintf("job_cluster_key %s is not defined", task.JobClusterKey),
			Location: loc.Location(),
			Path:     loc.Path(),
			// Show only the location where the job_cluster_key is defined.
			// Other associated locations are not relevant since they are
			// overridden during merging.
			Locations: []dyn.Location{loc.Location()},
			Paths:     []dyn.Path{loc.Path()},
		})
	}
}
@@ -0,0 +1,116 @@
package validate

import (
	"context"
	"fmt"
	"slices"
	"sort"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

// This mutator validates that:
//
// 1. Each resource key is unique across different resource types. No two resources
//    of the same type can have the same key. This is because commands like "bundle run"
//    rely on the resource key to identify the resource to run.
//    Eg: jobs.foo and pipelines.foo are not allowed simultaneously.
//
// 2. Each resource definition is contained within a single file, and is not spread
//    across multiple files. Note: This is not applicable to resource configuration
//    defined in a target override. That is why this mutator MUST run before the target
//    overrides are merged.
func UniqueResourceKeys() bundle.Mutator {
	return &uniqueResourceKeys{}
}

type uniqueResourceKeys struct{}

func (m *uniqueResourceKeys) Name() string {
	return "validate:unique_resource_keys"
}

func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	diags := diag.Diagnostics{}

	type metadata struct {
		locations []dyn.Location
		paths     []dyn.Path
	}

	// Map of resource key to the paths and locations the resource is defined at.
	resourceMetadata := map[string]*metadata{}

	rv := b.Config.Value().Get("resources")

	// Return early if no resources are defined or the resources block is empty.
	if rv.Kind() == dyn.KindInvalid || rv.Kind() == dyn.KindNil {
		return diags
	}

	// Gather the paths and locations of all resources.
	_, err := dyn.MapByPattern(
		rv,
		dyn.NewPattern(dyn.AnyKey(), dyn.AnyKey()),
		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			// The key for the resource. Eg: "my_job" for jobs.my_job.
			k := p[1].Key()

			m, ok := resourceMetadata[k]
			if !ok {
				m = &metadata{
					paths:     []dyn.Path{},
					locations: []dyn.Location{},
				}
			}

			// dyn.Path under the hood is a slice. The code that walks the configuration
			// tree uses the same underlying slice to track the path as it walks
			// the tree. So, we need to clone it here.
			m.paths = append(m.paths, slices.Clone(p))
			m.locations = append(m.locations, v.Locations()...)

			resourceMetadata[k] = m
			return v, nil
		},
	)
	if err != nil {
		return diag.FromErr(err)
	}

	for k, v := range resourceMetadata {
		if len(v.locations) <= 1 {
			continue
		}

		// Sort the locations and paths for consistent error messages. This helps
		// with unit testing.
		sort.Slice(v.locations, func(i, j int) bool {
			l1 := v.locations[i]
			l2 := v.locations[j]

			if l1.File != l2.File {
				return l1.File < l2.File
			}
			if l1.Line != l2.Line {
				return l1.Line < l2.Line
			}
			return l1.Column < l2.Column
		})
		sort.Slice(v.paths, func(i, j int) bool {
			return v.paths[i].String() < v.paths[j].String()
		})

		// If there are multiple resources with the same key, report an error.
		diags = append(diags, diag.Diagnostic{
			Severity:  diag.Error,
			Summary:   fmt.Sprintf("multiple resources have been defined with the same key: %s", k),
			Locations: v.locations,
			Paths:     v.paths,
		})
	}

	return diags
}
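A short usage sketch for the mutator above; bundle.Apply mirrors the call pattern visible in this diff's tests, while the surrounding function and package wiring are illustrative:

	package phases

	import (
		"context"

		"github.com/databricks/cli/bundle"
		"github.com/databricks/cli/bundle/config/validate"
		"github.com/databricks/cli/libs/diag"
	)

	// Sketch: the package name is illustrative; the validate import path is
	// assumed from the hunks above.
	func validateKeys(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		// Must run before target overrides are merged, per the doc comment above.
		return bundle.Apply(ctx, b, validate.UniqueResourceKeys())
	}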
@@ -20,6 +20,10 @@ func (l location) Location() dyn.Location {
	return l.rb.Config().GetLocation(l.path)
}

func (l location) Locations() []dyn.Location {
	return l.rb.Config().GetLocations(l.path)
}

func (l location) Path() dyn.Path {
	return dyn.MustPathFromString(l.path)
}

@@ -3,10 +3,12 @@ package validate
import (
	"context"
	"fmt"
	"strings"
	"sync"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/fileset"
	"golang.org/x/sync/errgroup"
)

@@ -48,14 +50,20 @@ func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (di

	for i, pattern := range patterns {
		index := i
		p := pattern
		fullPattern := pattern
		// If the pattern is negated, strip the negation prefix
		// and check if the pattern matches any files.
		// Negation in gitignore syntax means "don't look at this path".
		// So if p matches nothing it's a useless negation, but if there are matches,
		// it means: do not include these files into the result set.
		p := strings.TrimPrefix(fullPattern, "!")
		errs.Go(func() error {
			fs, err := fileset.NewGlobSet(rb.BundleRoot(), []string{p})
			if err != nil {
				return err
			}

			all, err := fs.All()
			all, err := fs.Files()
			if err != nil {
				return err
			}

@@ -64,10 +72,10 @@ func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (di
	loc := location{path: fmt.Sprintf("%s[%d]", path, index), rb: rb}
	mu.Lock()
	diags = diags.Append(diag.Diagnostic{
		Severity: diag.Warning,
		Summary:  fmt.Sprintf("Pattern %s does not match any files", p),
		Location: loc.Location(),
		Path:     loc.Path(),
		Severity:  diag.Warning,
		Summary:   fmt.Sprintf("Pattern %s does not match any files", fullPattern),
		Locations: []dyn.Location{loc.Location()},
		Paths:     []dyn.Path{loc.Path()},
	})
	mu.Unlock()
}
@@ -220,7 +220,7 @@ type resolvers struct {
func allResolvers() *resolvers {
	r := &resolvers{}
	r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
		entity, err := w.Alerts.GetByName(ctx, name)
		entity, err := w.Alerts.GetByDisplayName(ctx, name)
		if err != nil {
			return "", err
		}

@@ -284,7 +284,7 @@ func allResolvers() *resolvers {
		return fmt.Sprint(entity.PipelineId), nil
	}
	r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
		entity, err := w.Queries.GetByName(ctx, name)
		entity, err := w.Queries.GetByDisplayName(ctx, name)
		if err != nil {
			return "", err
		}
@@ -12,7 +12,6 @@ import (
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/sync"
	"github.com/databricks/databricks-sdk-go/service/workspace"
	"github.com/fatih/color"
)

type delete struct{}

@@ -22,24 +21,7 @@ func (m *delete) Name() string {
}

func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	// Do not delete files if terraform destroy was not consented
	if !b.Plan.IsEmpty && !b.Plan.ConfirmApply {
		return nil
	}

	cmdio.LogString(ctx, "Starting deletion of remote bundle files")
	cmdio.LogString(ctx, fmt.Sprintf("Bundle remote directory is %s", b.Config.Workspace.RootPath))

	red := color.New(color.FgRed).SprintFunc()
	if !b.AutoApprove {
		proceed, err := cmdio.AskYesOrNo(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?", b.Config.Workspace.RootPath, red("deleted permanently!")))
		if err != nil {
			return diag.FromErr(err)
		}
		if !proceed {
			return nil
		}
	}
	cmdio.LogString(ctx, "Deleting files...")

	err := b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{
		Path: b.Config.Workspace.RootPath,

@@ -54,8 +36,6 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if err != nil {
		return diag.FromErr(err)
	}

	cmdio.LogString(ctx, "Successfully deleted files!")
	return nil
}
@@ -28,10 +28,12 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp
	}

	opts := &sync.SyncOptions{
		LocalPath: rb.BundleRoot(),
		LocalRoot: rb.SyncRoot(),
		Paths:     rb.Config().Sync.Paths,
		Include:   includes,
		Exclude:   rb.Config().Sync.Exclude,

		RemotePath: rb.Config().Workspace.FilePath,
		Include:    includes,
		Exclude:    rb.Config().Sync.Exclude,
		Host:       rb.WorkspaceClient().Config.Host,

		Full: false,

@@ -39,7 +39,8 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
	for name, job := range b.Config.Resources.Jobs {
		// Compute config file path the job is defined in, relative to the bundle
		// root
		relativePath, err := filepath.Rel(b.RootPath, job.ConfigFilePath)
		l := b.Config.GetLocation("resources.jobs." + name)
		relativePath, err := filepath.Rel(b.RootPath, l.File)
		if err != nil {
			return diag.Errorf("failed to compute relative path for job %s: %v", name, err)
		}
@@ -12,6 +12,7 @@ import (
	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/fileset"
	"github.com/databricks/cli/libs/vfs"
	"github.com/google/uuid"
)

const DeploymentStateFileName = "deployment.json"

@@ -46,6 +47,9 @@ type DeploymentState struct {

	// Files is a list of files which have been deployed as part of this deployment.
	Files Filelist `json:"files"`

	// UUID uniquely identifying the deployment.
	ID uuid.UUID `json:"id"`
}

// We use this entry type as a proxy to fs.DirEntry.

@@ -85,7 +85,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
	}

	log.Infof(ctx, "Creating new snapshot")
	snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.BundleRoot), opts)
	snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.SyncRoot), opts)
	if err != nil {
		return diag.FromErr(err)
	}

@@ -64,6 +64,10 @@ func testStatePull(t *testing.T, opts statePullOpts) {
	b := &bundle.Bundle{
		RootPath:   tmpDir,
		BundleRoot: vfs.MustNew(tmpDir),

		SyncRootPath: tmpDir,
		SyncRoot:     vfs.MustNew(tmpDir),

		Config: config.Root{
			Bundle: config.Bundle{
				Target: "default",

@@ -81,11 +85,11 @@ func testStatePull(t *testing.T, opts statePullOpts) {
	ctx := context.Background()

	for _, file := range opts.localFiles {
		testutil.Touch(t, b.RootPath, "bar", file)
		testutil.Touch(t, b.SyncRootPath, "bar", file)
	}

	for _, file := range opts.localNotebooks {
		testutil.TouchNotebook(t, b.RootPath, "bar", file)
		testutil.TouchNotebook(t, b.SyncRootPath, "bar", file)
	}

	if opts.withExistingSnapshot {
@@ -18,7 +18,7 @@ func TestFromSlice(t *testing.T) {
	testutil.Touch(t, tmpDir, "test2.py")
	testutil.Touch(t, tmpDir, "test3.py")

	files, err := fileset.All()
	files, err := fileset.Files()
	require.NoError(t, err)

	f, err := FromSlice(files)

@@ -38,7 +38,7 @@ func TestToSlice(t *testing.T) {
	testutil.Touch(t, tmpDir, "test2.py")
	testutil.Touch(t, tmpDir, "test3.py")

	files, err := fileset.All()
	files, err := fileset.Files()
	require.NoError(t, err)

	f, err := FromSlice(files)
@@ -14,6 +14,7 @@ import (
	"github.com/databricks/cli/internal/build"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/log"
	"github.com/google/uuid"
)

type stateUpdate struct {

@@ -46,6 +47,11 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost
	}
	state.Files = fl

	// Generate a UUID for the deployment, if one does not already exist
	if state.ID == uuid.Nil {
		state.ID = uuid.New()
	}

	statePath, err := getPathToStateFile(ctx, b)
	if err != nil {
		return diag.FromErr(err)

@@ -13,6 +13,7 @@ import (
	"github.com/databricks/cli/libs/fileset"
	"github.com/databricks/cli/libs/vfs"
	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/google/uuid"
	"github.com/stretchr/testify/require"
)

@@ -22,7 +23,7 @@ func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle {
	testutil.Touch(t, tmpDir, "test1.py")
	testutil.TouchNotebook(t, tmpDir, "test2.py")

	files, err := fileset.New(vfs.MustNew(tmpDir)).All()
	files, err := fileset.New(vfs.MustNew(tmpDir)).Files()
	require.NoError(t, err)

	return &bundle.Bundle{

@@ -88,6 +89,9 @@ func TestStateUpdate(t *testing.T) {
		},
	})
	require.Equal(t, build.GetInfo().Version, state.CliVersion)

	// Valid non-empty UUID is generated.
	require.NotEqual(t, uuid.Nil, state.ID)
}

func TestStateUpdateWithExistingState(t *testing.T) {

@@ -109,6 +113,7 @@ func TestStateUpdateWithExistingState(t *testing.T) {
			LocalPath: "bar/t1.py",
		},
	},
	ID: uuid.MustParse("123e4567-e89b-12d3-a456-426614174000"),
}

data, err := json.Marshal(state)

@@ -135,4 +140,7 @@ func TestStateUpdateWithExistingState(t *testing.T) {
		},
	})
	require.Equal(t, build.GetInfo().Version, state.CliVersion)

	// Existing UUID is not overwritten.
	require.Equal(t, uuid.MustParse("123e4567-e89b-12d3-a456-426614174000"), state.ID)
}
@@ -5,7 +5,6 @@ import (

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/permissions"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/log"
	"github.com/hashicorp/terraform-exec/tfexec"

@@ -18,19 +17,23 @@ func (w *apply) Name() string {
}

func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	// return early if plan is empty
	if b.Plan.IsEmpty {
		log.Debugf(ctx, "No changes in plan. Skipping terraform apply.")
		return nil
	}

	tf := b.Terraform
	if tf == nil {
		return diag.Errorf("terraform not initialized")
	}

	cmdio.LogString(ctx, "Deploying resources...")

	err := tf.Init(ctx, tfexec.Upgrade(true))
	if err != nil {
		return diag.Errorf("terraform init: %v", err)
	if b.Plan.Path == "" {
		return diag.Errorf("no plan found")
	}

	err = tf.Apply(ctx)
	// Apply terraform according to the computed plan
	err := tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
	if err != nil {
		diags := permissions.TryExtendTerraformPermissionError(ctx, b, err)
		if diags != nil {

@@ -39,11 +42,11 @@ func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		return diag.Errorf("terraform apply: %v", err)
	}

	log.Infof(ctx, "Resource deployment completed")
	log.Infof(ctx, "terraform apply completed")
	return nil
}

// Apply returns a [bundle.Mutator] that runs the equivalent of `terraform apply`
// Apply returns a [bundle.Mutator] that runs the equivalent of `terraform apply ./plan`
// from the bundle's ephemeral working directory for Terraform.
func Apply() bundle.Mutator {
	return &apply{}
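The hunks above switch from a bare `tf.Apply(ctx)` to applying a persisted plan file, so what was reviewed is exactly what runs. A hedged, standalone sketch of that plan-then-apply flow using the terraform-exec API visible in the diff; the paths are illustrative:

	package main

	import (
		"context"
		"log"

		"github.com/hashicorp/terraform-exec/tfexec"
	)

	func main() {
		// Working directory and binary path are illustrative.
		tf, err := tfexec.NewTerraform("/tmp/bundle/terraform", "/usr/local/bin/terraform")
		if err != nil {
			log.Fatal(err)
		}

		ctx := context.Background()

		// Persist a plan file first; Plan reports whether there are changes.
		hasChanges, err := tf.Plan(ctx, tfexec.Out("plan"))
		if err != nil {
			log.Fatal(err)
		}

		// ...then apply exactly that plan, mirroring tfexec.DirOrPlan above.
		if hasChanges {
			if err := tf.Apply(ctx, tfexec.DirOrPlan("plan")); err != nil {
				log.Fatal(err)
			}
		}
	}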
@@ -66,8 +66,10 @@ func convGrants(acl []resources.Grant) *schema.ResourceGrants {
// BundleToTerraform converts resources in a bundle configuration
// to the equivalent Terraform JSON representation.
//
// NOTE: THIS IS CURRENTLY A HACK. WE NEED A BETTER WAY TO
// CONVERT TO/FROM TERRAFORM COMPATIBLE FORMAT.
// Note: This function is an older implementation of the conversion logic. It is
// no longer used in any code paths. It is kept around to be used in tests.
// New resources do not need to modify this function and can instead define
// the conversion logic in the tfdyn package.
func BundleToTerraform(config *config.Root) *schema.Root {
	tfroot := schema.NewRoot()
	tfroot.Provider = schema.NewProviders()

@@ -382,6 +384,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
		}
		cur.ID = instance.Attributes.ID
		config.Resources.QualityMonitors[resource.Name] = cur
	case "databricks_schema":
		if config.Resources.Schemas == nil {
			config.Resources.Schemas = make(map[string]*resources.Schema)
		}
		cur := config.Resources.Schemas[resource.Name]
		if cur == nil {
			cur = &resources.Schema{ModifiedStatus: resources.ModifiedStatusDeleted}
		}
		cur.ID = instance.Attributes.ID
		config.Resources.Schemas[resource.Name] = cur
	case "databricks_permissions":
	case "databricks_grants":
		// Ignore; no need to pull these back into the configuration.

@@ -426,6 +438,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
			src.ModifiedStatus = resources.ModifiedStatusCreated
		}
	}
	for _, src := range config.Resources.Schemas {
		if src.ModifiedStatus == "" && src.ID == "" {
			src.ModifiedStatus = resources.ModifiedStatusCreated
		}
	}

	return nil
}
@@ -655,6 +655,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
			{Attributes: stateInstanceAttributes{ID: "1"}},
		},
	},
	{
		Type: "databricks_schema",
		Mode: "managed",
		Name: "test_schema",
		Instances: []stateResourceInstance{
			{Attributes: stateInstanceAttributes{ID: "1"}},
		},
	},
	},
	}
	err := TerraformToBundle(&tfState, &config)

@@ -681,6 +689,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
	assert.Equal(t, "1", config.Resources.QualityMonitors["test_monitor"].ID)
	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.QualityMonitors["test_monitor"].ModifiedStatus)

	assert.Equal(t, "1", config.Resources.Schemas["test_schema"].ID)
	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Schemas["test_schema"].ModifiedStatus)

	AssertFullResourceCoverage(t, &config)
}

@@ -736,6 +747,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
			},
		},
	},
	Schemas: map[string]*resources.Schema{
		"test_schema": {
			CreateSchema: &catalog.CreateSchema{
				Name: "test_schema",
			},
		},
	},
	},
	}
	var tfState = resourcesState{

@@ -765,6 +783,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
	assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor"].ID)
	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.QualityMonitors["test_monitor"].ModifiedStatus)

	assert.Equal(t, "", config.Resources.Schemas["test_schema"].ID)
	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema"].ModifiedStatus)

	AssertFullResourceCoverage(t, &config)
}

@@ -855,6 +876,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
			},
		},
	},
	Schemas: map[string]*resources.Schema{
		"test_schema": {
			CreateSchema: &catalog.CreateSchema{
				Name: "test_schema",
			},
		},
		"test_schema_new": {
			CreateSchema: &catalog.CreateSchema{
				Name: "test_schema_new",
			},
		},
	},
	},
	}
	var tfState = resourcesState{

@@ -971,6 +1004,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
			{Attributes: stateInstanceAttributes{ID: "test_monitor_old"}},
		},
	},
	{
		Type: "databricks_schema",
		Mode: "managed",
		Name: "test_schema",
		Instances: []stateResourceInstance{
			{Attributes: stateInstanceAttributes{ID: "1"}},
		},
	},
	{
		Type: "databricks_schema",
		Mode: "managed",
		Name: "test_schema_old",
		Instances: []stateResourceInstance{
			{Attributes: stateInstanceAttributes{ID: "2"}},
		},
	},
	},
	}
	err := TerraformToBundle(&tfState, &config)

@@ -1024,6 +1073,14 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.QualityMonitors["test_monitor_old"].ModifiedStatus)
	assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor_new"].ID)
	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.QualityMonitors["test_monitor_new"].ModifiedStatus)

	assert.Equal(t, "1", config.Resources.Schemas["test_schema"].ID)
	assert.Equal(t, "", config.Resources.Schemas["test_schema"].ModifiedStatus)
	assert.Equal(t, "2", config.Resources.Schemas["test_schema_old"].ID)
	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Schemas["test_schema_old"].ModifiedStatus)
	assert.Equal(t, "", config.Resources.Schemas["test_schema_new"].ID)
	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema_new"].ModifiedStatus)

	AssertFullResourceCoverage(t, &config)
}
@@ -1,124 +0,0 @@
package terraform

import (
	"context"
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/diag"
	"github.com/fatih/color"
	"github.com/hashicorp/terraform-exec/tfexec"
	tfjson "github.com/hashicorp/terraform-json"
)

type PlanResourceChange struct {
	ResourceType string `json:"resource_type"`
	Action       string `json:"action"`
	ResourceName string `json:"resource_name"`
}

func (c *PlanResourceChange) String() string {
	result := strings.Builder{}
	switch c.Action {
	case "delete":
		result.WriteString(" delete ")
	default:
		result.WriteString(c.Action + " ")
	}
	switch c.ResourceType {
	case "databricks_job":
		result.WriteString("job ")
	case "databricks_pipeline":
		result.WriteString("pipeline ")
	default:
		result.WriteString(c.ResourceType + " ")
	}
	result.WriteString(c.ResourceName)
	return result.String()
}

func (c *PlanResourceChange) IsInplaceSupported() bool {
	return false
}

func logDestroyPlan(ctx context.Context, changes []*tfjson.ResourceChange) error {
	cmdio.LogString(ctx, "The following resources will be removed:")
	for _, c := range changes {
		if c.Change.Actions.Delete() {
			cmdio.Log(ctx, &PlanResourceChange{
				ResourceType: c.Type,
				Action:       "delete",
				ResourceName: c.Name,
			})
		}
	}
	return nil
}

type destroy struct{}

func (w *destroy) Name() string {
	return "terraform.Destroy"
}

func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	// return early if plan is empty
	if b.Plan.IsEmpty {
		cmdio.LogString(ctx, "No resources to destroy in plan. Skipping destroy!")
		return nil
	}

	tf := b.Terraform
	if tf == nil {
		return diag.Errorf("terraform not initialized")
	}

	// read plan file
	plan, err := tf.ShowPlanFile(ctx, b.Plan.Path)
	if err != nil {
		return diag.FromErr(err)
	}

	// print the resources that will be destroyed
	err = logDestroyPlan(ctx, plan.ResourceChanges)
	if err != nil {
		return diag.FromErr(err)
	}

	// Ask for confirmation, if needed
	if !b.Plan.ConfirmApply {
		red := color.New(color.FgRed).SprintFunc()
		b.Plan.ConfirmApply, err = cmdio.AskYesOrNo(ctx, fmt.Sprintf("\nThis will permanently %s resources! Proceed?", red("destroy")))
		if err != nil {
			return diag.FromErr(err)
		}
	}

	// return if confirmation was not provided
	if !b.Plan.ConfirmApply {
		return nil
	}

	if b.Plan.Path == "" {
		return diag.Errorf("no plan found")
	}

	cmdio.LogString(ctx, "Starting to destroy resources")

	// Apply terraform according to the computed destroy plan
	err = tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
	if err != nil {
		return diag.Errorf("terraform destroy: %v", err)
	}

	cmdio.LogString(ctx, "Successfully destroyed resources!")
	return nil
}

// Destroy returns a [bundle.Mutator] that runs the conceptual equivalent of
// `terraform destroy ./plan` from the bundle's ephemeral working directory for Terraform.
func Destroy() bundle.Mutator {
	return &destroy{}
}
@@ -15,6 +15,7 @@ import (
	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/internal/tf/schema"
	"github.com/databricks/cli/internal/build"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/env"
	"github.com/databricks/cli/libs/log"

@@ -219,8 +220,10 @@ func setProxyEnvVars(ctx context.Context, environ map[string]string, b *bundle.B
}

func setUserAgentExtraEnvVar(environ map[string]string, b *bundle.Bundle) error {
	var products []string

	// Add "cli" to the user agent set by the Databricks Terraform provider.
	// This will allow us to attribute downstream requests made by the Databricks
	// Terraform provider to the CLI.
	products := []string{fmt.Sprintf("cli/%s", build.GetInfo().Version)}
	if experimental := b.Config.Experimental; experimental != nil {
		if experimental.PyDABs.Enabled {
			products = append(products, "databricks-pydabs/0.0.0")

@@ -262,10 +262,9 @@ func TestSetUserAgentExtraEnvVar(t *testing.T) {

	env := make(map[string]string, 0)
	err := setUserAgentExtraEnvVar(env, b)

	require.NoError(t, err)
	assert.Equal(t, map[string]string{
		"DATABRICKS_USER_AGENT_EXTRA": "databricks-pydabs/0.0.0",
		"DATABRICKS_USER_AGENT_EXTRA": "cli/0.0.0-dev databricks-pydabs/0.0.0",
	}, env)
}
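A hedged sketch of how the DATABRICKS_USER_AGENT_EXTRA value above is assembled; the version string is illustrative and the joining with a space is inferred from the test expectation:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// "cli/<version>" always leads; the version here is illustrative.
		products := []string{fmt.Sprintf("cli/%s", "0.0.0-dev")}

		// Appended only when PyDABs is enabled in the bundle's experimental config.
		products = append(products, "databricks-pydabs/0.0.0")

		// Space-separated, matching "cli/0.0.0-dev databricks-pydabs/0.0.0" above.
		environ := map[string]string{
			"DATABRICKS_USER_AGENT_EXTRA": strings.Join(products, " "),
		}
		fmt.Println(environ["DATABRICKS_USER_AGENT_EXTRA"])
	}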
@@ -56,6 +56,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D
		path = dyn.NewPath(dyn.Key("databricks_registered_model")).Append(path[2:]...)
	case dyn.Key("quality_monitors"):
		path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...)
	case dyn.Key("schemas"):
		path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...)
	default:
		// Trigger "key not found" for unknown resource types.
		return dyn.GetByPath(root, path)

@@ -30,6 +30,7 @@ func TestInterpolate(t *testing.T) {
		"other_experiment":       "${resources.experiments.other_experiment.id}",
		"other_model_serving":    "${resources.model_serving_endpoints.other_model_serving.id}",
		"other_registered_model": "${resources.registered_models.other_registered_model.id}",
		"other_schema":           "${resources.schemas.other_schema.id}",
	},
	Tasks: []jobs.Task{
		{

@@ -65,6 +66,7 @@ func TestInterpolate(t *testing.T) {
	assert.Equal(t, "${databricks_mlflow_experiment.other_experiment.id}", j.Tags["other_experiment"])
	assert.Equal(t, "${databricks_model_serving.other_model_serving.id}", j.Tags["other_model_serving"])
	assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"])
	assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])

	m := b.Config.Resources.Models["my_model"]
	assert.Equal(t, "my_model", m.Model.Name)
@@ -6,8 +6,8 @@ import (
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/terraform"
	"github.com/hashicorp/terraform-exec/tfexec"
)

@@ -33,8 +33,6 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		return diag.Errorf("terraform not initialized")
	}

	cmdio.LogString(ctx, "Starting plan computation")

	err := tf.Init(ctx, tfexec.Upgrade(true))
	if err != nil {
		return diag.Errorf("terraform init: %v", err)

@@ -55,12 +53,11 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {

	// Set plan in main bundle struct for downstream mutators
	b.Plan = &terraform.Plan{
		Path:         planPath,
		ConfirmApply: b.AutoApprove,
		IsEmpty:      !notEmpty,
		Path:    planPath,
		IsEmpty: !notEmpty,
	}

	cmdio.LogString(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath))
	log.Debugf(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath))
	return nil
}
@@ -1,8 +1,8 @@
package terraform

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"io"
	"io/fs"

@@ -12,10 +12,14 @@ import (
	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/deploy"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/cli/libs/log"
)

type tfState struct {
	Serial  int64  `json:"serial"`
	Lineage string `json:"lineage"`
}

type statePull struct {
	filerFactory deploy.FilerFactory
}

@@ -24,74 +28,105 @@ func (l *statePull) Name() string {
	return "terraform:state-pull"
}

func (l *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buffer, error) {
	// Download state file from filer to local cache directory.
	remote, err := f.Read(ctx, TerraformStateFileName)
func (l *statePull) remoteState(ctx context.Context, b *bundle.Bundle) (*tfState, []byte, error) {
	f, err := l.filerFactory(b)
	if err != nil {
		// On first deploy this state file doesn't yet exist.
		if errors.Is(err, fs.ErrNotExist) {
			return nil, nil
		}
		return nil, err
		return nil, nil, err
	}

	defer remote.Close()
	r, err := f.Read(ctx, TerraformStateFileName)
	if err != nil {
		return nil, nil, err
	}
	defer r.Close()

	var buf bytes.Buffer
	_, err = io.Copy(&buf, remote)
	content, err := io.ReadAll(r)
	if err != nil {
		return nil, nil, err
	}

	state := &tfState{}
	err = json.Unmarshal(content, state)
	if err != nil {
		return nil, nil, err
	}

	return state, content, nil
}

func (l *statePull) localState(ctx context.Context, b *bundle.Bundle) (*tfState, error) {
	dir, err := Dir(ctx, b)
	if err != nil {
		return nil, err
	}

	return &buf, nil
	content, err := os.ReadFile(filepath.Join(dir, TerraformStateFileName))
	if err != nil {
		return nil, err
	}

	state := &tfState{}
	err = json.Unmarshal(content, state)
	if err != nil {
		return nil, err
	}

	return state, nil
}

func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	f, err := l.filerFactory(b)
	if err != nil {
		return diag.FromErr(err)
	}

	dir, err := Dir(ctx, b)
	if err != nil {
		return diag.FromErr(err)
	}

	// Download state file from filer to local cache directory.
	log.Infof(ctx, "Opening remote state file")
	remote, err := l.remoteState(ctx, f)
	if err != nil {
		log.Infof(ctx, "Unable to open remote state file: %s", err)
		return diag.FromErr(err)
	}
	if remote == nil {
		log.Infof(ctx, "Remote state file does not exist")
	localStatePath := filepath.Join(dir, TerraformStateFileName)

	// Case: Remote state file does not exist. In this case we fall back to using the
	// local Terraform state. This allows users to change the "root_path" their bundle is
	// configured with.
	remoteState, remoteContent, err := l.remoteState(ctx, b)
	if errors.Is(err, fs.ErrNotExist) {
		log.Infof(ctx, "Remote state file does not exist. Using local Terraform state.")
		return nil
	}

	// Expect the state file to live under dir.
	local, err := os.OpenFile(filepath.Join(dir, TerraformStateFileName), os.O_CREATE|os.O_RDWR, 0600)
	if err != nil {
		return diag.Errorf("failed to read remote state file: %v", err)
	}

	// Expected invariant: remote state file should have a lineage UUID. Error
	// if that's not the case.
	if remoteState.Lineage == "" {
		return diag.Errorf("remote state file does not have a lineage")
	}

	// Case: Local state file does not exist. In this case we should rely on the remote state file.
	localState, err := l.localState(ctx, b)
	if errors.Is(err, fs.ErrNotExist) {
		log.Infof(ctx, "Local state file does not exist. Using remote Terraform state.")
		err := os.WriteFile(localStatePath, remoteContent, 0600)
		return diag.FromErr(err)
	}
	defer local.Close()

	if !IsLocalStateStale(local, bytes.NewReader(remote.Bytes())) {
		log.Infof(ctx, "Local state is the same or newer, ignoring remote state")
		return nil
	if err != nil {
		return diag.Errorf("failed to read local state file: %v", err)
	}

	// Truncating the file before writing
	local.Truncate(0)
	local.Seek(0, 0)

	// Write file to disk.
	log.Infof(ctx, "Writing remote state file to local cache directory")
	_, err = io.Copy(local, bytes.NewReader(remote.Bytes()))
	if err != nil {
	// If the lineage does not match, the Terraform state files do not correspond to the same deployment.
	if localState.Lineage != remoteState.Lineage {
		log.Infof(ctx, "Remote and local state lineages do not match. Using remote Terraform state. Invalidating local Terraform state.")
		err := os.WriteFile(localStatePath, remoteContent, 0600)
		return diag.FromErr(err)
	}

	// If the remote state is newer than the local state, we should use the remote state.
	if remoteState.Serial > localState.Serial {
		log.Infof(ctx, "Remote state is newer than local state. Using remote Terraform state.")
		err := os.WriteFile(localStatePath, remoteContent, 0600)
		return diag.FromErr(err)
	}

	// default: local state is newer or equal to remote state in terms of serial sequence.
	// It is also of the same lineage. Keep using the local state.
	return nil
}
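The pull logic above reduces to a small precedence rule over (lineage, serial). A standalone, hedged sketch of that rule; the helper and its names are illustrative, not part of the diff:

	package main

	import "fmt"

	type tfState struct {
		Serial  int64
		Lineage string
	}

	// useRemote reports whether the remote Terraform state should replace the
	// local one, following the precedence visible in the mutator above:
	// mismatched lineage -> trust remote; otherwise a higher serial wins,
	// with ties kept local.
	func useRemote(local, remote tfState) bool {
		if local.Lineage != remote.Lineage {
			return true
		}
		return remote.Serial > local.Serial
	}

	func main() {
		fmt.Println(useRemote(tfState{5, "aaaa"}, tfState{10, "aaaa"})) // true: remote newer
		fmt.Println(useRemote(tfState{5, "aaaa"}, tfState{5, "aaaa"}))  // false: tie, keep local
		fmt.Println(useRemote(tfState{10, "bbbb"}, tfState{5, "aaaa"})) // true: lineage mismatch
	}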
@ -17,7 +17,7 @@ import (
|
|||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
func mockStateFilerForPull(t *testing.T, contents map[string]int, merr error) filer.Filer {
|
||||
func mockStateFilerForPull(t *testing.T, contents map[string]any, merr error) filer.Filer {
|
||||
buf, err := json.Marshal(contents)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@@ -41,86 +41,123 @@ func statePullTestBundle(t *testing.T) *bundle.Bundle {
 	}
 }
 
-func TestStatePullLocalMissingRemoteMissing(t *testing.T) {
-	m := &statePull{
-		identityFiler(mockStateFilerForPull(t, nil, os.ErrNotExist)),
-	}
+func TestStatePullLocalErrorWhenRemoteHasNoLineage(t *testing.T) {
+	m := &statePull{}
 
-	ctx := context.Background()
-	b := statePullTestBundle(t)
-	diags := bundle.Apply(ctx, b, m)
-	assert.NoError(t, diags.Error())
+	t.Run("no local state", func(t *testing.T) {
+		// setup remote state.
+		m.filerFactory = identityFiler(mockStateFilerForPull(t, map[string]any{"serial": 5}, nil))
 
-	// Confirm that no local state file has been written.
-	_, err := os.Stat(localStateFile(t, ctx, b))
-	assert.ErrorIs(t, err, fs.ErrNotExist)
+		ctx := context.Background()
+		b := statePullTestBundle(t)
+		diags := bundle.Apply(ctx, b, m)
+		assert.EqualError(t, diags.Error(), "remote state file does not have a lineage")
+	})
+
+	t.Run("local state with lineage", func(t *testing.T) {
+		// setup remote state.
+		m.filerFactory = identityFiler(mockStateFilerForPull(t, map[string]any{"serial": 5}, nil))
+
+		ctx := context.Background()
+		b := statePullTestBundle(t)
+		writeLocalState(t, ctx, b, map[string]any{"serial": 5, "lineage": "aaaa"})
+
+		diags := bundle.Apply(ctx, b, m)
+		assert.EqualError(t, diags.Error(), "remote state file does not have a lineage")
+	})
 }
 
-func TestStatePullLocalMissingRemotePresent(t *testing.T) {
-	m := &statePull{
-		identityFiler(mockStateFilerForPull(t, map[string]int{"serial": 5}, nil)),
-	}
-
-	ctx := context.Background()
-	b := statePullTestBundle(t)
-	diags := bundle.Apply(ctx, b, m)
-	assert.NoError(t, diags.Error())
-
-	// Confirm that the local state file has been updated.
-	localState := readLocalState(t, ctx, b)
-	assert.Equal(t, map[string]int{"serial": 5}, localState)
-}
-
-func TestStatePullLocalStale(t *testing.T) {
-	m := &statePull{
-		identityFiler(mockStateFilerForPull(t, map[string]int{"serial": 5}, nil)),
-	}
-
-	ctx := context.Background()
-	b := statePullTestBundle(t)
-
-	// Write a stale local state file.
-	writeLocalState(t, ctx, b, map[string]int{"serial": 4})
-	diags := bundle.Apply(ctx, b, m)
-	assert.NoError(t, diags.Error())
-
-	// Confirm that the local state file has been updated.
-	localState := readLocalState(t, ctx, b)
-	assert.Equal(t, map[string]int{"serial": 5}, localState)
-}
-
-func TestStatePullLocalEqual(t *testing.T) {
-	m := &statePull{
-		identityFiler(mockStateFilerForPull(t, map[string]int{"serial": 5, "some_other_key": 123}, nil)),
-	}
-
-	ctx := context.Background()
-	b := statePullTestBundle(t)
-
-	// Write a local state file with the same serial as the remote.
-	writeLocalState(t, ctx, b, map[string]int{"serial": 5})
-	diags := bundle.Apply(ctx, b, m)
-	assert.NoError(t, diags.Error())
-
-	// Confirm that the local state file has not been updated.
-	localState := readLocalState(t, ctx, b)
-	assert.Equal(t, map[string]int{"serial": 5}, localState)
-}
-
-func TestStatePullLocalNewer(t *testing.T) {
-	m := &statePull{
-		identityFiler(mockStateFilerForPull(t, map[string]int{"serial": 5, "some_other_key": 123}, nil)),
-	}
-
-	ctx := context.Background()
-	b := statePullTestBundle(t)
-
-	// Write a local state file with a newer serial as the remote.
-	writeLocalState(t, ctx, b, map[string]int{"serial": 6})
-	diags := bundle.Apply(ctx, b, m)
-	assert.NoError(t, diags.Error())
-
-	// Confirm that the local state file has not been updated.
-	localState := readLocalState(t, ctx, b)
-	assert.Equal(t, map[string]int{"serial": 6}, localState)
+func TestStatePullLocal(t *testing.T) {
+	tcases := []struct {
+		name string
+
+		// remote state before applying the pull mutators
+		remote map[string]any
+
+		// local state before applying the pull mutators
+		local map[string]any
+
+		// expected local state after applying the pull mutators
+		expected map[string]any
+	}{
+		{
+			name:     "remote missing, local missing",
+			remote:   nil,
+			local:    nil,
+			expected: nil,
+		},
+		{
+			name:   "remote missing, local present",
+			remote: nil,
+			local:  map[string]any{"serial": 5, "lineage": "aaaa"},
+			// fallback to local state, since remote state is missing.
+			expected: map[string]any{"serial": float64(5), "lineage": "aaaa"},
+		},
+		{
+			name:   "local stale",
+			remote: map[string]any{"serial": 10, "lineage": "aaaa", "some_other_key": 123},
+			local:  map[string]any{"serial": 5, "lineage": "aaaa"},
+			// use remote, since remote is newer.
+			expected: map[string]any{"serial": float64(10), "lineage": "aaaa", "some_other_key": float64(123)},
+		},
+		{
+			name:   "local equal",
+			remote: map[string]any{"serial": 5, "lineage": "aaaa", "some_other_key": 123},
+			local:  map[string]any{"serial": 5, "lineage": "aaaa"},
+			// use local state, since they are equal in terms of serial sequence.
+			expected: map[string]any{"serial": float64(5), "lineage": "aaaa"},
+		},
+		{
+			name:   "local newer",
+			remote: map[string]any{"serial": 5, "lineage": "aaaa", "some_other_key": 123},
+			local:  map[string]any{"serial": 6, "lineage": "aaaa"},
+			// use local state, since local is newer.
+			expected: map[string]any{"serial": float64(6), "lineage": "aaaa"},
+		},
+		{
+			name:   "remote and local have different lineages",
+			remote: map[string]any{"serial": 5, "lineage": "aaaa"},
+			local:  map[string]any{"serial": 10, "lineage": "bbbb"},
+			// use remote, since lineages do not match.
+			expected: map[string]any{"serial": float64(5), "lineage": "aaaa"},
+		},
+		{
+			name:   "local is missing lineage",
+			remote: map[string]any{"serial": 5, "lineage": "aaaa"},
+			local:  map[string]any{"serial": 10},
+			// use remote, since local does not have lineage.
+			expected: map[string]any{"serial": float64(5), "lineage": "aaaa"},
+		},
+	}
+
+	for _, tc := range tcases {
+		t.Run(tc.name, func(t *testing.T) {
+			m := &statePull{}
+			if tc.remote == nil {
+				// nil represents no remote state file.
+				m.filerFactory = identityFiler(mockStateFilerForPull(t, nil, os.ErrNotExist))
+			} else {
+				m.filerFactory = identityFiler(mockStateFilerForPull(t, tc.remote, nil))
+			}
+
+			ctx := context.Background()
+			b := statePullTestBundle(t)
+			if tc.local != nil {
+				writeLocalState(t, ctx, b, tc.local)
+			}
+
+			diags := bundle.Apply(ctx, b, m)
+			assert.NoError(t, diags.Error())
+
+			if tc.expected == nil {
+				// nil represents no local state file is expected.
+				_, err := os.Stat(localStateFile(t, ctx, b))
+				assert.ErrorIs(t, err, fs.ErrNotExist)
+			} else {
+				localState := readLocalState(t, ctx, b)
+				assert.Equal(t, tc.expected, localState)
+			}
+		})
+	}
 }
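Taken together, the case comments above pin down the reconciliation rule the pull step follows: keep the local state file only when its lineage matches the remote's and its serial is equal or newer; in every other conflict the remote copy wins, and a missing remote falls back to whatever exists locally. The standalone sketch below restates that rule for illustration only; pickLocal is a hypothetical helper, not the actual statePull implementation (which, per TestStatePullLocalErrorWhenRemoteHasNoLineage, additionally reports an error when the remote state has no lineage).

package main

import "fmt"

// pickLocal mirrors the decision table encoded by the test cases above.
// It reports whether the existing local state file should be kept in
// preference to the freshly downloaded remote state. Values come from
// decoded JSON, so "serial" is float64 and "lineage" is string.
func pickLocal(local, remote map[string]any) bool {
	if remote == nil {
		// No remote state: fall back to the local file, if any.
		return true
	}
	localLineage, ok := local["lineage"].(string)
	if !ok {
		// A local file without a lineage is not trusted; use remote.
		return false
	}
	remoteLineage, _ := remote["lineage"].(string)
	if localLineage != remoteLineage {
		// Diverged lineages: the remote copy wins.
		return false
	}
	localSerial, _ := local["serial"].(float64)
	remoteSerial, _ := remote["serial"].(float64)
	// Equal or newer local serial: keep the local file untouched.
	return localSerial >= remoteSerial
}

func main() {
	remote := map[string]any{"serial": float64(5), "lineage": "aaaa"}
	local := map[string]any{"serial": float64(6), "lineage": "aaaa"}
	fmt.Println(pickLocal(local, remote)) // true: the "local newer" case

	local = map[string]any{"serial": float64(10), "lineage": "bbbb"}
	fmt.Println(pickLocal(local, remote)) // false: lineages differ
}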
@@ -2,6 +2,8 @@ package terraform
 
 import (
 	"context"
+	"errors"
+	"io/fs"
 	"os"
 	"path/filepath"
@@ -34,6 +36,12 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
 
 	// Expect the state file to live under dir.
 	local, err := os.Open(filepath.Join(dir, TerraformStateFileName))
+	if errors.Is(err, fs.ErrNotExist) {
+		// The state file can be absent if terraform apply is skipped because
+		// there are no changes to apply in the plan.
+		log.Debugf(ctx, "Local terraform state file does not exist.")
+		return nil
+	}
 	if err != nil {
 		return diag.FromErr(err)
 	}
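The new guard returns early when the local state file is absent. It relies on errors.Is matching the fs.ErrNotExist sentinel through the *fs.PathError that os.Open wraps around it, which a plain equality check against the sentinel would miss. A minimal standalone illustration of the same pattern (the file name here is just a placeholder):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	// os.Open returns a *fs.PathError that wraps fs.ErrNotExist when
	// the file is missing; errors.Is unwraps it, so the check succeeds
	// even though err != fs.ErrNotExist compares unequal directly.
	_, err := os.Open("terraform.tfstate") // placeholder path
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("no local state file; nothing to push")
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("state file opened")
}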
@@ -55,7 +55,7 @@ func TestStatePush(t *testing.T) {
 	b := statePushTestBundle(t)
 
 	// Write a stale local state file.
-	writeLocalState(t, ctx, b, map[string]int{"serial": 4})
+	writeLocalState(t, ctx, b, map[string]any{"serial": 4})
 	diags := bundle.Apply(ctx, b, m)
 	assert.NoError(t, diags.Error())
 }
@@ -26,19 +26,19 @@ func localStateFile(t *testing.T, ctx context.Context, b *bundle.Bundle) string
 	return filepath.Join(dir, TerraformStateFileName)
 }
 
-func readLocalState(t *testing.T, ctx context.Context, b *bundle.Bundle) map[string]int {
+func readLocalState(t *testing.T, ctx context.Context, b *bundle.Bundle) map[string]any {
 	f, err := os.Open(localStateFile(t, ctx, b))
 	require.NoError(t, err)
 	defer f.Close()
 
-	var contents map[string]int
+	var contents map[string]any
 	dec := json.NewDecoder(f)
 	err = dec.Decode(&contents)
 	require.NoError(t, err)
 	return contents
 }
 
-func writeLocalState(t *testing.T, ctx context.Context, b *bundle.Bundle, contents map[string]int) {
+func writeLocalState(t *testing.T, ctx context.Context, b *bundle.Bundle, contents map[string]any) {
	f, err := os.Create(localStateFile(t, ctx, b))
 	require.NoError(t, err)
 	defer f.Close()
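The int-to-any change in these helpers is also why the pull test above asserts float64(5) rather than 5: encoding/json decodes every JSON number into float64 when the destination type is any. A quick demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var state map[string]any
	err := json.Unmarshal([]byte(`{"serial": 5, "lineage": "aaaa"}`), &state)
	if err != nil {
		panic(err)
	}

	// JSON numbers decode to float64 when the target type is any,
	// so test expectations must compare against float64(5), not 5.
	fmt.Printf("%T\n", state["serial"])        // float64
	fmt.Println(state["serial"] == float64(5)) // true
}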