mirror of https://github.com/databricks/cli.git

commit a0bc111b4d
Merge remote-tracking branch 'origin' into prompt-on-pipeline-recreate

@@ -1 +1 @@
7437dabb9dadee402c1fc060df4c1ce8cc5369f0
3eae49b444cac5a0118a3503e5b7ecef7f96527a
@@ -116,12 +116,12 @@ func allResolvers() *resolvers {
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
entity, err := w.{{.PascalName}}.GetBy{{range .List.NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
if err != nil {
return "", err
}

return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .List.NamedIdMap.IdPath 0).PascalName) }}), nil
return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .NamedIdMap.IdPath 0).PascalName) }}), nil
}
{{end -}}
{{- end}}
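Note: for readers who have not seen the generated output, the following is a minimal sketch of what one expanded resolver could look like after this template change. The Clusters service, its GetByClusterName helper, and the ClusterId field are illustrative assumptions, not text copied from the generated resolvers file.

package lookup

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

// lookupCluster approximates a single expanded resolver. The real template
// emits one such closure per allowlisted service into the resolvers table.
func lookupCluster(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
	// GetBy<Name> helpers are generated from the service's NamedIdMap (assumed name).
	entity, err := w.Clusters.GetByClusterName(ctx, name)
	if err != nil {
		return "", err
	}

	// The resolver renders the entity's ID as a string.
	return fmt.Sprint(entity.ClusterId), nil
}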

@@ -154,6 +154,7 @@ func new{{.PascalName}}() *cobra.Command {
"provider-exchanges delete-listing-from-exchange"
"provider-exchanges list-exchanges-for-listing"
"provider-exchanges list-listings-for-exchange"
"storage-credentials get"
-}}
{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}

@@ -24,10 +24,12 @@ cmd/account/service-principals/service-principals.go linguist-generated=true
cmd/account/settings/settings.go linguist-generated=true
cmd/account/storage-credentials/storage-credentials.go linguist-generated=true
cmd/account/storage/storage.go linguist-generated=true
cmd/account/usage-dashboards/usage-dashboards.go linguist-generated=true
cmd/account/users/users.go linguist-generated=true
cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
cmd/account/workspaces/workspaces.go linguist-generated=true
cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true
cmd/workspace/alerts/alerts.go linguist-generated=true
cmd/workspace/apps/apps.go linguist-generated=true
cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true
@@ -54,6 +56,7 @@ cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go lingu
cmd/workspace/experiments/experiments.go linguist-generated=true
cmd/workspace/external-locations/external-locations.go linguist-generated=true
cmd/workspace/functions/functions.go linguist-generated=true
cmd/workspace/genie/genie.go linguist-generated=true
cmd/workspace/git-credentials/git-credentials.go linguist-generated=true
cmd/workspace/global-init-scripts/global-init-scripts.go linguist-generated=true
cmd/workspace/grants/grants.go linguist-generated=true
@@ -67,10 +70,13 @@ cmd/workspace/libraries/libraries.go linguist-generated=true
cmd/workspace/metastores/metastores.go linguist-generated=true
cmd/workspace/model-registry/model-registry.go linguist-generated=true
cmd/workspace/model-versions/model-versions.go linguist-generated=true
cmd/workspace/notification-destinations/notification-destinations.go linguist-generated=true
cmd/workspace/online-tables/online-tables.go linguist-generated=true
cmd/workspace/permission-migration/permission-migration.go linguist-generated=true
cmd/workspace/permissions/permissions.go linguist-generated=true
cmd/workspace/pipelines/pipelines.go linguist-generated=true
cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true
cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true
cmd/workspace/policy-families/policy-families.go linguist-generated=true
cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true
cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true
@@ -81,13 +87,16 @@ cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics
cmd/workspace/provider-providers/provider-providers.go linguist-generated=true
cmd/workspace/providers/providers.go linguist-generated=true
cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true
cmd/workspace/queries-legacy/queries-legacy.go linguist-generated=true
cmd/workspace/queries/queries.go linguist-generated=true
cmd/workspace/query-history/query-history.go linguist-generated=true
cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go linguist-generated=true
cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true
cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true
cmd/workspace/recipients/recipients.go linguist-generated=true
cmd/workspace/registered-models/registered-models.go linguist-generated=true
cmd/workspace/repos/repos.go linguist-generated=true
cmd/workspace/resource-quotas/resource-quotas.go linguist-generated=true
cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true
cmd/workspace/schemas/schemas.go linguist-generated=true
cmd/workspace/secrets/secrets.go linguist-generated=true

@@ -6,7 +6,7 @@
"files.trimTrailingWhitespace": true,
"files.insertFinalNewline": true,
"files.trimFinalNewlines": true,
"python.envFile": "${workspaceFolder}/.databricks/.databricks.env",
"python.envFile": "${workspaceRoot}/.env",
"databricks.python.envFile": "${workspaceFolder}/.env",
"python.analysis.stubPath": ".vscode",
"jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\<codecell\\>|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])",

CHANGELOG.md (117 changed lines)
@@ -1,5 +1,122 @@
# Version changelog

## [Release] Release v0.227.0

CLI:
* Added filtering flags for cluster list commands ([#1703](https://github.com/databricks/cli/pull/1703)).

Bundles:
* Allow users to configure paths (including outside of the bundle root) to synchronize to the workspace. ([#1694](https://github.com/databricks/cli/pull/1694)).
* Add configurable presets for name prefixes, tags, etc. ([#1490](https://github.com/databricks/cli/pull/1490)).
* Add support for requirements libraries in Job Tasks ([#1543](https://github.com/databricks/cli/pull/1543)).
* Remove reference to "dbt" in the default-sql template ([#1696](https://github.com/databricks/cli/pull/1696)).
* Pause continuous pipelines when 'mode: development' is used ([#1590](https://github.com/databricks/cli/pull/1590)).
* Report all empty resources present in error diagnostic ([#1685](https://github.com/databricks/cli/pull/1685)).
* Improves detection of PyPI package names in environment dependencies ([#1699](https://github.com/databricks/cli/pull/1699)).

Internal:
* Add `import` option for PyDABs ([#1693](https://github.com/databricks/cli/pull/1693)).
* Make fileset take optional list of paths to list ([#1684](https://github.com/databricks/cli/pull/1684)).
* Pass through paths argument to libs/sync ([#1689](https://github.com/databricks/cli/pull/1689)).
* Correctly mark package names with versions as remote libraries ([#1697](https://github.com/databricks/cli/pull/1697)).
* Share test initializer in common helper function ([#1695](https://github.com/databricks/cli/pull/1695)).
* Make `pydabs/venv_path` optional ([#1687](https://github.com/databricks/cli/pull/1687)).
* Use API mocks for duplicate path errors in workspace files extensions client ([#1690](https://github.com/databricks/cli/pull/1690)).
* Fix prefix preset used for UC schemas ([#1704](https://github.com/databricks/cli/pull/1704)).

## [Release] Release v0.226.0

CLI:
* Add command line autocomplete to the fs commands ([#1622](https://github.com/databricks/cli/pull/1622)).
* Add trailing slash to directory to produce completions for ([#1666](https://github.com/databricks/cli/pull/1666)).
* Fix ability to import the CLI repository as module ([#1671](https://github.com/databricks/cli/pull/1671)).
* Fix host resolution order in `auth login` ([#1370](https://github.com/databricks/cli/pull/1370)).
* Print text logs in `import-dir` and `export-dir` commands ([#1682](https://github.com/databricks/cli/pull/1682)).

Bundles:
* Expand and upload local wheel libraries for all task types ([#1649](https://github.com/databricks/cli/pull/1649)).
* Clarify file format required for the `config-file` flag in `bundle init` ([#1651](https://github.com/databricks/cli/pull/1651)).
* Fixed incorrectly cleaning up python wheel dist folder ([#1656](https://github.com/databricks/cli/pull/1656)).
* Merge job parameters based on their name ([#1659](https://github.com/databricks/cli/pull/1659)).
* Fix glob expansion after running a generic build command ([#1662](https://github.com/databricks/cli/pull/1662)).
* Upload local libraries even if they don't have artifact defined ([#1664](https://github.com/databricks/cli/pull/1664)).

Internal:
* Fix python wheel task integration tests ([#1648](https://github.com/databricks/cli/pull/1648)).
* Skip pushing Terraform state after destroy ([#1667](https://github.com/databricks/cli/pull/1667)).
* Enable Spark JAR task test ([#1658](https://github.com/databricks/cli/pull/1658)).
* Run Spark JAR task test on multiple DBR versions ([#1665](https://github.com/databricks/cli/pull/1665)).
* Stop tracking file path locations in bundle resources ([#1673](https://github.com/databricks/cli/pull/1673)).
* Update VS Code settings to match latest value from IDE plugin ([#1677](https://github.com/databricks/cli/pull/1677)).
* Use `service.NamedIdMap` to make lookup generation deterministic ([#1678](https://github.com/databricks/cli/pull/1678)).
* [Internal] Remove dependency to the `openapi` package of the Go SDK ([#1676](https://github.com/databricks/cli/pull/1676)).
* Upgrade TF provider to 1.50.0 ([#1681](https://github.com/databricks/cli/pull/1681)).
* Upgrade Go SDK to 0.44.0 ([#1679](https://github.com/databricks/cli/pull/1679)).

API Changes:
* Changed `databricks account budgets create` command . New request type is .
* Changed `databricks account budgets create` command to return .
* Changed `databricks account budgets delete` command . New request type is .
* Changed `databricks account budgets delete` command to return .
* Changed `databricks account budgets get` command . New request type is .
* Changed `databricks account budgets get` command to return .
* Changed `databricks account budgets list` command to require request of .
* Changed `databricks account budgets list` command to return .
* Changed `databricks account budgets update` command . New request type is .
* Changed `databricks account budgets update` command to return .
* Added `databricks account usage-dashboards` command group.
* Changed `databricks model-versions get` command to return .
* Changed `databricks cluster-policies create` command with new required argument order.
* Changed `databricks cluster-policies edit` command with new required argument order.
* Added `databricks clusters update` command.
* Added `databricks genie` command group.
* Changed `databricks permission-migration migrate-permissions` command . New request type is .
* Changed `databricks permission-migration migrate-permissions` command to return .
* Changed `databricks account workspace-assignment delete` command to return .
* Changed `databricks account workspace-assignment update` command with new required argument order.
* Changed `databricks account custom-app-integration create` command with new required argument order.
* Changed `databricks account custom-app-integration list` command to require request of .
* Changed `databricks account published-app-integration list` command to require request of .
* Removed `databricks apps` command group.
* Added `databricks notification-destinations` command group.
* Changed `databricks shares list` command to require request of .
* Changed `databricks alerts create` command . New request type is .
* Changed `databricks alerts delete` command . New request type is .
* Changed `databricks alerts delete` command to return .
* Changed `databricks alerts get` command with new required argument order.
* Changed `databricks alerts list` command to require request of .
* Changed `databricks alerts list` command to return .
* Changed `databricks alerts update` command . New request type is .
* Changed `databricks alerts update` command to return .
* Changed `databricks queries create` command . New request type is .
* Changed `databricks queries delete` command . New request type is .
* Changed `databricks queries delete` command to return .
* Changed `databricks queries get` command with new required argument order.
* Changed `databricks queries list` command to return .
* Removed `databricks queries restore` command.
* Changed `databricks queries update` command . New request type is .
* Added `databricks queries list-visualizations` command.
* Changed `databricks query-visualizations create` command . New request type is .
* Changed `databricks query-visualizations delete` command . New request type is .
* Changed `databricks query-visualizations delete` command to return .
* Changed `databricks query-visualizations update` command . New request type is .
* Changed `databricks statement-execution execute-statement` command to return .
* Changed `databricks statement-execution get-statement` command to return .
* Added `databricks alerts-legacy` command group.
* Added `databricks queries-legacy` command group.
* Added `databricks query-visualizations-legacy` command group.

OpenAPI commit f98c07f9c71f579de65d2587bb0292f83d10e55d (2024-08-12)
Dependency updates:
* Bump github.com/hashicorp/hc-install from 0.7.0 to 0.8.0 ([#1652](https://github.com/databricks/cli/pull/1652)).
* Bump golang.org/x/sync from 0.7.0 to 0.8.0 ([#1655](https://github.com/databricks/cli/pull/1655)).
* Bump golang.org/x/mod from 0.19.0 to 0.20.0 ([#1654](https://github.com/databricks/cli/pull/1654)).
* Bump golang.org/x/oauth2 from 0.21.0 to 0.22.0 ([#1653](https://github.com/databricks/cli/pull/1653)).
* Bump golang.org/x/text from 0.16.0 to 0.17.0 ([#1670](https://github.com/databricks/cli/pull/1670)).
* Bump golang.org/x/term from 0.22.0 to 0.23.0 ([#1669](https://github.com/databricks/cli/pull/1669)).

## 0.225.0

Bundles:
@@ -1,25 +1,16 @@
|
|||
package artifacts
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/artifacts/whl"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/filer"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
)
|
||||
|
||||
type mutatorFactory = func(name string) bundle.Mutator
|
||||
|
@@ -28,8 +19,6 @@ var buildMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactTy
|
|||
config.ArtifactPythonWheel: whl.Build,
|
||||
}
|
||||
|
||||
var uploadMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{}
|
||||
|
||||
var prepareMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{
|
||||
config.ArtifactPythonWheel: whl.Prepare,
|
||||
}
|
||||
|
@@ -43,15 +32,6 @@ func getBuildMutator(t config.ArtifactType, name string) bundle.Mutator {
|
|||
return mutatorFactory(name)
|
||||
}
|
||||
|
||||
func getUploadMutator(t config.ArtifactType, name string) bundle.Mutator {
|
||||
mutatorFactory, ok := uploadMutators[t]
|
||||
if !ok {
|
||||
mutatorFactory = BasicUpload
|
||||
}
|
||||
|
||||
return mutatorFactory(name)
|
||||
}
|
||||
|
||||
func getPrepareMutator(t config.ArtifactType, name string) bundle.Mutator {
|
||||
mutatorFactory, ok := prepareMutators[t]
|
||||
if !ok {
|
||||
|
@@ -92,174 +72,3 @@ func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnosti
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Basic Upload defines a general upload mutator which uploads artifact as a library to workspace
|
||||
type basicUpload struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func BasicUpload(name string) bundle.Mutator {
|
||||
return &basicUpload{name: name}
|
||||
}
|
||||
|
||||
func (m *basicUpload) Name() string {
|
||||
return fmt.Sprintf("artifacts.Upload(%s)", m.name)
|
||||
}
|
||||
|
||||
func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
artifact, ok := b.Config.Artifacts[m.name]
|
||||
if !ok {
|
||||
return diag.Errorf("artifact doesn't exist: %s", m.name)
|
||||
}
|
||||
|
||||
if len(artifact.Files) == 0 {
|
||||
return diag.Errorf("artifact source is not configured: %s", m.name)
|
||||
}
|
||||
|
||||
uploadPath, err := getUploadBasePath(b)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
err = uploadArtifact(ctx, b, artifact, uploadPath, client)
|
||||
if err != nil {
|
||||
return diag.Errorf("upload for %s failed, error: %v", m.name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFilerForArtifacts(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) {
|
||||
if isVolumesPath(uploadPath) {
|
||||
return filer.NewFilesClient(w, uploadPath)
|
||||
}
|
||||
return filer.NewWorkspaceFilesClient(w, uploadPath)
|
||||
}
|
||||
|
||||
func isVolumesPath(path string) bool {
|
||||
return strings.HasPrefix(path, "/Volumes/")
|
||||
}
|
||||
|
||||
func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error {
|
||||
for i := range a.Files {
|
||||
f := &a.Files[i]
|
||||
|
||||
filename := filepath.Base(f.Source)
|
||||
cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename))
|
||||
|
||||
err := uploadArtifactFile(ctx, f.Source, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof(ctx, "Upload succeeded")
|
||||
f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source))
|
||||
remotePath := f.RemotePath
|
||||
|
||||
if !strings.HasPrefix(f.RemotePath, "/Workspace/") && !strings.HasPrefix(f.RemotePath, "/Volumes/") {
|
||||
wsfsBase := "/Workspace"
|
||||
remotePath = path.Join(wsfsBase, f.RemotePath)
|
||||
}
|
||||
|
||||
for _, job := range b.Config.Resources.Jobs {
|
||||
rewriteArtifactPath(b, f, job, remotePath)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func rewriteArtifactPath(b *bundle.Bundle, f *config.ArtifactFile, job *resources.Job, remotePath string) {
|
||||
// Rewrite artifact path in job task libraries
|
||||
for i := range job.Tasks {
|
||||
task := &job.Tasks[i]
|
||||
for j := range task.Libraries {
|
||||
lib := &task.Libraries[j]
|
||||
if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) {
|
||||
lib.Whl = remotePath
|
||||
}
|
||||
if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) {
|
||||
lib.Jar = remotePath
|
||||
}
|
||||
}
|
||||
|
||||
// Rewrite artifact path in job task libraries for ForEachTask
|
||||
if task.ForEachTask != nil {
|
||||
forEachTask := task.ForEachTask
|
||||
for j := range forEachTask.Task.Libraries {
|
||||
lib := &forEachTask.Task.Libraries[j]
|
||||
if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) {
|
||||
lib.Whl = remotePath
|
||||
}
|
||||
if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) {
|
||||
lib.Jar = remotePath
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Rewrite artifact path in job environments
|
||||
for i := range job.Environments {
|
||||
env := &job.Environments[i]
|
||||
if env.Spec == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for j := range env.Spec.Dependencies {
|
||||
lib := env.Spec.Dependencies[j]
|
||||
if isArtifactMatchLibrary(f, lib, b) {
|
||||
env.Spec.Dependencies[j] = remotePath
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool {
|
||||
if !filepath.IsAbs(libPath) {
|
||||
libPath = filepath.Join(b.RootPath, libPath)
|
||||
}
|
||||
|
||||
// libPath can be a glob pattern, so do the match first
|
||||
matches, err := filepath.Glob(libPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, m := range matches {
|
||||
if m == f.Source {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Function to upload artifact file to Workspace
|
||||
func uploadArtifactFile(ctx context.Context, file string, client filer.Filer) error {
|
||||
raw, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err))
|
||||
}
|
||||
|
||||
filename := filepath.Base(file)
|
||||
err = client.Write(ctx, filename, bytes.NewReader(raw), filer.OverwriteIfExists, filer.CreateParentDirectories)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to import %s: %w", filename, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getUploadBasePath(b *bundle.Bundle) (string, error) {
|
||||
artifactPath := b.Config.Workspace.ArtifactPath
|
||||
if artifactPath == "" {
|
||||
return "", fmt.Errorf("remote artifact path not configured")
|
||||
}
|
||||
|
||||
return path.Join(artifactPath, ".internal"), nil
|
||||
}
|
||||
|
|
|
@@ -1,196 +0,0 @@
|
|||
package artifacts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
mockfiler "github.com/databricks/cli/internal/mocks/libs/filer"
|
||||
"github.com/databricks/cli/internal/testutil"
|
||||
"github.com/databricks/cli/libs/filer"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestArtifactUploadForWorkspace(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
whlFolder := filepath.Join(tmpDir, "whl")
|
||||
testutil.Touch(t, whlFolder, "source.whl")
|
||||
whlLocalPath := filepath.Join(whlFolder, "source.whl")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
ArtifactPath: "/foo/bar/artifacts",
|
||||
},
|
||||
Artifacts: config.Artifacts{
|
||||
"whl": {
|
||||
Type: config.ArtifactPythonWheel,
|
||||
Files: []config.ArtifactFile{
|
||||
{Source: whlLocalPath},
|
||||
},
|
||||
},
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ForEachTask: &jobs.ForEachTask{
|
||||
Task: jobs.Task{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Environments: []jobs.JobEnvironment{
|
||||
{
|
||||
Spec: &compute.Environment{
|
||||
Dependencies: []string{
|
||||
filepath.Join("whl", "source.whl"),
|
||||
"/Workspace/Users/foo@bar.com/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
artifact := b.Config.Artifacts["whl"]
|
||||
mockFiler := mockfiler.NewMockFiler(t)
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source.whl"),
|
||||
mock.AnythingOfType("*bytes.Reader"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil)
|
||||
|
||||
err := uploadArtifact(context.Background(), b, artifact, "/foo/bar/artifacts", mockFiler)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test that libraries path is updated
|
||||
require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl)
|
||||
require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl)
|
||||
require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
|
||||
require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
|
||||
require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl)
|
||||
require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl)
|
||||
}
|
||||
|
||||
func TestArtifactUploadForVolumes(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
whlFolder := filepath.Join(tmpDir, "whl")
|
||||
testutil.Touch(t, whlFolder, "source.whl")
|
||||
whlLocalPath := filepath.Join(whlFolder, "source.whl")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
ArtifactPath: "/Volumes/foo/bar/artifacts",
|
||||
},
|
||||
Artifacts: config.Artifacts{
|
||||
"whl": {
|
||||
Type: config.ArtifactPythonWheel,
|
||||
Files: []config.ArtifactFile{
|
||||
{Source: whlLocalPath},
|
||||
},
|
||||
},
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Volumes/some/path/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ForEachTask: &jobs.ForEachTask{
|
||||
Task: jobs.Task{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Volumes/some/path/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Environments: []jobs.JobEnvironment{
|
||||
{
|
||||
Spec: &compute.Environment{
|
||||
Dependencies: []string{
|
||||
filepath.Join("whl", "source.whl"),
|
||||
"/Volumes/some/path/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
artifact := b.Config.Artifacts["whl"]
|
||||
mockFiler := mockfiler.NewMockFiler(t)
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source.whl"),
|
||||
mock.AnythingOfType("*bytes.Reader"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil)
|
||||
|
||||
err := uploadArtifact(context.Background(), b, artifact, "/Volumes/foo/bar/artifacts", mockFiler)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test that libraries path is updated
|
||||
require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl)
|
||||
require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl)
|
||||
require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
|
||||
require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
|
||||
require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl)
|
||||
require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl)
|
||||
}
|
|
@@ -29,6 +29,5 @@ func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnosti

return bundle.Apply(ctx, b, bundle.Seq(
whl.DetectPackage(),
whl.DefineArtifactsFromLibraries(),
))
}
@@ -34,11 +34,13 @@ func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
return diag.Errorf("artifact doesn't exist: %s", m.name)
}

l := b.Config.GetLocation("artifacts." + m.name)
dirPath := filepath.Dir(l.File)

// Check if source paths are absolute, if not, make them absolute
for k := range artifact.Files {
f := &artifact.Files[k]
if !filepath.IsAbs(f.Source) {
dirPath := filepath.Dir(artifact.ConfigFilePath)
f.Source = filepath.Join(dirPath, f.Source)
}
}
@@ -49,7 +51,6 @@ func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
}

if !filepath.IsAbs(artifact.Path) {
dirPath := filepath.Dir(artifact.ConfigFilePath)
artifact.Path = filepath.Join(dirPath, artifact.Path)
}
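Note: a small, self-contained sketch of the path-resolution rule this hunk touches — relative artifact sources are anchored at the directory of the config file that defines the artifact, while absolute paths are left untouched. The file names below are made up for the example and are not taken from the bundle code.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical location of the YAML file that defines the artifact.
	configFile := "/home/user/project/resources/artifacts.yml"
	dirPath := filepath.Dir(configFile)

	// A relative source path is resolved against that config file's directory.
	source := "../dist/my_wheel-0.1.0-py3-none-any.whl"
	if !filepath.IsAbs(source) {
		source = filepath.Join(dirPath, source)
	}

	fmt.Println(source) // /home/user/project/dist/my_wheel-0.1.0-py3-none-any.whl
}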
@@ -2,50 +2,18 @@ package artifacts
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/libraries"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/filer"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
)
|
||||
|
||||
func UploadAll() bundle.Mutator {
|
||||
return &all{
|
||||
name: "Upload",
|
||||
fn: uploadArtifactByName,
|
||||
}
|
||||
}
|
||||
|
||||
func CleanUp() bundle.Mutator {
|
||||
return &cleanUp{}
|
||||
}
|
||||
|
||||
type upload struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func uploadArtifactByName(name string) (bundle.Mutator, error) {
|
||||
return &upload{name}, nil
|
||||
}
|
||||
|
||||
func (m *upload) Name() string {
|
||||
return fmt.Sprintf("artifacts.Upload(%s)", m.name)
|
||||
}
|
||||
|
||||
func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
artifact, ok := b.Config.Artifacts[m.name]
|
||||
if !ok {
|
||||
return diag.Errorf("artifact doesn't exist: %s", m.name)
|
||||
}
|
||||
|
||||
if len(artifact.Files) == 0 {
|
||||
return diag.Errorf("artifact source is not configured: %s", m.name)
|
||||
}
|
||||
|
||||
return bundle.Apply(ctx, b, getUploadMutator(artifact.Type, m.name))
|
||||
}
|
||||
|
||||
type cleanUp struct{}
|
||||
|
||||
func (m *cleanUp) Name() string {
|
||||
|
@@ -53,12 +21,12 @@ func (m *cleanUp) Name() string {
|
|||
}
|
||||
|
||||
func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
uploadPath, err := getUploadBasePath(b)
|
||||
uploadPath, err := libraries.GetUploadBasePath(b)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath)
|
||||
client, err := libraries.GetFilerForLibraries(b.WorkspaceClient(), uploadPath)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
|
|
@@ -1,114 +0,0 @@
|
|||
package artifacts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/internal/bundletest"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/testfile"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type noop struct{}
|
||||
|
||||
func (n *noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *noop) Name() string {
|
||||
return "noop"
|
||||
}
|
||||
|
||||
func TestExpandGlobFilesSource(t *testing.T) {
|
||||
rootPath := t.TempDir()
|
||||
err := os.Mkdir(filepath.Join(rootPath, "test"), 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
t1 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar1.jar"))
|
||||
t1.Close(t)
|
||||
|
||||
t2 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar2.jar"))
|
||||
t2.Close(t)
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: rootPath,
|
||||
Config: config.Root{
|
||||
Artifacts: map[string]*config.Artifact{
|
||||
"test": {
|
||||
Type: "custom",
|
||||
Files: []config.ArtifactFile{
|
||||
{
|
||||
Source: filepath.Join("..", "test", "*.jar"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml"))
|
||||
|
||||
u := &upload{"test"}
|
||||
uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
|
||||
return &noop{}
|
||||
}
|
||||
|
||||
bm := &build{"test"}
|
||||
buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
|
||||
return &noop{}
|
||||
}
|
||||
|
||||
pm := &prepare{"test"}
|
||||
prepareMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
|
||||
return &noop{}
|
||||
}
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, bundle.Seq(pm, bm, u))
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
require.Equal(t, 2, len(b.Config.Artifacts["test"].Files))
|
||||
require.Equal(t, filepath.Join(rootPath, "test", "myjar1.jar"), b.Config.Artifacts["test"].Files[0].Source)
|
||||
require.Equal(t, filepath.Join(rootPath, "test", "myjar2.jar"), b.Config.Artifacts["test"].Files[1].Source)
|
||||
}
|
||||
|
||||
func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) {
|
||||
rootPath := t.TempDir()
|
||||
err := os.Mkdir(filepath.Join(rootPath, "test"), 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: rootPath,
|
||||
Config: config.Root{
|
||||
Artifacts: map[string]*config.Artifact{
|
||||
"test": {
|
||||
Type: "custom",
|
||||
Files: []config.ArtifactFile{
|
||||
{
|
||||
Source: filepath.Join("..", "test", "myjar.jar"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml"))
|
||||
|
||||
u := &upload{"test"}
|
||||
uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
|
||||
return &noop{}
|
||||
}
|
||||
|
||||
bm := &build{"test"}
|
||||
buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
|
||||
return &noop{}
|
||||
}
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u))
|
||||
require.ErrorContains(t, diags.Error(), "no matching files")
|
||||
}
|
|
@@ -1,79 +0,0 @@
|
|||
package whl
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/libraries"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
)
|
||||
|
||||
type fromLibraries struct{}
|
||||
|
||||
func DefineArtifactsFromLibraries() bundle.Mutator {
|
||||
return &fromLibraries{}
|
||||
}
|
||||
|
||||
func (m *fromLibraries) Name() string {
|
||||
return "artifacts.whl.DefineArtifactsFromLibraries"
|
||||
}
|
||||
|
||||
func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
if len(b.Config.Artifacts) != 0 {
|
||||
log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined")
|
||||
return nil
|
||||
}
|
||||
|
||||
tasks := libraries.FindTasksWithLocalLibraries(b)
|
||||
for _, task := range tasks {
|
||||
// Skip tasks that are not PythonWheelTasks for now, we can later support Jars too
|
||||
if task.PythonWheelTask == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, lib := range task.Libraries {
|
||||
matchAndAdd(ctx, lib.Whl, b)
|
||||
}
|
||||
}
|
||||
|
||||
envs := libraries.FindAllEnvironments(b)
|
||||
for _, jobEnvs := range envs {
|
||||
for _, env := range jobEnvs {
|
||||
if env.Spec != nil {
|
||||
for _, dep := range env.Spec.Dependencies {
|
||||
if libraries.IsEnvironmentDependencyLocal(dep) {
|
||||
matchAndAdd(ctx, dep, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func matchAndAdd(ctx context.Context, lib string, b *bundle.Bundle) {
|
||||
matches, err := filepath.Glob(filepath.Join(b.RootPath, lib))
|
||||
// File referenced from libraries section does not exists, skipping
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, match := range matches {
|
||||
name := filepath.Base(match)
|
||||
if b.Config.Artifacts == nil {
|
||||
b.Config.Artifacts = make(map[string]*config.Artifact)
|
||||
}
|
||||
|
||||
log.Debugf(ctx, "Adding an artifact block for %s", match)
|
||||
b.Config.Artifacts[name] = &config.Artifact{
|
||||
Files: []config.ArtifactFile{
|
||||
{Source: match},
|
||||
},
|
||||
Type: config.ArtifactPythonWheel,
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -15,6 +15,8 @@ type infer struct {

func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
artifact := b.Config.Artifacts[m.name]

// TODO use python.DetectVEnvExecutable once bundle has a way to specify venv path
py, err := python.DetectExecutable(ctx)
if err != nil {
return diag.FromErr(err)
@@ -39,6 +39,14 @@ type Bundle struct {
// Exclusively use this field for filesystem operations.
BundleRoot vfs.Path

// SyncRoot is a virtual filesystem path to the root directory of the files that are synchronized to the workspace.
// It can be an ancestor to [BundleRoot], but not a descendant; that is, [SyncRoot] must contain [BundleRoot].
SyncRoot vfs.Path

// SyncRootPath is the local path to the root directory of files that are synchronized to the workspace.
// It is equal to `SyncRoot.Native()` and included as dedicated field for convenient access.
SyncRootPath string

Config config.Root

// Metadata about the bundle deployment. This is the interface Databricks services
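Note: the containment rule in the comment above (SyncRoot must equal the bundle root or be an ancestor of it) can be checked with plain path logic. The sketch below uses made-up paths and a helper written for illustration; it is not code from the bundle package.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// contains reports whether child equals parent or is nested under it.
func contains(parent, child string) bool {
	rel, err := filepath.Rel(parent, child)
	if err != nil {
		return false
	}
	return rel == "." || (rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator)) && !filepath.IsAbs(rel))
}

func main() {
	syncRoot := "/home/user/repo"          // hypothetical SyncRoot
	bundleRoot := "/home/user/repo/bundle" // hypothetical BundleRoot

	// Valid: SyncRoot is an ancestor of BundleRoot.
	fmt.Println(contains(syncRoot, bundleRoot)) // true

	// Invalid: SyncRoot may not be a descendant of BundleRoot.
	fmt.Println(contains(bundleRoot, syncRoot)) // false
}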
@@ -28,6 +28,10 @@ func (r ReadOnlyBundle) BundleRoot() vfs.Path {
return r.b.BundleRoot
}

func (r ReadOnlyBundle) SyncRoot() vfs.Path {
return r.b.SyncRoot
}

func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient {
return r.b.WorkspaceClient()
}
@@ -4,18 +4,11 @@ import (
"context"
"fmt"

"github.com/databricks/cli/bundle/config/paths"
"github.com/databricks/cli/libs/exec"
)

type Artifacts map[string]*Artifact

func (artifacts Artifacts) ConfigureConfigFilePath() {
for _, artifact := range artifacts {
artifact.ConfigureConfigFilePath()
}
}

type ArtifactType string

const ArtifactPythonWheel ArtifactType = `whl`
@@ -40,8 +33,6 @@ type Artifact struct {
BuildCommand string `json:"build,omitempty"`

Executable exec.ExecutableType `json:"executable,omitempty"`

paths.Paths
}

func (a *Artifact) Build(ctx context.Context) ([]byte, error) {
@@ -36,9 +36,15 @@ type PyDABs struct {

// VEnvPath is path to the virtual environment.
//
// Required if PyDABs is enabled. PyDABs will load the code in the specified
// environment.
// If enabled, PyDABs will execute code within this environment. If disabled,
// it defaults to using the Python interpreter available in the current shell.
VEnvPath string `json:"venv_path,omitempty"`

// Import contains a list of Python packages with PyDABs code.
//
// These packages are imported to discover resources, resource generators, and mutators.
// This list can include namespace packages, which causes the import of nested packages.
Import []string `json:"import,omitempty"`
}

type Command string
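Note: to make the two fields above concrete, here is a hedged sketch of how such a configuration might be populated. The mirror struct, package names, and venv path are assumptions chosen for illustration and omit the rest of the real PyDABs type.

package main

import "fmt"

// pyDABs mirrors only the two fields discussed above.
type pyDABs struct {
	VEnvPath string   `json:"venv_path,omitempty"` // now optional (see #1687)
	Import   []string `json:"import,omitempty"`    // packages scanned for resources, generators, and mutators
}

func main() {
	cfg := pyDABs{
		// Hypothetical virtual environment relative to the bundle root.
		VEnvPath: ".venv",
		// Hypothetical package names; a namespace package pulls in its nested packages too.
		Import: []string{"my_company.bundles", "my_project.resources"},
	}
	fmt.Printf("%+v\n", cfg)
}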
@@ -0,0 +1,208 @@
|
|||
package mutator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/textutil"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/databricks/databricks-sdk-go/service/ml"
|
||||
)
|
||||
|
||||
type applyPresets struct{}
|
||||
|
||||
// Apply all presets, e.g. the prefix presets that
|
||||
// adds a prefix to all names of all resources.
|
||||
func ApplyPresets() *applyPresets {
|
||||
return &applyPresets{}
|
||||
}
|
||||
|
||||
type Tag struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (m *applyPresets) Name() string {
|
||||
return "ApplyPresets"
|
||||
}
|
||||
|
||||
func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
if d := validatePauseStatus(b); d != nil {
|
||||
return d
|
||||
}
|
||||
|
||||
r := b.Config.Resources
|
||||
t := b.Config.Presets
|
||||
prefix := t.NamePrefix
|
||||
tags := toTagArray(t.Tags)
|
||||
|
||||
// Jobs presets: Prefix, Tags, JobsMaxConcurrentRuns, TriggerPauseStatus
|
||||
for _, j := range r.Jobs {
|
||||
j.Name = prefix + j.Name
|
||||
if j.Tags == nil {
|
||||
j.Tags = make(map[string]string)
|
||||
}
|
||||
for _, tag := range tags {
|
||||
if j.Tags[tag.Key] == "" {
|
||||
j.Tags[tag.Key] = tag.Value
|
||||
}
|
||||
}
|
||||
if j.MaxConcurrentRuns == 0 {
|
||||
j.MaxConcurrentRuns = t.JobsMaxConcurrentRuns
|
||||
}
|
||||
if t.TriggerPauseStatus != "" {
|
||||
paused := jobs.PauseStatusPaused
|
||||
if t.TriggerPauseStatus == config.Unpaused {
|
||||
paused = jobs.PauseStatusUnpaused
|
||||
}
|
||||
|
||||
if j.Schedule != nil && j.Schedule.PauseStatus == "" {
|
||||
j.Schedule.PauseStatus = paused
|
||||
}
|
||||
if j.Continuous != nil && j.Continuous.PauseStatus == "" {
|
||||
j.Continuous.PauseStatus = paused
|
||||
}
|
||||
if j.Trigger != nil && j.Trigger.PauseStatus == "" {
|
||||
j.Trigger.PauseStatus = paused
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Pipelines presets: Prefix, PipelinesDevelopment
|
||||
for i := range r.Pipelines {
|
||||
r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
|
||||
if config.IsExplicitlyEnabled(t.PipelinesDevelopment) {
|
||||
r.Pipelines[i].Development = true
|
||||
}
|
||||
if t.TriggerPauseStatus == config.Paused {
|
||||
r.Pipelines[i].Continuous = false
|
||||
}
|
||||
|
||||
// As of 2024-06, pipelines don't yet support tags
|
||||
}
|
||||
|
||||
// Models presets: Prefix, Tags
|
||||
for _, m := range r.Models {
|
||||
m.Name = prefix + m.Name
|
||||
for _, t := range tags {
|
||||
exists := slices.ContainsFunc(m.Tags, func(modelTag ml.ModelTag) bool {
|
||||
return modelTag.Key == t.Key
|
||||
})
|
||||
if !exists {
|
||||
// Only add this tag if the resource didn't include any tag that overrides its value.
|
||||
m.Tags = append(m.Tags, ml.ModelTag{Key: t.Key, Value: t.Value})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Experiments presets: Prefix, Tags
|
||||
for _, e := range r.Experiments {
|
||||
filepath := e.Name
|
||||
dir := path.Dir(filepath)
|
||||
base := path.Base(filepath)
|
||||
if dir == "." {
|
||||
e.Name = prefix + base
|
||||
} else {
|
||||
e.Name = dir + "/" + prefix + base
|
||||
}
|
||||
for _, t := range tags {
|
||||
exists := false
|
||||
for _, experimentTag := range e.Tags {
|
||||
if experimentTag.Key == t.Key {
|
||||
exists = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
e.Tags = append(e.Tags, ml.ExperimentTag{Key: t.Key, Value: t.Value})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Model serving endpoint presets: Prefix
|
||||
for i := range r.ModelServingEndpoints {
|
||||
r.ModelServingEndpoints[i].Name = normalizePrefix(prefix) + r.ModelServingEndpoints[i].Name
|
||||
|
||||
// As of 2024-06, model serving endpoints don't yet support tags
|
||||
}
|
||||
|
||||
// Registered models presets: Prefix
|
||||
for i := range r.RegisteredModels {
|
||||
r.RegisteredModels[i].Name = normalizePrefix(prefix) + r.RegisteredModels[i].Name
|
||||
|
||||
// As of 2024-06, registered models don't yet support tags
|
||||
}
|
||||
|
||||
// Quality monitors presets: Prefix
|
||||
if t.TriggerPauseStatus == config.Paused {
|
||||
for i := range r.QualityMonitors {
|
||||
// Remove all schedules from monitors, since they don't support pausing/unpausing.
|
||||
// Quality monitors might support the "pause" property in the future, so at the
|
||||
// CLI level we do respect that property if it is set to "unpaused."
|
||||
if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
|
||||
r.QualityMonitors[i].Schedule = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Schemas: Prefix
|
||||
for i := range r.Schemas {
|
||||
r.Schemas[i].Name = normalizePrefix(prefix) + r.Schemas[i].Name
|
||||
// HTTP API for schemas doesn't yet support tags. It's only supported in
|
||||
// the Databricks UI and via the SQL API.
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validatePauseStatus(b *bundle.Bundle) diag.Diagnostics {
|
||||
p := b.Config.Presets.TriggerPauseStatus
|
||||
if p == "" || p == config.Paused || p == config.Unpaused {
|
||||
return nil
|
||||
}
|
||||
return diag.Diagnostics{{
|
||||
Summary: "Invalid value for trigger_pause_status, should be PAUSED or UNPAUSED",
|
||||
Severity: diag.Error,
|
||||
Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
|
||||
}}
|
||||
}
|
||||
|
||||
// toTagArray converts a map of tags to an array of tags.
|
||||
// We sort tags so ensure stable ordering.
|
||||
func toTagArray(tags map[string]string) []Tag {
|
||||
var tagArray []Tag
|
||||
if tags == nil {
|
||||
return tagArray
|
||||
}
|
||||
for key, value := range tags {
|
||||
tagArray = append(tagArray, Tag{Key: key, Value: value})
|
||||
}
|
||||
sort.Slice(tagArray, func(i, j int) bool {
|
||||
return tagArray[i].Key < tagArray[j].Key
|
||||
})
|
||||
return tagArray
|
||||
}
|
||||
|
||||
// normalizePrefix prefixes strings like '[dev lennart] ' to 'dev_lennart_'.
|
||||
// We leave unicode letters and numbers but remove all "special characters."
|
||||
func normalizePrefix(prefix string) string {
|
||||
prefix = strings.ReplaceAll(prefix, "[", "")
|
||||
prefix = strings.Trim(prefix, " ")
|
||||
|
||||
// If the prefix ends with a ']', we add an underscore to the end.
|
||||
// This makes sure that we get names like "dev_user_endpoint" instead of "dev_userendpoint"
|
||||
suffix := ""
|
||||
if strings.HasSuffix(prefix, "]") {
|
||||
suffix = "_"
|
||||
}
|
||||
|
||||
return textutil.NormalizeString(prefix) + suffix
|
||||
}
|
|
@@ -0,0 +1,253 @@
|
|||
package mutator_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestApplyPresetsPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
prefix string
|
||||
job *resources.Job
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "add prefix to job",
|
||||
prefix: "prefix-",
|
||||
job: &resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "job1",
|
||||
},
|
||||
},
|
||||
want: "prefix-job1",
|
||||
},
|
||||
{
|
||||
name: "add empty prefix to job",
|
||||
prefix: "",
|
||||
job: &resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "job1",
|
||||
},
|
||||
},
|
||||
want: "job1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job1": tt.job,
|
||||
},
|
||||
},
|
||||
Presets: config.Presets{
|
||||
NamePrefix: tt.prefix,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diag := bundle.Apply(ctx, b, mutator.ApplyPresets())
|
||||
|
||||
if diag.HasError() {
|
||||
t.Fatalf("unexpected error: %v", diag)
|
||||
}
|
||||
|
||||
require.Equal(t, tt.want, b.Config.Resources.Jobs["job1"].Name)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyPresetsPrefixForUcSchema(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
prefix string
|
||||
schema *resources.Schema
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "add prefix to schema",
|
||||
prefix: "[prefix]",
|
||||
schema: &resources.Schema{
|
||||
CreateSchema: &catalog.CreateSchema{
|
||||
Name: "schema1",
|
||||
},
|
||||
},
|
||||
want: "prefix_schema1",
|
||||
},
|
||||
{
|
||||
name: "add empty prefix to schema",
|
||||
prefix: "",
|
||||
schema: &resources.Schema{
|
||||
CreateSchema: &catalog.CreateSchema{
|
||||
Name: "schema1",
|
||||
},
|
||||
},
|
||||
want: "schema1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Schemas: map[string]*resources.Schema{
|
||||
"schema1": tt.schema,
|
||||
},
|
||||
},
|
||||
Presets: config.Presets{
|
||||
NamePrefix: tt.prefix,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diag := bundle.Apply(ctx, b, mutator.ApplyPresets())
|
||||
|
||||
if diag.HasError() {
|
||||
t.Fatalf("unexpected error: %v", diag)
|
||||
}
|
||||
|
||||
require.Equal(t, tt.want, b.Config.Resources.Schemas["schema1"].Name)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyPresetsTags(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
tags map[string]string
|
||||
job *resources.Job
|
||||
want map[string]string
|
||||
}{
|
||||
{
|
||||
name: "add tags to job",
|
||||
tags: map[string]string{"env": "dev"},
|
||||
job: &resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "job1",
|
||||
Tags: nil,
|
||||
},
|
||||
},
|
||||
want: map[string]string{"env": "dev"},
|
||||
},
|
||||
{
|
||||
name: "merge tags with existing job tags",
|
||||
tags: map[string]string{"env": "dev"},
|
||||
job: &resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "job1",
|
||||
Tags: map[string]string{"team": "data"},
|
||||
},
|
||||
},
|
||||
want: map[string]string{"env": "dev", "team": "data"},
|
||||
},
|
||||
{
|
||||
name: "don't override existing job tags",
|
||||
tags: map[string]string{"env": "dev"},
|
||||
job: &resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "job1",
|
||||
Tags: map[string]string{"env": "prod"},
|
||||
},
|
||||
},
|
||||
want: map[string]string{"env": "prod"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job1": tt.job,
|
||||
},
|
||||
},
|
||||
Presets: config.Presets{
|
||||
Tags: tt.tags,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diag := bundle.Apply(ctx, b, mutator.ApplyPresets())
|
||||
|
||||
if diag.HasError() {
|
||||
t.Fatalf("unexpected error: %v", diag)
|
||||
}
|
||||
|
||||
tags := b.Config.Resources.Jobs["job1"].Tags
|
||||
require.Equal(t, tt.want, tags)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyPresetsJobsMaxConcurrentRuns(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
job *resources.Job
|
||||
setting int
|
||||
want int
|
||||
}{
|
||||
{
|
||||
name: "set max concurrent runs",
|
||||
job: &resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "job1",
|
||||
MaxConcurrentRuns: 0,
|
||||
},
|
||||
},
|
||||
setting: 5,
|
||||
want: 5,
|
||||
},
|
||||
{
|
||||
name: "do not override existing max concurrent runs",
|
||||
job: &resources.Job{
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Name: "job1",
|
||||
MaxConcurrentRuns: 3,
|
||||
},
|
||||
},
|
||||
setting: 5,
|
||||
want: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job1": tt.job,
|
||||
},
|
||||
},
|
||||
Presets: config.Presets{
|
||||
JobsMaxConcurrentRuns: tt.setting,
|
||||
},
|
||||
},
|
||||
}
|
||||
ctx := context.Background()
|
||||
diag := bundle.Apply(ctx, b, mutator.ApplyPresets())
|
||||
|
||||
if diag.HasError() {
|
||||
t.Fatalf("unexpected error: %v", diag)
|
||||
}
|
||||
|
||||
require.Equal(t, tt.want, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns)
|
||||
})
|
||||
}
|
||||
}
|
|
@@ -24,7 +24,7 @@ func (m *configureWSFS) Name() string {
}

func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
root := b.BundleRoot.Native()
root := b.SyncRoot.Native()

// The bundle root must be located in /Workspace/
if !strings.HasPrefix(root, "/Workspace/") {
@@ -45,6 +45,6 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
return diag.FromErr(err)
}

b.BundleRoot = p
b.SyncRoot = p
return nil
}
@@ -2,17 +2,14 @@ package mutator
|
|||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/libs/auth"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/databricks/databricks-sdk-go/service/ml"
|
||||
)
|
||||
|
||||
type processTargetMode struct{}
|
||||
|
@@ -30,102 +27,75 @@ func (m *processTargetMode) Name() string {
|
|||
// Mark all resources as being for 'development' purposes, i.e.
|
||||
// changing their their name, adding tags, and (in the future)
|
||||
// marking them as 'hidden' in the UI.
|
||||
func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) {
|
||||
if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() {
|
||||
log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true")
|
||||
disabled := false
|
||||
b.Config.Bundle.Deployment.Lock.Enabled = &disabled
|
||||
}
|
||||
|
||||
r := b.Config.Resources
|
||||
t := &b.Config.Presets
|
||||
shortName := b.Config.Workspace.CurrentUser.ShortName
|
||||
prefix := "[dev " + shortName + "] "
|
||||
|
||||
// Generate a normalized version of the short name that can be used as a tag value.
|
||||
tagValue := b.Tagging.NormalizeValue(shortName)
|
||||
|
||||
for i := range r.Jobs {
|
||||
r.Jobs[i].Name = prefix + r.Jobs[i].Name
|
||||
if r.Jobs[i].Tags == nil {
|
||||
r.Jobs[i].Tags = make(map[string]string)
|
||||
}
|
||||
r.Jobs[i].Tags["dev"] = tagValue
|
||||
if r.Jobs[i].MaxConcurrentRuns == 0 {
|
||||
r.Jobs[i].MaxConcurrentRuns = developmentConcurrentRuns
|
||||
}
|
||||
|
||||
// Pause each job. As an exception, we don't pause jobs that are explicitly
|
||||
// marked as "unpaused". This allows users to override the default behavior
|
||||
// of the development mode.
|
||||
if r.Jobs[i].Schedule != nil && r.Jobs[i].Schedule.PauseStatus != jobs.PauseStatusUnpaused {
|
||||
r.Jobs[i].Schedule.PauseStatus = jobs.PauseStatusPaused
|
||||
}
|
||||
if r.Jobs[i].Continuous != nil && r.Jobs[i].Continuous.PauseStatus != jobs.PauseStatusUnpaused {
|
||||
r.Jobs[i].Continuous.PauseStatus = jobs.PauseStatusPaused
|
||||
}
|
||||
if r.Jobs[i].Trigger != nil && r.Jobs[i].Trigger.PauseStatus != jobs.PauseStatusUnpaused {
|
||||
r.Jobs[i].Trigger.PauseStatus = jobs.PauseStatusPaused
|
||||
}
|
||||
if t.NamePrefix == "" {
|
||||
t.NamePrefix = "[dev " + shortName + "] "
|
||||
}
|
||||
|
||||
for i := range r.Pipelines {
|
||||
r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
|
||||
r.Pipelines[i].Development = true
|
||||
// (pipelines don't yet support tags)
|
||||
if t.Tags == nil {
|
||||
t.Tags = map[string]string{}
|
||||
}
|
||||
_, exists := t.Tags["dev"]
|
||||
if !exists {
|
||||
t.Tags["dev"] = b.Tagging.NormalizeValue(shortName)
|
||||
}
|
||||
|
||||
for i := range r.Models {
|
||||
r.Models[i].Name = prefix + r.Models[i].Name
|
||||
r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: tagValue})
|
||||
if t.JobsMaxConcurrentRuns == 0 {
|
||||
t.JobsMaxConcurrentRuns = developmentConcurrentRuns
|
||||
}
|
||||
|
||||
for i := range r.Experiments {
|
||||
filepath := r.Experiments[i].Name
|
||||
dir := path.Dir(filepath)
|
||||
base := path.Base(filepath)
|
||||
if dir == "." {
|
||||
r.Experiments[i].Name = prefix + base
|
||||
} else {
|
||||
r.Experiments[i].Name = dir + "/" + prefix + base
|
||||
}
|
||||
r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: tagValue})
|
||||
if t.TriggerPauseStatus == "" {
|
||||
t.TriggerPauseStatus = config.Paused
|
||||
}
|
||||
|
||||
for i := range r.ModelServingEndpoints {
|
||||
prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
|
||||
r.ModelServingEndpoints[i].Name = prefix + r.ModelServingEndpoints[i].Name
|
||||
// (model serving doesn't yet support tags)
|
||||
if !config.IsExplicitlyDisabled(t.PipelinesDevelopment) {
|
||||
enabled := true
|
||||
t.PipelinesDevelopment = &enabled
|
||||
}
|
||||
|
||||
for i := range r.RegisteredModels {
|
||||
prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
|
||||
r.RegisteredModels[i].Name = prefix + r.RegisteredModels[i].Name
|
||||
// (registered models in Unity Catalog don't yet support tags)
|
||||
}
|
||||
|
||||
for i := range r.QualityMonitors {
|
||||
// Remove all schedules from monitors, since they don't support pausing/unpausing.
|
||||
// Quality monitors might support the "pause" property in the future, so at the
|
||||
// CLI level we do respect that property if it is set to "unpaused".
|
||||
if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
|
||||
r.QualityMonitors[i].Schedule = nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := range r.Schemas {
|
||||
prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
|
||||
r.Schemas[i].Name = prefix + r.Schemas[i].Name
|
||||
// HTTP API for schemas doesn't yet support tags. It's only supported in
|
||||
// the Databricks UI and via the SQL API.
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
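A minimal sketch (not part of this commit; names taken from elsewhere in this diff): for a user whose short name is "lennart", and assuming none of the presets were set explicitly, transformDevelopmentMode above leaves roughly these presets behind for ApplyPresets to act on:

enabled := true
_ = config.Presets{
	NamePrefix:            "[dev lennart] ",
	Tags:                  map[string]string{"dev": "lennart"},
	JobsMaxConcurrentRuns: developmentConcurrentRuns,
	TriggerPauseStatus:    config.Paused,
	PipelinesDevelopment:  &enabled,
}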
||||
func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
|
||||
p := b.Config.Presets
|
||||
u := b.Config.Workspace.CurrentUser
|
||||
|
||||
// Make sure presets don't set the trigger status to UNPAUSED;
|
||||
// this could be surprising since most users (and tools) expect triggers
|
||||
// to be paused in development.
|
||||
// (Note that there still is an exceptional case where users set the trigger
|
||||
// status to UNPAUSED at the level of an individual object, which was
|
||||
// historically allowed.)
|
||||
if p.TriggerPauseStatus == config.Unpaused {
|
||||
return diag.Diagnostics{{
|
||||
Severity: diag.Error,
|
||||
Summary: "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default",
|
||||
Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
|
||||
}}
|
||||
}
|
||||
|
||||
// Make sure this development copy has unique names and paths to avoid conflicts
|
||||
if path := findNonUserPath(b); path != "" {
|
||||
return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
|
||||
}
|
||||
if p.NamePrefix != "" && !strings.Contains(p.NamePrefix, u.ShortName) && !strings.Contains(p.NamePrefix, u.UserName) {
|
||||
// Resources such as pipelines require a unique name, e.g. '[dev steve] my_pipeline'.
|
||||
// For this reason we require the name prefix to contain the current username;
|
||||
// it's a pitfall for users if they don't include it and later find out that
|
||||
// only a single user can do development deployments.
|
||||
return diag.Diagnostics{{
|
||||
Severity: diag.Error,
|
||||
Summary: "prefix should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'",
|
||||
Locations: []dyn.Location{b.Config.GetLocation("presets.name_prefix")},
|
||||
}}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -182,10 +152,11 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Di
|
|||
switch b.Config.Bundle.Mode {
|
||||
case config.Development:
|
||||
diags := validateDevelopmentMode(b)
|
||||
if diags != nil {
|
||||
if diags.HasError() {
|
||||
return diags
|
||||
}
|
||||
return transformDevelopmentMode(ctx, b)
|
||||
transformDevelopmentMode(ctx, b)
|
||||
return diags
|
||||
case config.Production:
|
||||
isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
|
||||
return validateProductionMode(ctx, b, isPrincipal)
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/tags"
|
||||
sdkconfig "github.com/databricks/databricks-sdk-go/config"
|
||||
"github.com/databricks/databricks-sdk-go/service/catalog"
|
||||
|
@ -51,6 +52,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
|
|||
Schedule: &jobs.CronSchedule{
|
||||
QuartzCronExpression: "* * * * *",
|
||||
},
|
||||
Tags: map[string]string{"existing": "tag"},
|
||||
},
|
||||
},
|
||||
"job2": {
|
||||
|
@ -82,7 +84,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
|
|||
},
|
||||
},
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
"pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}},
|
||||
"pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1", Continuous: true}},
|
||||
},
|
||||
Experiments: map[string]*resources.MlflowExperiment{
|
||||
"experiment1": {Experiment: &ml.Experiment{Name: "/Users/lennart.kats@databricks.com/experiment1"}},
|
||||
|
@ -129,12 +131,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
|
|||
func TestProcessTargetModeDevelopment(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
|
||||
m := ProcessTargetMode()
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Job 1
|
||||
assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name)
|
||||
assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["existing"], "tag")
|
||||
assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["dev"], "lennart")
|
||||
assert.Equal(t, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused)
|
||||
|
||||
|
@ -145,6 +148,7 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
|
|||
|
||||
// Pipeline 1
|
||||
assert.Equal(t, "[dev lennart] pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
|
||||
assert.Equal(t, false, b.Config.Resources.Pipelines["pipeline1"].Continuous)
|
||||
assert.True(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
|
||||
|
||||
// Experiment 1
|
||||
|
@ -182,7 +186,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
|
|||
})
|
||||
|
||||
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
||||
diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Assert that tag normalization took place.
|
||||
|
@ -196,7 +201,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) {
|
|||
})
|
||||
|
||||
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
||||
diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Assert that tag normalization took place (Azure allows more characters than AWS).
|
||||
|
@ -210,17 +216,53 @@ func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) {
|
|||
})
|
||||
|
||||
b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
|
||||
diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Assert that tag normalization took place.
|
||||
assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"])
|
||||
}
|
||||
|
||||
func TestValidateDevelopmentMode(t *testing.T) {
|
||||
// Test with a valid development mode bundle
|
||||
b := mockBundle(config.Development)
|
||||
diags := validateDevelopmentMode(b)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Test with a bundle that has a non-user path
|
||||
b.Config.Workspace.RootPath = "/Shared/.bundle/x/y/state"
|
||||
diags = validateDevelopmentMode(b)
|
||||
require.ErrorContains(t, diags.Error(), "root_path")
|
||||
|
||||
// Test with a bundle that has an unpaused trigger pause status
|
||||
b = mockBundle(config.Development)
|
||||
b.Config.Presets.TriggerPauseStatus = config.Unpaused
|
||||
diags = validateDevelopmentMode(b)
|
||||
require.ErrorContains(t, diags.Error(), "UNPAUSED")
|
||||
|
||||
// Test with a bundle that has a prefix not containing the username or short name
|
||||
b = mockBundle(config.Development)
|
||||
b.Config.Presets.NamePrefix = "[prod]"
|
||||
diags = validateDevelopmentMode(b)
|
||||
require.Len(t, diags, 1)
|
||||
assert.Equal(t, diag.Error, diags[0].Severity)
|
||||
assert.Contains(t, diags[0].Summary, "")
|
||||
|
||||
// Test with a bundle that has valid user paths
|
||||
b = mockBundle(config.Development)
|
||||
b.Config.Workspace.RootPath = "/Users/lennart@company.com/.bundle/x/y/state"
|
||||
b.Config.Workspace.StatePath = "/Users/lennart@company.com/.bundle/x/y/state"
|
||||
b.Config.Workspace.FilePath = "/Users/lennart@company.com/.bundle/x/y/files"
|
||||
b.Config.Workspace.ArtifactPath = "/Users/lennart@company.com/.bundle/x/y/artifacts"
|
||||
diags = validateDevelopmentMode(b)
|
||||
require.NoError(t, diags.Error())
|
||||
}
|
||||
|
||||
func TestProcessTargetModeDefault(t *testing.T) {
|
||||
b := mockBundle("")
|
||||
|
||||
m := ProcessTargetMode()
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
|
||||
|
@ -306,7 +348,7 @@ func TestAllResourcesMocked(t *testing.T) {
|
|||
func TestAllResourcesRenamed(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
|
||||
m := ProcessTargetMode()
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
|
@ -336,8 +378,7 @@ func TestDisableLocking(t *testing.T) {
|
|||
ctx := context.Background()
|
||||
b := mockBundle(config.Development)
|
||||
|
||||
err := bundle.Apply(ctx, b, ProcessTargetMode())
|
||||
require.Nil(t, err)
|
||||
transformDevelopmentMode(ctx, b)
|
||||
assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled())
|
||||
}
|
||||
|
||||
|
@ -347,7 +388,97 @@ func TestDisableLockingDisabled(t *testing.T) {
|
|||
explicitlyEnabled := true
|
||||
b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled
|
||||
|
||||
err := bundle.Apply(ctx, b, ProcessTargetMode())
|
||||
require.Nil(t, err)
|
||||
transformDevelopmentMode(ctx, b)
|
||||
assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled")
|
||||
}
|
||||
|
||||
func TestPrefixAlreadySet(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
b.Config.Presets.NamePrefix = "custom_lennart_deploy_"
|
||||
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
assert.Equal(t, "custom_lennart_deploy_job1", b.Config.Resources.Jobs["job1"].Name)
|
||||
}
|
||||
|
||||
func TestTagsAlreadySet(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
b.Config.Presets.Tags = map[string]string{
|
||||
"custom": "tag",
|
||||
"dev": "foo",
|
||||
}
|
||||
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
assert.Equal(t, "tag", b.Config.Resources.Jobs["job1"].Tags["custom"])
|
||||
assert.Equal(t, "foo", b.Config.Resources.Jobs["job1"].Tags["dev"])
|
||||
}
|
||||
|
||||
func TestTagsNil(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
b.Config.Presets.Tags = nil
|
||||
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
assert.Equal(t, "lennart", b.Config.Resources.Jobs["job2"].Tags["dev"])
|
||||
}
|
||||
|
||||
func TestTagsEmptySet(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
b.Config.Presets.Tags = map[string]string{}
|
||||
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
assert.Equal(t, "lennart", b.Config.Resources.Jobs["job2"].Tags["dev"])
|
||||
}
|
||||
|
||||
func TestJobsMaxConcurrentRunsAlreadySet(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
b.Config.Presets.JobsMaxConcurrentRuns = 10
|
||||
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
assert.Equal(t, 10, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns)
|
||||
}
|
||||
|
||||
func TestJobsMaxConcurrentRunsDisabled(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
b.Config.Presets.JobsMaxConcurrentRuns = 1
|
||||
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns)
|
||||
}
|
||||
|
||||
func TestTriggerPauseStatusWhenUnpaused(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
b.Config.Presets.TriggerPauseStatus = config.Unpaused
|
||||
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.ErrorContains(t, diags.Error(), "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default")
|
||||
}
|
||||
|
||||
func TestPipelinesDevelopmentDisabled(t *testing.T) {
|
||||
b := mockBundle(config.Development)
|
||||
notEnabled := false
|
||||
b.Config.Presets.PipelinesDevelopment = ¬Enabled
|
||||
|
||||
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
|
||||
diags := bundle.Apply(context.Background(), b, m)
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
|
||||
}
|
||||
|
|
|
@ -7,8 +7,8 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/databricks/cli/libs/python"
|
||||
"github.com/databricks/databricks-sdk-go/logger"
|
||||
|
||||
"github.com/databricks/cli/bundle/env"
|
||||
|
@ -86,23 +86,15 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
|
|||
return nil
|
||||
}
|
||||
|
||||
if experimental.PyDABs.VEnvPath == "" {
|
||||
return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set")
|
||||
}
|
||||
|
||||
// mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics'
|
||||
var mutateDiags diag.Diagnostics
|
||||
var mutateDiagsHasError = errors.New("unexpected error")
|
||||
|
||||
err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) {
|
||||
pythonPath := interpreterPath(experimental.PyDABs.VEnvPath)
|
||||
pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath)
|
||||
|
||||
if _, err := os.Stat(pythonPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return dyn.InvalidValue, fmt.Errorf("can't find %q, check if venv is created", pythonPath)
|
||||
} else {
|
||||
return dyn.InvalidValue, fmt.Errorf("can't find %q: %w", pythonPath, err)
|
||||
}
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err)
|
||||
}
|
||||
|
||||
cacheDir, err := createCacheDir(ctx)
|
||||
|
@ -423,11 +415,16 @@ func isOmitemptyDelete(left dyn.Value) bool {
|
|||
}
|
||||
}
|
||||
|
||||
// interpreterPath returns platform-specific path to Python interpreter in the virtual environment.
|
||||
func interpreterPath(venvPath string) string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(venvPath, "Scripts", "python3.exe")
|
||||
} else {
|
||||
return filepath.Join(venvPath, "bin", "python3")
|
||||
// detectExecutable looks up the Python interpreter in the virtual environment or, if no venv path is given, in PATH.
|
||||
func detectExecutable(ctx context.Context, venvPath string) (string, error) {
|
||||
if venvPath == "" {
|
||||
interpreter, err := python.DetectExecutable(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return interpreter, nil
|
||||
}
|
||||
|
||||
return python.DetectVEnvExecutable(venvPath)
|
||||
}
|
||||
|
|
|
@ -282,7 +282,7 @@ func TestPythonMutator_venvRequired(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPythonMutator_venvNotFound(t *testing.T) {
|
||||
expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path"))
|
||||
expectedError := fmt.Sprintf("failed to get Python interpreter path: can't find %q, check if virtualenv is created", interpreterPath("bad_path"))
|
||||
|
||||
b := loadYaml("databricks.yml", `
|
||||
experimental:
|
||||
|
@ -596,9 +596,7 @@ func loadYaml(name string, content string) *bundle.Bundle {
|
|||
}
|
||||
}
|
||||
|
||||
func withFakeVEnv(t *testing.T, path string) {
|
||||
interpreterPath := interpreterPath(path)
|
||||
|
||||
func withFakeVEnv(t *testing.T, venvPath string) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -608,6 +606,8 @@ func withFakeVEnv(t *testing.T, path string) {
|
|||
panic(err)
|
||||
}
|
||||
|
||||
interpreterPath := interpreterPath(venvPath)
|
||||
|
||||
err = os.MkdirAll(filepath.Dir(interpreterPath), 0755)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -618,9 +618,22 @@ func withFakeVEnv(t *testing.T, path string) {
|
|||
panic(err)
|
||||
}
|
||||
|
||||
err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0755)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := os.Chdir(cwd); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func interpreterPath(venvPath string) string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(venvPath, "Scripts", "python3.exe")
|
||||
} else {
|
||||
return filepath.Join(venvPath, "bin", "python3")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -45,6 +45,10 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
|
|||
func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
|
||||
v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
|
|
|
@ -17,6 +17,10 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
|
|||
RootPath: ".",
|
||||
Config: config.Root{
|
||||
Sync: config.Sync{
|
||||
Paths: []string{
|
||||
".",
|
||||
"../common",
|
||||
},
|
||||
Include: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
|
@ -29,6 +33,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, "sync.paths[0]", "./databricks.yml")
|
||||
bundletest.SetLocation(b, "sync.paths[1]", "./databricks.yml")
|
||||
bundletest.SetLocation(b, "sync.include[0]", "./file.yml")
|
||||
bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml")
|
||||
bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")
|
||||
|
@ -37,6 +43,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
|
|||
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
|
||||
assert.NoError(t, diags.Error())
|
||||
|
||||
assert.Equal(t, filepath.Clean("."), b.Config.Sync.Paths[0])
|
||||
assert.Equal(t, filepath.Clean("../common"), b.Config.Sync.Paths[1])
|
||||
assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
|
||||
assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
|
||||
assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0])
|
||||
|
@ -48,6 +56,10 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
|
|||
RootPath: "/tmp/dir",
|
||||
Config: config.Root{
|
||||
Sync: config.Sync{
|
||||
Paths: []string{
|
||||
".",
|
||||
"../common",
|
||||
},
|
||||
Include: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
|
@ -60,6 +72,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, "sync.paths[0]", "/tmp/dir/databricks.yml")
|
||||
bundletest.SetLocation(b, "sync.paths[1]", "/tmp/dir/databricks.yml")
|
||||
bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml")
|
||||
bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml")
|
||||
bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")
|
||||
|
@ -68,6 +82,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
|
|||
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
|
||||
assert.NoError(t, diags.Error())
|
||||
|
||||
assert.Equal(t, filepath.Clean("."), b.Config.Sync.Paths[0])
|
||||
assert.Equal(t, filepath.Clean("../common"), b.Config.Sync.Paths[1])
|
||||
assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
|
||||
assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
|
||||
assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0])
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
package mutator
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
)
|
||||
|
||||
type syncDefaultPath struct{}
|
||||
|
||||
// SyncDefaultPath configures the default sync path to be equal to the bundle root.
|
||||
func SyncDefaultPath() bundle.Mutator {
|
||||
return &syncDefaultPath{}
|
||||
}
|
||||
|
||||
func (m *syncDefaultPath) Name() string {
|
||||
return "SyncDefaultPath"
|
||||
}
|
||||
|
||||
func (m *syncDefaultPath) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
isset := false
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
pv, _ := dyn.Get(v, "sync.paths")
|
||||
|
||||
// If the sync paths field is already set, do nothing.
|
||||
// We know it is set if its value is either a nil or a sequence (empty or not).
|
||||
switch pv.Kind() {
|
||||
case dyn.KindNil, dyn.KindSequence:
|
||||
isset = true
|
||||
}
|
||||
|
||||
return v, nil
|
||||
})
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
// If the sync paths field is already set, do nothing.
|
||||
if isset {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set the sync paths to the default value.
|
||||
b.Config.Sync.Paths = []string{"."}
|
||||
return nil
|
||||
}
|
|
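In short, and as the tests that follow exercise: only a completely absent sync.paths key is defaulted to ["."]; an explicit null or an explicit empty sequence counts as set and is left untouched.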
@ -0,0 +1,82 @@
|
|||
package mutator_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSyncDefaultPath_DefaultIfUnset(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
RootPath: "/tmp/some/dir",
|
||||
Config: config.Root{},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diags := bundle.Apply(ctx, b, mutator.SyncDefaultPath())
|
||||
require.NoError(t, diags.Error())
|
||||
assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
|
||||
}
|
||||
|
||||
func TestSyncDefaultPath_SkipIfSet(t *testing.T) {
|
||||
tcases := []struct {
|
||||
name string
|
||||
paths dyn.Value
|
||||
expect []string
|
||||
}{
|
||||
{
|
||||
name: "nil",
|
||||
paths: dyn.V(nil),
|
||||
expect: nil,
|
||||
},
|
||||
{
|
||||
name: "empty sequence",
|
||||
paths: dyn.V([]dyn.Value{}),
|
||||
expect: []string{},
|
||||
},
|
||||
{
|
||||
name: "non-empty sequence",
|
||||
paths: dyn.V([]dyn.Value{dyn.V("something")}),
|
||||
expect: []string{"something"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tcase := range tcases {
|
||||
t.Run(tcase.name, func(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
RootPath: "/tmp/some/dir",
|
||||
Config: config.Root{},
|
||||
}
|
||||
|
||||
diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
v, err := dyn.Set(v, "sync", dyn.V(dyn.NewMapping()))
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
v, err = dyn.Set(v, "sync.paths", tcase.paths)
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
return v, nil
|
||||
})
|
||||
return diag.FromErr(err)
|
||||
})
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
ctx := context.Background()
|
||||
diags = bundle.Apply(ctx, b, mutator.SyncDefaultPath())
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// If the sync paths field is already set, do nothing.
|
||||
assert.Equal(t, tcase.expect, b.Config.Sync.Paths)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,120 @@
|
|||
package mutator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/vfs"
|
||||
)
|
||||
|
||||
type syncInferRoot struct{}
|
||||
|
||||
// SyncInferRoot is a mutator that infers the root path of all files to synchronize by looking at the
|
||||
// paths in the sync configuration. The sync root may be different from the bundle root
|
||||
// when the user intends to synchronize files outside the bundle root.
|
||||
//
|
||||
// The sync root can be equivalent to or an ancestor of the bundle root, but not a descendant.
|
||||
// That is, the sync root must contain the bundle root.
|
||||
//
|
||||
// This mutator requires all sync-related paths and patterns to be relative to the bundle root path.
|
||||
// This is done by the [RewriteSyncPaths] mutator, which must run before this mutator.
|
||||
func SyncInferRoot() bundle.Mutator {
|
||||
return &syncInferRoot{}
|
||||
}
|
||||
|
||||
func (m *syncInferRoot) Name() string {
|
||||
return "SyncInferRoot"
|
||||
}
|
||||
|
||||
// computeRoot finds the innermost path that contains the specified path.
|
||||
// It walks up from the given root until the specified path no longer points outside of it.
|
||||
// If no such path exists (the walk reaches the filesystem root), it returns an empty string.
|
||||
//
|
||||
// See "sync_infer_root_internal_test.go" for examples.
|
||||
func (m *syncInferRoot) computeRoot(path string, root string) string {
|
||||
for !filepath.IsLocal(path) {
|
||||
// Break if we have reached the root of the filesystem.
|
||||
dir := filepath.Dir(root)
|
||||
if dir == root {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Update the sync path as we navigate up the directory tree.
|
||||
path = filepath.Join(filepath.Base(root), path)
|
||||
|
||||
// Move up the directory tree.
|
||||
root = dir
|
||||
}
|
||||
|
||||
return filepath.Clean(root)
|
||||
}
|
||||
|
||||
func (m *syncInferRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
var diags diag.Diagnostics
|
||||
|
||||
// Use the bundle root path as the starting point for inferring the sync root path.
|
||||
bundleRootPath := filepath.Clean(b.RootPath)
|
||||
|
||||
// Infer the sync root path by looking at each one of the sync paths.
|
||||
// Every sync path must be a descendant of the final sync root path.
|
||||
syncRootPath := bundleRootPath
|
||||
for _, path := range b.Config.Sync.Paths {
|
||||
computedPath := m.computeRoot(path, bundleRootPath)
|
||||
if computedPath == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Update sync root path if the computed root path is an ancestor of the current sync root path.
|
||||
if len(computedPath) < len(syncRootPath) {
|
||||
syncRootPath = computedPath
|
||||
}
|
||||
}
|
||||
|
||||
// The new sync root path can only be an ancestor of the previous root path.
|
||||
// Compute the relative path from the sync root to the bundle root.
|
||||
rel, err := filepath.Rel(syncRootPath, bundleRootPath)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
// If during computation of the sync root path we hit the root of the filesystem,
|
||||
// then one or more of the sync paths point outside the filesystem root.
|
||||
// Check if this happened by verifying that none of the paths escape the root
|
||||
// when joined with the sync root path.
|
||||
for i, path := range b.Config.Sync.Paths {
|
||||
if filepath.IsLocal(filepath.Join(rel, path)) {
|
||||
continue
|
||||
}
|
||||
|
||||
diags = append(diags, diag.Diagnostic{
|
||||
Severity: diag.Error,
|
||||
Summary: fmt.Sprintf("invalid sync path %q", path),
|
||||
Locations: b.Config.GetLocations(fmt.Sprintf("sync.paths[%d]", i)),
|
||||
Paths: []dyn.Path{dyn.NewPath(dyn.Key("sync"), dyn.Key("paths"), dyn.Index(i))},
|
||||
})
|
||||
}
|
||||
|
||||
if diags.HasError() {
|
||||
return diags
|
||||
}
|
||||
|
||||
// Update all paths in the sync configuration to be relative to the sync root.
|
||||
for i, p := range b.Config.Sync.Paths {
|
||||
b.Config.Sync.Paths[i] = filepath.Join(rel, p)
|
||||
}
|
||||
for i, p := range b.Config.Sync.Include {
|
||||
b.Config.Sync.Include[i] = filepath.Join(rel, p)
|
||||
}
|
||||
for i, p := range b.Config.Sync.Exclude {
|
||||
b.Config.Sync.Exclude[i] = filepath.Join(rel, p)
|
||||
}
|
||||
|
||||
// Configure the sync root path.
|
||||
b.SyncRoot = vfs.MustNew(syncRootPath)
|
||||
b.SyncRootPath = syncRootPath
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
package mutator
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSyncInferRootInternal_ComputeRoot(t *testing.T) {
|
||||
s := syncInferRoot{}
|
||||
|
||||
tcases := []struct {
|
||||
path string
|
||||
root string
|
||||
out string
|
||||
}{
|
||||
{
|
||||
// Test that "." doesn't change the root.
|
||||
path: ".",
|
||||
root: "/tmp/some/dir",
|
||||
out: "/tmp/some/dir",
|
||||
},
|
||||
{
|
||||
// Test that a subdirectory doesn't change the root.
|
||||
path: "sub",
|
||||
root: "/tmp/some/dir",
|
||||
out: "/tmp/some/dir",
|
||||
},
|
||||
{
|
||||
// Test that a parent directory changes the root.
|
||||
path: "../common",
|
||||
root: "/tmp/some/dir",
|
||||
out: "/tmp/some",
|
||||
},
|
||||
{
|
||||
// Test that a deeply nested parent directory changes the root.
|
||||
path: "../../../../../../common",
|
||||
root: "/tmp/some/dir/that/is/very/deeply/nested",
|
||||
out: "/tmp/some",
|
||||
},
|
||||
{
|
||||
// Test that a parent directory changes the root at the filesystem root boundary.
|
||||
path: "../common",
|
||||
root: "/tmp",
|
||||
out: "/",
|
||||
},
|
||||
{
|
||||
// Test that a parent directory that escapes the filesystem root yields an empty string.
|
||||
path: "../common",
|
||||
root: "/",
|
||||
out: "",
|
||||
},
|
||||
{
|
||||
// Test that the returned path is cleaned even if the root doesn't change.
|
||||
path: "sub",
|
||||
root: "/tmp/some/../dir",
|
||||
out: "/tmp/dir",
|
||||
},
|
||||
{
|
||||
// Test that a relative root path also works.
|
||||
path: "../common",
|
||||
root: "foo/bar",
|
||||
out: "foo",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tcases {
|
||||
out := s.computeRoot(tc.path, tc.root)
|
||||
assert.Equal(t, tc.out, filepath.ToSlash(out))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,198 @@
|
|||
package mutator_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
"github.com/databricks/cli/bundle/internal/bundletest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
RootPath: "/tmp/some/dir",
|
||||
Config: config.Root{
|
||||
Sync: config.Sync{
|
||||
Paths: []string{
|
||||
".",
|
||||
},
|
||||
Include: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
},
|
||||
Exclude: []string{
|
||||
"baz",
|
||||
"qux",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
|
||||
assert.NoError(t, diags.Error())
|
||||
assert.Equal(t, filepath.FromSlash("/tmp/some/dir"), b.SyncRootPath)
|
||||
|
||||
// Check that the paths are unchanged.
|
||||
assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
|
||||
assert.Equal(t, []string{"foo", "bar"}, b.Config.Sync.Include)
|
||||
assert.Equal(t, []string{"baz", "qux"}, b.Config.Sync.Exclude)
|
||||
}
|
||||
|
||||
func TestSyncInferRoot_NominalRelative(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
RootPath: "./some/dir",
|
||||
Config: config.Root{
|
||||
Sync: config.Sync{
|
||||
Paths: []string{
|
||||
".",
|
||||
},
|
||||
Include: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
},
|
||||
Exclude: []string{
|
||||
"baz",
|
||||
"qux",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
|
||||
assert.NoError(t, diags.Error())
|
||||
assert.Equal(t, filepath.FromSlash("some/dir"), b.SyncRootPath)
|
||||
|
||||
// Check that the paths are unchanged.
|
||||
assert.Equal(t, []string{"."}, b.Config.Sync.Paths)
|
||||
assert.Equal(t, []string{"foo", "bar"}, b.Config.Sync.Include)
|
||||
assert.Equal(t, []string{"baz", "qux"}, b.Config.Sync.Exclude)
|
||||
}
|
||||
|
||||
func TestSyncInferRoot_ParentDirectory(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
RootPath: "/tmp/some/dir",
|
||||
Config: config.Root{
|
||||
Sync: config.Sync{
|
||||
Paths: []string{
|
||||
"../common",
|
||||
},
|
||||
Include: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
},
|
||||
Exclude: []string{
|
||||
"baz",
|
||||
"qux",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
|
||||
assert.NoError(t, diags.Error())
|
||||
assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
|
||||
|
||||
// Check that the paths are updated.
|
||||
assert.Equal(t, []string{"common"}, b.Config.Sync.Paths)
|
||||
assert.Equal(t, []string{filepath.FromSlash("dir/foo"), filepath.FromSlash("dir/bar")}, b.Config.Sync.Include)
|
||||
assert.Equal(t, []string{filepath.FromSlash("dir/baz"), filepath.FromSlash("dir/qux")}, b.Config.Sync.Exclude)
|
||||
}
|
||||
|
||||
func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
RootPath: "/tmp/some/dir/that/is/very/deeply/nested",
|
||||
Config: config.Root{
|
||||
Sync: config.Sync{
|
||||
Paths: []string{
|
||||
"../../../../../../common",
|
||||
},
|
||||
Include: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
},
|
||||
Exclude: []string{
|
||||
"baz",
|
||||
"qux",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
|
||||
assert.NoError(t, diags.Error())
|
||||
assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
|
||||
|
||||
// Check that the paths are updated.
|
||||
assert.Equal(t, []string{"common"}, b.Config.Sync.Paths)
|
||||
assert.Equal(t, []string{
|
||||
filepath.FromSlash("dir/that/is/very/deeply/nested/foo"),
|
||||
filepath.FromSlash("dir/that/is/very/deeply/nested/bar"),
|
||||
}, b.Config.Sync.Include)
|
||||
assert.Equal(t, []string{
|
||||
filepath.FromSlash("dir/that/is/very/deeply/nested/baz"),
|
||||
filepath.FromSlash("dir/that/is/very/deeply/nested/qux"),
|
||||
}, b.Config.Sync.Exclude)
|
||||
}
|
||||
|
||||
func TestSyncInferRoot_MultiplePaths(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
RootPath: "/tmp/some/bundle/root",
|
||||
Config: config.Root{
|
||||
Sync: config.Sync{
|
||||
Paths: []string{
|
||||
"./foo",
|
||||
"../common",
|
||||
"./bar",
|
||||
"../../baz",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
|
||||
assert.NoError(t, diags.Error())
|
||||
assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath)
|
||||
|
||||
// Check that the paths are updated.
|
||||
assert.Equal(t, filepath.FromSlash("bundle/root/foo"), b.Config.Sync.Paths[0])
|
||||
assert.Equal(t, filepath.FromSlash("bundle/common"), b.Config.Sync.Paths[1])
|
||||
assert.Equal(t, filepath.FromSlash("bundle/root/bar"), b.Config.Sync.Paths[2])
|
||||
assert.Equal(t, filepath.FromSlash("baz"), b.Config.Sync.Paths[3])
|
||||
}
|
||||
|
||||
func TestSyncInferRoot_Error(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
RootPath: "/tmp/some/dir",
|
||||
Config: config.Root{
|
||||
Sync: config.Sync{
|
||||
Paths: []string{
|
||||
"../../../../error",
|
||||
"../../../thisworks",
|
||||
"../../../../../error",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, "sync.paths", "databricks.yml")
|
||||
|
||||
ctx := context.Background()
|
||||
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
|
||||
require.Len(t, diags, 2)
|
||||
assert.Equal(t, `invalid sync path "../../../../error"`, diags[0].Summary)
|
||||
assert.Equal(t, "databricks.yml:0:0", diags[0].Locations[0].String())
|
||||
assert.Equal(t, "sync.paths[0]", diags[0].Paths[0].String())
|
||||
assert.Equal(t, `invalid sync path "../../../../../error"`, diags[1].Summary)
|
||||
assert.Equal(t, "databricks.yml:0:0", diags[1].Locations[0].String())
|
||||
assert.Equal(t, "sync.paths[2]", diags[1].Paths[0].String())
|
||||
}
|
|
@ -82,7 +82,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund
|
|||
return err
|
||||
}
|
||||
|
||||
internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
|
||||
internalDirRel, err := filepath.Rel(b.SyncRootPath, internalDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -57,17 +56,18 @@ func TestGenerateTrampoline(t *testing.T) {
|
|||
}
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
RootPath: filepath.Join(tmpDir, "parent", "my_bundle"),
|
||||
SyncRootPath: filepath.Join(tmpDir, "parent"),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/Workspace/files",
|
||||
},
|
||||
Bundle: config.Bundle{
|
||||
Target: "development",
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"test": {
|
||||
Paths: paths.Paths{
|
||||
ConfigFilePath: tmpDir,
|
||||
},
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: tasks,
|
||||
},
|
||||
|
@ -93,6 +93,6 @@ func TestGenerateTrampoline(t *testing.T) {
|
|||
require.Equal(t, "Hello from Trampoline", string(bytes))
|
||||
|
||||
task := b.Config.Resources.Jobs["test"].Tasks[0]
|
||||
require.Equal(t, task.NotebookTask.NotebookPath, ".databricks/bundle/development/.internal/notebook_test_to_trampoline")
|
||||
require.Equal(t, "/Workspace/files/my_bundle/.databricks/bundle/development/.internal/notebook_test_to_trampoline", task.NotebookTask.NotebookPath)
|
||||
require.Nil(t, task.PythonWheelTask)
|
||||
}
|
||||
|
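To make the expectation above concrete (a reading of the change, not something stated in the diff): with SyncRootPath at <tmpDir>/parent and the bundle root at <tmpDir>/parent/my_bundle, the generated .databricks/... directory is now made relative to the sync root instead of the bundle root, so the remote notebook path under workspace.file_path ("/Workspace/files") gains the extra "my_bundle/" segment.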
|
|
@ -93,14 +93,14 @@ func (t *translateContext) rewritePath(
|
|||
return nil
|
||||
}
|
||||
|
||||
// Local path must be contained in the bundle root.
|
||||
// Local path must be contained in the sync root.
|
||||
// If it isn't, it won't be synchronized into the workspace.
|
||||
localRelPath, err := filepath.Rel(t.b.RootPath, localPath)
|
||||
localRelPath, err := filepath.Rel(t.b.SyncRootPath, localPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.HasPrefix(localRelPath, "..") {
|
||||
return fmt.Errorf("path %s is not contained in bundle root path", localPath)
|
||||
return fmt.Errorf("path %s is not contained in sync root path", localPath)
|
||||
}
|
||||
|
||||
// Prefix remote path with its remote root path.
|
||||
|
@ -118,7 +118,7 @@ func (t *translateContext) rewritePath(
|
|||
}
|
||||
|
||||
func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
|
||||
nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath))
|
||||
nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", fmt.Errorf("notebook %s not found", literal)
|
||||
}
|
||||
|
@ -134,7 +134,7 @@ func (t *translateContext) translateNotebookPath(literal, localFullPath, localRe
|
|||
}
|
||||
|
||||
func (t *translateContext) translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
|
||||
nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath))
|
||||
nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", fmt.Errorf("file %s not found", literal)
|
||||
}
|
||||
|
@ -148,7 +148,7 @@ func (t *translateContext) translateFilePath(literal, localFullPath, localRelPat
|
|||
}
|
||||
|
||||
func (t *translateContext) translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
|
||||
info, err := t.b.BundleRoot.Stat(filepath.ToSlash(localRelPath))
|
||||
info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -50,6 +50,11 @@ func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern
|
|||
t.translateNoOp,
|
||||
noSkipRewrite,
|
||||
},
|
||||
{
|
||||
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
|
||||
t.translateFilePath,
|
||||
noSkipRewrite,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -78,7 +83,7 @@ func (t *translateContext) jobRewritePatterns() []jobRewritePattern {
|
|||
),
|
||||
t.translateNoOpWithPrefix,
|
||||
func(s string) bool {
|
||||
return !libraries.IsEnvironmentDependencyLocal(s)
|
||||
return !libraries.IsLibraryLocal(s)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -41,8 +41,8 @@ func touchEmptyFile(t *testing.T, path string) {
|
|||
func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/bundle",
|
||||
|
@ -110,10 +110,11 @@ func TestTranslatePaths(t *testing.T) {
|
|||
touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py"))
|
||||
touchEmptyFile(t, filepath.Join(dir, "my_python_file.py"))
|
||||
touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar"))
|
||||
touchEmptyFile(t, filepath.Join(dir, "requirements.txt"))
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/bundle",
|
||||
|
@ -140,6 +141,9 @@ func TestTranslatePaths(t *testing.T) {
|
|||
NotebookTask: &jobs.NotebookTask{
|
||||
NotebookPath: "./my_job_notebook.py",
|
||||
},
|
||||
Libraries: []compute.Library{
|
||||
{Requirements: "./requirements.txt"},
|
||||
},
|
||||
},
|
||||
{
|
||||
PythonWheelTask: &jobs.PythonWheelTask{
|
||||
|
@ -232,6 +236,11 @@ func TestTranslatePaths(t *testing.T) {
|
|||
"/bundle/my_job_notebook",
|
||||
b.Config.Resources.Jobs["job"].Tasks[2].NotebookTask.NotebookPath,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
"/bundle/requirements.txt",
|
||||
b.Config.Resources.Jobs["job"].Tasks[2].Libraries[0].Requirements,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
"/bundle/my_python_file.py",
|
||||
|
@ -280,8 +289,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
|
|||
touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml"))
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/bundle",
|
||||
|
@ -371,12 +380,12 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
|
|||
)
|
||||
}
|
||||
|
||||
func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
|
||||
func TestTranslatePathsOutsideSyncRoot(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/bundle",
|
||||
|
@ -402,15 +411,15 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
|
|||
bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml"))
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||
assert.ErrorContains(t, diags.Error(), "is not contained in bundle root")
|
||||
assert.ErrorContains(t, diags.Error(), "is not contained in sync root path")
|
||||
}
|
||||
|
||||
func TestJobNotebookDoesNotExistError(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
|
@ -440,8 +449,8 @@ func TestJobFileDoesNotExistError(t *testing.T) {
|
|||
dir := t.TempDir()
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
|
@ -471,8 +480,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
|
|||
dir := t.TempDir()
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
|
@ -502,8 +511,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
|
|||
dir := t.TempDir()
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Pipelines: map[string]*resources.Pipeline{
|
||||
|
@ -534,8 +543,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
|
|||
touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/bundle",
|
||||
|
@ -569,8 +578,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
|
|||
touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/bundle",
|
||||
|
@ -604,8 +613,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
|
|||
touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/bundle",
|
||||
|
@ -639,8 +648,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
|
|||
touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/bundle",
|
||||
|
@ -675,8 +684,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
|
|||
touchEmptyFile(t, filepath.Join(dir, "env2.py"))
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
|
@ -715,8 +724,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
|
|||
func TestTranslatePathWithComplexVariables(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
BundleRoot: vfs.MustNew(dir),
|
||||
SyncRootPath: dir,
|
||||
SyncRoot: vfs.MustNew(dir),
|
||||
Config: config.Root{
|
||||
Variables: map[string]*variable.Variable{
|
||||
"cluster_libraries": {
|
||||
|
|
|
@ -40,6 +40,10 @@ func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Dia
|
|||
}
|
||||
|
||||
if !c.Check(version) {
|
||||
if version.Prerelease() == "dev" && version.Major() == 0 {
|
||||
return diag.Warningf("Ignoring Databricks CLI version constraint for development build. Required: %s, current: %s", constraint, currentVersion)
|
||||
}
|
||||
|
||||
return diag.Errorf("Databricks CLI version constraint not satisfied. Required: %s, current: %s", constraint, currentVersion)
|
||||
}
|
||||
|
||||
|
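Put differently: development builds, which report a version of the form 0.0.0-dev+<sha>, now emit a warning instead of an error when a bundle declares a CLI version constraint; the test case added below covers exactly this path.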
|
|
@ -107,6 +107,11 @@ func TestVerifyCliVersion(t *testing.T) {
|
|||
constraint: "^0.100",
|
||||
expectedError: "invalid version constraint \"^0.100\" specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)",
|
||||
},
|
||||
{
|
||||
currentVersion: "0.0.0-dev+06b169284737",
|
||||
constraint: ">= 0.100.0",
|
||||
expectedError: "Ignoring Databricks CLI version constraint for development build. Required: >= 0.100.0",
|
||||
},
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
|
@ -130,7 +135,7 @@ func TestVerifyCliVersion(t *testing.T) {
|
|||
diags := bundle.Apply(context.Background(), b, VerifyCliVersion())
|
||||
if tc.expectedError != "" {
|
||||
require.NotEmpty(t, diags)
|
||||
require.Equal(t, tc.expectedError, diags.Error().Error())
|
||||
require.Contains(t, diags[0].Summary, tc.expectedError)
|
||||
} else {
|
||||
require.Empty(t, diags)
|
||||
}
|
||||
|
|
|
@ -1,22 +0,0 @@
|
|||
package paths
|
||||
|
||||
import (
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
)
|
||||
|
||||
type Paths struct {
|
||||
// Absolute path on the local file system to the configuration file that holds
|
||||
// the definition of this resource.
|
||||
ConfigFilePath string `json:"-" bundle:"readonly"`
|
||||
|
||||
// DynamicValue stores the [dyn.Value] of the containing struct.
|
||||
// This assumes that this struct is always embedded.
|
||||
DynamicValue dyn.Value `json:"-"`
|
||||
}
|
||||
|
||||
func (p *Paths) ConfigureConfigFilePath() {
|
||||
if !p.DynamicValue.IsValid() {
|
||||
panic("DynamicValue not set")
|
||||
}
|
||||
p.ConfigFilePath = p.DynamicValue.Location().File
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
package config
|
||||
|
||||
const Paused = "PAUSED"
|
||||
const Unpaused = "UNPAUSED"
|
||||
|
||||
type Presets struct {
|
||||
// NamePrefix to prepend to all resource names.
|
||||
NamePrefix string `json:"name_prefix,omitempty"`
|
||||
|
||||
// PipelinesDevelopment is the default value for the development field of pipelines.
|
||||
PipelinesDevelopment *bool `json:"pipelines_development,omitempty"`
|
||||
|
||||
// TriggerPauseStatus is the default value for the pause status of all triggers and schedules.
|
||||
// Either config.Paused, config.Unpaused, or empty.
|
||||
TriggerPauseStatus string `json:"trigger_pause_status,omitempty"`
|
||||
|
||||
// JobsMaxConcurrentRuns is the default value for the max concurrent runs of jobs.
|
||||
JobsMaxConcurrentRuns int `json:"jobs_max_concurrent_runs,omitempty"`
|
||||
|
||||
// Tags to add to all resources.
|
||||
Tags map[string]string `json:"tags,omitempty"`
|
||||
}
|
||||
|
||||
// IsExplicitlyEnabled tests whether this feature is explicitly enabled.
|
||||
func IsExplicitlyEnabled(feature *bool) bool {
|
||||
return feature != nil && *feature
|
||||
}
|
||||
|
||||
// IsExplicitlyDisabled tests whether this feature is explicitly disabled.
|
||||
func IsExplicitlyDisabled(feature *bool) bool {
|
||||
return feature != nil && !*feature
|
||||
}
|
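A minimal usage sketch of the two helpers above (illustrative only, not part of this commit): a nil pointer means the preset was not set, so neither helper reports true for it.

var unset *bool
enabled, disabled := true, false
_ = IsExplicitlyEnabled(unset)      // false: not set
_ = IsExplicitlyDisabled(unset)     // false: not set
_ = IsExplicitlyEnabled(&enabled)   // true: explicitly enabled
_ = IsExplicitlyDisabled(&disabled) // true: explicitly disabled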
|
@ -21,81 +21,14 @@ type Resources struct {
|
|||
Schemas map[string]*resources.Schema `json:"schemas,omitempty"`
|
||||
}
|
||||
|
||||
type resource struct {
|
||||
resource ConfigResource
|
||||
resource_type string
|
||||
key string
|
||||
}
|
||||
|
||||
func (r *Resources) allResources() []resource {
|
||||
all := make([]resource, 0)
|
||||
for k, e := range r.Jobs {
|
||||
all = append(all, resource{resource_type: "job", resource: e, key: k})
|
||||
}
|
||||
for k, e := range r.Pipelines {
|
||||
all = append(all, resource{resource_type: "pipeline", resource: e, key: k})
|
||||
}
|
||||
for k, e := range r.Models {
|
||||
all = append(all, resource{resource_type: "model", resource: e, key: k})
|
||||
}
|
||||
for k, e := range r.Experiments {
|
||||
all = append(all, resource{resource_type: "experiment", resource: e, key: k})
|
||||
}
|
||||
for k, e := range r.ModelServingEndpoints {
|
||||
all = append(all, resource{resource_type: "serving endpoint", resource: e, key: k})
|
||||
}
|
||||
for k, e := range r.RegisteredModels {
|
||||
all = append(all, resource{resource_type: "registered model", resource: e, key: k})
|
||||
}
|
||||
for k, e := range r.QualityMonitors {
|
||||
all = append(all, resource{resource_type: "quality monitor", resource: e, key: k})
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
func (r *Resources) VerifyAllResourcesDefined() error {
|
||||
all := r.allResources()
|
||||
for _, e := range all {
|
||||
err := e.resource.Validate()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s %s is not defined", e.resource_type, e.key)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConfigureConfigFilePath sets the specified path for all resources contained in this instance.
|
||||
// This property is used to correctly resolve paths relative to the path
|
||||
// of the configuration file they were defined in.
|
||||
func (r *Resources) ConfigureConfigFilePath() {
|
||||
for _, e := range r.Jobs {
|
||||
e.ConfigureConfigFilePath()
|
||||
}
|
||||
for _, e := range r.Pipelines {
|
||||
e.ConfigureConfigFilePath()
|
||||
}
|
||||
for _, e := range r.Models {
|
||||
e.ConfigureConfigFilePath()
|
||||
}
|
||||
for _, e := range r.Experiments {
|
||||
e.ConfigureConfigFilePath()
|
||||
}
|
||||
for _, e := range r.ModelServingEndpoints {
|
||||
e.ConfigureConfigFilePath()
|
||||
}
|
||||
for _, e := range r.RegisteredModels {
|
||||
e.ConfigureConfigFilePath()
|
||||
}
|
||||
for _, e := range r.QualityMonitors {
|
||||
e.ConfigureConfigFilePath()
|
||||
}
|
||||
}
|
||||
|
||||
type ConfigResource interface {
|
||||
// Function to assert if the resource exists in the workspace configured in
|
||||
// the input workspace client.
|
||||
Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error)
|
||||
|
||||
// Terraform equivalent name of the resource. For example "databricks_job"
|
||||
// for jobs and "databricks_pipeline" for pipelines.
|
||||
TerraformResourceName() string
|
||||
Validate() error
|
||||
}
|
||||
|
||||
func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) {
|
||||
|
|
|
@ -2,10 +2,8 @@ package resources
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/databricks/databricks-sdk-go/marshal"
|
||||
|
@ -17,8 +15,6 @@ type Job struct {
|
|||
Permissions []Permission `json:"permissions,omitempty"`
|
||||
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
|
||||
|
||||
paths.Paths
|
||||
|
||||
*jobs.JobSettings
|
||||
}
|
||||
|
||||
|
@ -48,11 +44,3 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri
|
|||
func (j *Job) TerraformResourceName() string {
|
||||
return "databricks_job"
|
||||
}
|
||||
|
||||
func (j *Job) Validate() error {
|
||||
if j == nil || !j.DynamicValue.IsValid() || j.JobSettings == nil {
|
||||
return fmt.Errorf("job is not defined")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@ package resources
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/databricks/databricks-sdk-go/marshal"
|
||||
|
@ -16,8 +14,6 @@ type MlflowExperiment struct {
|
|||
Permissions []Permission `json:"permissions,omitempty"`
|
||||
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
|
||||
|
||||
paths.Paths
|
||||
|
||||
*ml.Experiment
|
||||
}
|
||||
|
||||
|
@ -43,11 +39,3 @@ func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceCl
|
|||
func (s *MlflowExperiment) TerraformResourceName() string {
|
||||
return "databricks_mlflow_experiment"
|
||||
}
|
||||
|
||||
func (s *MlflowExperiment) Validate() error {
|
||||
if s == nil || !s.DynamicValue.IsValid() {
|
||||
return fmt.Errorf("experiment is not defined")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@ package resources
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/databricks/databricks-sdk-go/marshal"
|
||||
|
@ -16,8 +14,6 @@ type MlflowModel struct {
|
|||
Permissions []Permission `json:"permissions,omitempty"`
|
||||
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
|
||||
|
||||
paths.Paths
|
||||
|
||||
*ml.Model
|
||||
}
|
||||
|
||||
|
@ -43,11 +39,3 @@ func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient,
|
|||
func (s *MlflowModel) TerraformResourceName() string {
|
||||
return "databricks_mlflow_model"
|
||||
}
|
||||
|
||||
func (s *MlflowModel) Validate() error {
|
||||
if s == nil || !s.DynamicValue.IsValid() {
|
||||
return fmt.Errorf("model is not defined")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@ package resources
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/databricks/databricks-sdk-go/marshal"
|
||||
|
@ -20,10 +18,6 @@ type ModelServingEndpoint struct {
|
|||
// as a reference in other resources. This value is returned by terraform.
|
||||
ID string `json:"id,omitempty" bundle:"readonly"`
|
||||
|
||||
// Path to config file where the resource is defined. All bundle resources
|
||||
// include this for interpolation purposes.
|
||||
paths.Paths
|
||||
|
||||
// This is a resource agnostic implementation of permissions for ACLs.
|
||||
// Implementation could be different based on the resource type.
|
||||
Permissions []Permission `json:"permissions,omitempty"`
|
||||
|
@ -53,11 +47,3 @@ func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.Workspa
|
|||
func (s *ModelServingEndpoint) TerraformResourceName() string {
|
||||
return "databricks_model_serving"
|
||||
}
|
||||
|
||||
func (s *ModelServingEndpoint) Validate() error {
|
||||
if s == nil || !s.DynamicValue.IsValid() {
|
||||
return fmt.Errorf("serving endpoint is not defined")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@ package resources
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/databricks/databricks-sdk-go/marshal"
|
||||
|
@ -16,8 +14,6 @@ type Pipeline struct {
|
|||
Permissions []Permission `json:"permissions,omitempty"`
|
||||
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
|
||||
|
||||
paths.Paths
|
||||
|
||||
*pipelines.PipelineSpec
|
||||
}
|
||||
|
||||
|
@ -43,11 +39,3 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
|
|||
func (p *Pipeline) TerraformResourceName() string {
|
||||
return "databricks_pipeline"
|
||||
}
|
||||
|
||||
func (p *Pipeline) Validate() error {
|
||||
if p == nil || !p.DynamicValue.IsValid() {
|
||||
return fmt.Errorf("pipeline is not defined")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@ package resources
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/databricks/databricks-sdk-go/marshal"
|
||||
|
@ -21,10 +19,6 @@ type QualityMonitor struct {
|
|||
// as a reference in other resources. This value is returned by terraform.
|
||||
ID string `json:"id,omitempty" bundle:"readonly"`
|
||||
|
||||
// Path to config file where the resource is defined. All bundle resources
|
||||
// include this for interpolation purposes.
|
||||
paths.Paths
|
||||
|
||||
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
|
||||
}
|
||||
|
||||
|
@ -50,11 +44,3 @@ func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClie
|
|||
func (s *QualityMonitor) TerraformResourceName() string {
|
||||
return "databricks_quality_monitor"
|
||||
}
|
||||
|
||||
func (s *QualityMonitor) Validate() error {
|
||||
if s == nil || !s.DynamicValue.IsValid() {
|
||||
return fmt.Errorf("quality monitor is not defined")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@ package resources
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
"github.com/databricks/databricks-sdk-go/marshal"
|
||||
|
@ -21,10 +19,6 @@ type RegisteredModel struct {
|
|||
// as a reference in other resources. This value is returned by terraform.
|
||||
ID string `json:"id,omitempty" bundle:"readonly"`
|
||||
|
||||
// Path to config file where the resource is defined. All bundle resources
|
||||
// include this for interpolation purposes.
|
||||
paths.Paths
|
||||
|
||||
// This represents the input args for terraform, and will get converted
|
||||
// to a HCL representation for CRUD
|
||||
*catalog.CreateRegisteredModelRequest
|
||||
|
@ -54,11 +48,3 @@ func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceCli
|
|||
func (s *RegisteredModel) TerraformResourceName() string {
|
||||
return "databricks_registered_model"
|
||||
}
|
||||
|
||||
func (s *RegisteredModel) Validate() error {
|
||||
if s == nil || !s.DynamicValue.IsValid() {
|
||||
return fmt.Errorf("registered model is not defined")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -60,6 +60,10 @@ type Root struct {
|
|||
// RunAs section allows to define an execution identity for jobs and pipelines runs
|
||||
RunAs *jobs.JobRunAs `json:"run_as,omitempty"`
|
||||
|
||||
// Presets applies preset transformations throughout the bundle, e.g.
|
||||
// adding a name prefix to deployed resources.
|
||||
Presets Presets `json:"presets,omitempty"`
|
||||
|
||||
Experimental *Experimental `json:"experimental,omitempty"`
|
||||
|
||||
// Permissions section allows to define permissions which will be
|
||||
|
@ -136,17 +140,6 @@ func (r *Root) updateWithDynamicValue(nv dyn.Value) error {
|
|||
|
||||
// Assign the normalized configuration tree.
|
||||
r.value = nv
|
||||
|
||||
// At the moment the check has to be done as part of updateWithDynamicValue
|
||||
// because otherwise ConfigureConfigFilePath will fail with a panic.
|
||||
// In the future, we should move this check to a separate mutator in initialise phase.
|
||||
err = r.Resources.VerifyAllResourcesDefined()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Assign config file paths after converting to typed configuration.
|
||||
r.ConfigureConfigFilePath()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -238,15 +231,6 @@ func (r *Root) MarkMutatorExit(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// SetConfigFilePath configures the path that its configuration
|
||||
// was loaded from in configuration leafs that require it.
|
||||
func (r *Root) ConfigureConfigFilePath() {
|
||||
r.Resources.ConfigureConfigFilePath()
|
||||
if r.Artifacts != nil {
|
||||
r.Artifacts.ConfigureConfigFilePath()
|
||||
}
|
||||
}
|
||||
|
||||
// Initializes variables using values passed from the command line flag
|
||||
// Input has to be a string of the form `foo=bar`. In this case the variable with
|
||||
// name `foo` is assigned the value `bar`
|
||||
|
@ -327,6 +311,7 @@ func (r *Root) MergeTargetOverrides(name string) error {
|
|||
"resources",
|
||||
"sync",
|
||||
"permissions",
|
||||
"presets",
|
||||
} {
|
||||
if root, err = mergeField(root, target, f); err != nil {
|
||||
return err
|
||||
|
|
|
@ -1,6 +1,10 @@
|
|||
package config
|
||||
|
||||
type Sync struct {
|
||||
// Paths contains a list of paths to synchronize relative to the bundle root path.
|
||||
// If not configured, this defaults to synchronizing everything in the bundle root path (i.e. `.`).
|
||||
Paths []string `json:"paths,omitempty"`
|
||||
|
||||
// Include contains a list of globs evaluated relative to the bundle root path
|
||||
// to explicitly include files that were excluded by the user's gitignore.
|
||||
Include []string `json:"include,omitempty"`
|
||||
|
|
|
@ -20,6 +20,10 @@ type Target struct {
|
|||
// development purposes.
|
||||
Mode Mode `json:"mode,omitempty"`
|
||||
|
||||
// Mutator configurations that e.g. change the
|
||||
// name prefix of deployed resources.
|
||||
Presets Presets `json:"presets,omitempty"`
|
||||
|
||||
// Overrides the compute used for jobs and other supported assets.
|
||||
ComputeID string `json:"compute_id,omitempty"`
|
||||
|
||||
|
|
|
@ -0,0 +1,57 @@
package validate

import (
	"context"
	"fmt"
	"slices"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

func AllResourcesHaveValues() bundle.Mutator {
	return &allResourcesHaveValues{}
}

type allResourcesHaveValues struct{}

func (m *allResourcesHaveValues) Name() string {
	return "validate:AllResourcesHaveValues"
}

func (m *allResourcesHaveValues) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	diags := diag.Diagnostics{}

	_, err := dyn.MapByPattern(
		b.Config.Value(),
		dyn.NewPattern(dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			if v.Kind() != dyn.KindNil {
				return v, nil
			}

			// Type of the resource, stripped of the trailing 's' to make it
			// singular.
			rType := strings.TrimSuffix(p[1].Key(), "s")

			// Name of the resource. Eg: "foo" in "jobs.foo".
			rName := p[2].Key()

			diags = append(diags, diag.Diagnostic{
				Severity:  diag.Error,
				Summary:   fmt.Sprintf("%s %s is not defined", rType, rName),
				Locations: v.Locations(),
				Paths:     []dyn.Path{slices.Clone(p)},
			})

			return v, nil
		},
	)
	if err != nil {
		diags = append(diags, diag.FromErr(err)...)
	}

	return diags
}
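A minimal sketch of running this mutator against an already-loaded bundle; the wrapper function, package name, and import path are assumed for illustration.

package example

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/validate"
)

// reportUndefinedResources prints one line per empty resource definition.
// b is assumed to hold an already-loaded configuration tree.
func reportUndefinedResources(ctx context.Context, b *bundle.Bundle) {
	diags := bundle.Apply(ctx, b, validate.AllResourcesHaveValues())
	for _, d := range diags {
		fmt.Println(d.Summary) // e.g. "job my_job is not defined"
	}
}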
@ -63,7 +63,7 @@ func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (di
|
|||
return err
|
||||
}
|
||||
|
||||
all, err := fs.All()
|
||||
all, err := fs.Files()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -220,7 +220,7 @@ type resolvers struct {
|
|||
func allResolvers() *resolvers {
|
||||
r := &resolvers{}
|
||||
r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
|
||||
entity, err := w.Alerts.GetByName(ctx, name)
|
||||
entity, err := w.Alerts.GetByDisplayName(ctx, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -284,7 +284,7 @@ func allResolvers() *resolvers {
|
|||
return fmt.Sprint(entity.PipelineId), nil
|
||||
}
|
||||
r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
|
||||
entity, err := w.Queries.GetByName(ctx, name)
|
||||
entity, err := w.Queries.GetByDisplayName(ctx, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -28,10 +28,12 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp
|
|||
}
|
||||
|
||||
opts := &sync.SyncOptions{
|
||||
LocalPath: rb.BundleRoot(),
|
||||
LocalRoot: rb.SyncRoot(),
|
||||
Paths: rb.Config().Sync.Paths,
|
||||
Include: includes,
|
||||
Exclude: rb.Config().Sync.Exclude,
|
||||
|
||||
RemotePath: rb.Config().Workspace.FilePath,
|
||||
Include: includes,
|
||||
Exclude: rb.Config().Sync.Exclude,
|
||||
Host: rb.WorkspaceClient().Config.Host,
|
||||
|
||||
Full: false,
|
||||
|
|
|
@ -39,7 +39,8 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
|
|||
for name, job := range b.Config.Resources.Jobs {
|
||||
// Compute config file path the job is defined in, relative to the bundle
|
||||
// root
|
||||
relativePath, err := filepath.Rel(b.RootPath, job.ConfigFilePath)
|
||||
l := b.Config.GetLocation("resources.jobs." + name)
|
||||
relativePath, err := filepath.Rel(b.RootPath, l.File)
|
||||
if err != nil {
|
||||
return diag.Errorf("failed to compute relative path for job %s: %v", name, err)
|
||||
}
|
||||
|
|
|
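A compact sketch, assuming an already-loaded bundle, of how a mutator can recover each job's defining file via GetLocation now that the per-resource ConfigFilePath field is gone; the helper name and package are made up.

package example

import (
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

// jobLocations maps each job key to the config file it was defined in,
// relative to the bundle root. Sketch only; error handling is simplified.
func jobLocations(b *bundle.Bundle) (map[string]string, diag.Diagnostics) {
	out := map[string]string{}
	for name := range b.Config.Resources.Jobs {
		l := b.Config.GetLocation("resources.jobs." + name)
		rel, err := filepath.Rel(b.RootPath, l.File)
		if err != nil {
			return nil, diag.Errorf("failed to compute relative path for job %s: %v", name, err)
		}
		out[name] = rel
	}
	return out, nil
}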
@ -85,7 +85,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
|
|||
}
|
||||
|
||||
log.Infof(ctx, "Creating new snapshot")
|
||||
snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.BundleRoot), opts)
|
||||
snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.SyncRoot), opts)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
|
|
@ -64,6 +64,10 @@ func testStatePull(t *testing.T, opts statePullOpts) {
|
|||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
BundleRoot: vfs.MustNew(tmpDir),
|
||||
|
||||
SyncRootPath: tmpDir,
|
||||
SyncRoot: vfs.MustNew(tmpDir),
|
||||
|
||||
Config: config.Root{
|
||||
Bundle: config.Bundle{
|
||||
Target: "default",
|
||||
|
@ -81,11 +85,11 @@ func testStatePull(t *testing.T, opts statePullOpts) {
|
|||
ctx := context.Background()
|
||||
|
||||
for _, file := range opts.localFiles {
|
||||
testutil.Touch(t, b.RootPath, "bar", file)
|
||||
testutil.Touch(t, b.SyncRootPath, "bar", file)
|
||||
}
|
||||
|
||||
for _, file := range opts.localNotebooks {
|
||||
testutil.TouchNotebook(t, b.RootPath, "bar", file)
|
||||
testutil.TouchNotebook(t, b.SyncRootPath, "bar", file)
|
||||
}
|
||||
|
||||
if opts.withExistingSnapshot {
|
||||
|
|
|
@ -18,7 +18,7 @@ func TestFromSlice(t *testing.T) {
|
|||
testutil.Touch(t, tmpDir, "test2.py")
|
||||
testutil.Touch(t, tmpDir, "test3.py")
|
||||
|
||||
files, err := fileset.All()
|
||||
files, err := fileset.Files()
|
||||
require.NoError(t, err)
|
||||
|
||||
f, err := FromSlice(files)
|
||||
|
@ -38,7 +38,7 @@ func TestToSlice(t *testing.T) {
|
|||
testutil.Touch(t, tmpDir, "test2.py")
|
||||
testutil.Touch(t, tmpDir, "test3.py")
|
||||
|
||||
files, err := fileset.All()
|
||||
files, err := fileset.Files()
|
||||
require.NoError(t, err)
|
||||
|
||||
f, err := FromSlice(files)
|
||||
|
|
|
@ -23,7 +23,7 @@ func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle {
|
|||
testutil.Touch(t, tmpDir, "test1.py")
|
||||
testutil.TouchNotebook(t, tmpDir, "test2.py")
|
||||
|
||||
files, err := fileset.New(vfs.MustNew(tmpDir)).All()
|
||||
files, err := fileset.New(vfs.MustNew(tmpDir)).Files()
|
||||
require.NoError(t, err)
|
||||
|
||||
return &bundle.Bundle{
|
||||
|
|
|
@ -29,6 +29,4 @@ func SetLocation(b *bundle.Bundle, prefix string, filePath string) {
|
|||
return v, dyn.ErrSkip
|
||||
})
|
||||
})
|
||||
|
||||
b.Config.ConfigureConfigFilePath()
|
||||
}
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
package schema
|
||||
|
||||
const ProviderVersion = "1.49.1"
|
||||
const ProviderVersion = "1.50.0"
|
||||
|
|
|
@ -3,11 +3,12 @@
|
|||
package schema
|
||||
|
||||
type DataSourceNotebook struct {
|
||||
Content string `json:"content,omitempty"`
|
||||
Format string `json:"format"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Language string `json:"language,omitempty"`
|
||||
ObjectId int `json:"object_id,omitempty"`
|
||||
ObjectType string `json:"object_type,omitempty"`
|
||||
Path string `json:"path"`
|
||||
Content string `json:"content,omitempty"`
|
||||
Format string `json:"format"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Language string `json:"language,omitempty"`
|
||||
ObjectId int `json:"object_id,omitempty"`
|
||||
ObjectType string `json:"object_type,omitempty"`
|
||||
Path string `json:"path"`
|
||||
WorkspacePath string `json:"workspace_path,omitempty"`
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ package schema
|
|||
|
||||
type DataSourceUser struct {
|
||||
AclPrincipalId string `json:"acl_principal_id,omitempty"`
|
||||
Active bool `json:"active,omitempty"`
|
||||
Alphanumeric string `json:"alphanumeric,omitempty"`
|
||||
ApplicationId string `json:"application_id,omitempty"`
|
||||
DisplayName string `json:"display_name,omitempty"`
|
||||
|
|
|
@ -33,7 +33,7 @@ type ResourceClusterPolicy struct {
|
|||
Description string `json:"description,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Name string `json:"name,omitempty"`
|
||||
PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
|
||||
PolicyFamilyId string `json:"policy_family_id,omitempty"`
|
||||
PolicyId string `json:"policy_id,omitempty"`
|
||||
|
|
|
@ -20,6 +20,12 @@ type ResourceMetastoreDataAccessAzureServicePrincipal struct {
|
|||
DirectoryId string `json:"directory_id"`
|
||||
}
|
||||
|
||||
type ResourceMetastoreDataAccessCloudflareApiToken struct {
|
||||
AccessKeyId string `json:"access_key_id"`
|
||||
AccountId string `json:"account_id"`
|
||||
SecretAccessKey string `json:"secret_access_key"`
|
||||
}
|
||||
|
||||
type ResourceMetastoreDataAccessDatabricksGcpServiceAccount struct {
|
||||
CredentialId string `json:"credential_id,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
|
@ -46,6 +52,7 @@ type ResourceMetastoreDataAccess struct {
|
|||
AwsIamRole *ResourceMetastoreDataAccessAwsIamRole `json:"aws_iam_role,omitempty"`
|
||||
AzureManagedIdentity *ResourceMetastoreDataAccessAzureManagedIdentity `json:"azure_managed_identity,omitempty"`
|
||||
AzureServicePrincipal *ResourceMetastoreDataAccessAzureServicePrincipal `json:"azure_service_principal,omitempty"`
|
||||
CloudflareApiToken *ResourceMetastoreDataAccessCloudflareApiToken `json:"cloudflare_api_token,omitempty"`
|
||||
DatabricksGcpServiceAccount *ResourceMetastoreDataAccessDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"`
|
||||
GcpServiceAccountKey *ResourceMetastoreDataAccessGcpServiceAccountKey `json:"gcp_service_account_key,omitempty"`
|
||||
}
|
||||
|
|
|
@ -10,43 +10,60 @@ type ResourceModelServingConfigAutoCaptureConfig struct {
|
|||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelAi21LabsConfig struct {
|
||||
Ai21LabsApiKey string `json:"ai21labs_api_key"`
|
||||
Ai21LabsApiKey string `json:"ai21labs_api_key,omitempty"`
|
||||
Ai21LabsApiKeyPlaintext string `json:"ai21labs_api_key_plaintext,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelAmazonBedrockConfig struct {
|
||||
AwsAccessKeyId string `json:"aws_access_key_id"`
|
||||
AwsRegion string `json:"aws_region"`
|
||||
AwsSecretAccessKey string `json:"aws_secret_access_key"`
|
||||
BedrockProvider string `json:"bedrock_provider"`
|
||||
AwsAccessKeyId string `json:"aws_access_key_id,omitempty"`
|
||||
AwsAccessKeyIdPlaintext string `json:"aws_access_key_id_plaintext,omitempty"`
|
||||
AwsRegion string `json:"aws_region"`
|
||||
AwsSecretAccessKey string `json:"aws_secret_access_key,omitempty"`
|
||||
AwsSecretAccessKeyPlaintext string `json:"aws_secret_access_key_plaintext,omitempty"`
|
||||
BedrockProvider string `json:"bedrock_provider"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelAnthropicConfig struct {
|
||||
AnthropicApiKey string `json:"anthropic_api_key"`
|
||||
AnthropicApiKey string `json:"anthropic_api_key,omitempty"`
|
||||
AnthropicApiKeyPlaintext string `json:"anthropic_api_key_plaintext,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelCohereConfig struct {
|
||||
CohereApiKey string `json:"cohere_api_key"`
|
||||
CohereApiBase string `json:"cohere_api_base,omitempty"`
|
||||
CohereApiKey string `json:"cohere_api_key,omitempty"`
|
||||
CohereApiKeyPlaintext string `json:"cohere_api_key_plaintext,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServingConfig struct {
|
||||
DatabricksApiToken string `json:"databricks_api_token"`
|
||||
DatabricksWorkspaceUrl string `json:"databricks_workspace_url"`
|
||||
DatabricksApiToken string `json:"databricks_api_token,omitempty"`
|
||||
DatabricksApiTokenPlaintext string `json:"databricks_api_token_plaintext,omitempty"`
|
||||
DatabricksWorkspaceUrl string `json:"databricks_workspace_url"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig struct {
|
||||
PrivateKey string `json:"private_key,omitempty"`
|
||||
PrivateKeyPlaintext string `json:"private_key_plaintext,omitempty"`
|
||||
ProjectId string `json:"project_id,omitempty"`
|
||||
Region string `json:"region,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig struct {
|
||||
MicrosoftEntraClientId string `json:"microsoft_entra_client_id,omitempty"`
|
||||
MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"`
|
||||
MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"`
|
||||
OpenaiApiBase string `json:"openai_api_base,omitempty"`
|
||||
OpenaiApiKey string `json:"openai_api_key,omitempty"`
|
||||
OpenaiApiType string `json:"openai_api_type,omitempty"`
|
||||
OpenaiApiVersion string `json:"openai_api_version,omitempty"`
|
||||
OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"`
|
||||
OpenaiOrganization string `json:"openai_organization,omitempty"`
|
||||
MicrosoftEntraClientId string `json:"microsoft_entra_client_id,omitempty"`
|
||||
MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"`
|
||||
MicrosoftEntraClientSecretPlaintext string `json:"microsoft_entra_client_secret_plaintext,omitempty"`
|
||||
MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"`
|
||||
OpenaiApiBase string `json:"openai_api_base,omitempty"`
|
||||
OpenaiApiKey string `json:"openai_api_key,omitempty"`
|
||||
OpenaiApiKeyPlaintext string `json:"openai_api_key_plaintext,omitempty"`
|
||||
OpenaiApiType string `json:"openai_api_type,omitempty"`
|
||||
OpenaiApiVersion string `json:"openai_api_version,omitempty"`
|
||||
OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"`
|
||||
OpenaiOrganization string `json:"openai_organization,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModelPalmConfig struct {
|
||||
PalmApiKey string `json:"palm_api_key"`
|
||||
PalmApiKey string `json:"palm_api_key,omitempty"`
|
||||
PalmApiKeyPlaintext string `json:"palm_api_key_plaintext,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceModelServingConfigServedEntitiesExternalModel struct {
|
||||
|
@ -58,6 +75,7 @@ type ResourceModelServingConfigServedEntitiesExternalModel struct {
|
|||
AnthropicConfig *ResourceModelServingConfigServedEntitiesExternalModelAnthropicConfig `json:"anthropic_config,omitempty"`
|
||||
CohereConfig *ResourceModelServingConfigServedEntitiesExternalModelCohereConfig `json:"cohere_config,omitempty"`
|
||||
DatabricksModelServingConfig *ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"`
|
||||
GoogleCloudVertexAiConfig *ResourceModelServingConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig `json:"google_cloud_vertex_ai_config,omitempty"`
|
||||
OpenaiConfig *ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig `json:"openai_config,omitempty"`
|
||||
PalmConfig *ResourceModelServingConfigServedEntitiesExternalModelPalmConfig `json:"palm_config,omitempty"`
|
||||
}
|
||||
|
|
|
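Illustrative values for the updated OpenAI config, written as if inside the same schema package: either the secret reference or the new plaintext variant is supplied, not both. The endpoint, scope, and key values are placeholders.

// Reference a Databricks secret (scope and key names are illustrative)...
var withSecret = ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig{
	OpenaiApiBase: "https://api.openai.com/v1",
	OpenaiApiKey:  "{{secrets/my_scope/openai_api_key}}",
}

// ...or pass the key directly using the new *_plaintext field.
var withPlaintext = ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig{
	OpenaiApiBase:         "https://api.openai.com/v1",
	OpenaiApiKeyPlaintext: "sk-...", // placeholder; never commit real keys
}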
@ -13,4 +13,5 @@ type ResourceNotebook struct {
|
|||
Path string `json:"path"`
|
||||
Source string `json:"source,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
WorkspacePath string `json:"workspace_path,omitempty"`
|
||||
}
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
type ResourceNotificationDestinationConfigEmail struct {
|
||||
Addresses []string `json:"addresses,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceNotificationDestinationConfigGenericWebhook struct {
|
||||
Password string `json:"password,omitempty"`
|
||||
PasswordSet bool `json:"password_set,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
UrlSet bool `json:"url_set,omitempty"`
|
||||
Username string `json:"username,omitempty"`
|
||||
UsernameSet bool `json:"username_set,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceNotificationDestinationConfigMicrosoftTeams struct {
|
||||
Url string `json:"url,omitempty"`
|
||||
UrlSet bool `json:"url_set,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceNotificationDestinationConfigPagerduty struct {
|
||||
IntegrationKey string `json:"integration_key,omitempty"`
|
||||
IntegrationKeySet bool `json:"integration_key_set,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceNotificationDestinationConfigSlack struct {
|
||||
Url string `json:"url,omitempty"`
|
||||
UrlSet bool `json:"url_set,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceNotificationDestinationConfig struct {
|
||||
Email *ResourceNotificationDestinationConfigEmail `json:"email,omitempty"`
|
||||
GenericWebhook *ResourceNotificationDestinationConfigGenericWebhook `json:"generic_webhook,omitempty"`
|
||||
MicrosoftTeams *ResourceNotificationDestinationConfigMicrosoftTeams `json:"microsoft_teams,omitempty"`
|
||||
Pagerduty *ResourceNotificationDestinationConfigPagerduty `json:"pagerduty,omitempty"`
|
||||
Slack *ResourceNotificationDestinationConfigSlack `json:"slack,omitempty"`
|
||||
}
|
||||
|
||||
type ResourceNotificationDestination struct {
|
||||
DestinationType string `json:"destination_type,omitempty"`
|
||||
DisplayName string `json:"display_name"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Config *ResourceNotificationDestinationConfig `json:"config,omitempty"`
|
||||
}
|
|
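An illustrative value for the new notification destination resource, written as if inside the same schema package; the display name and webhook URL are placeholders.

// Illustrative only: a Slack notification destination using the structs above.
var slackDestination = ResourceNotificationDestination{
	DisplayName: "deploy-alerts",
	Config: &ResourceNotificationDestinationConfig{
		Slack: &ResourceNotificationDestinationConfigSlack{
			Url: "https://hooks.slack.com/services/T000/B000/XXXX", // placeholder
		},
	},
}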
@ -3,15 +3,17 @@
|
|||
package schema
|
||||
|
||||
type ResourcePipelineClusterAutoscale struct {
|
||||
MaxWorkers int `json:"max_workers,omitempty"`
|
||||
MinWorkers int `json:"min_workers,omitempty"`
|
||||
MaxWorkers int `json:"max_workers"`
|
||||
MinWorkers int `json:"min_workers"`
|
||||
Mode string `json:"mode,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineClusterAwsAttributes struct {
|
||||
Availability string `json:"availability,omitempty"`
|
||||
EbsVolumeCount int `json:"ebs_volume_count,omitempty"`
|
||||
EbsVolumeIops int `json:"ebs_volume_iops,omitempty"`
|
||||
EbsVolumeSize int `json:"ebs_volume_size,omitempty"`
|
||||
EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"`
|
||||
EbsVolumeType string `json:"ebs_volume_type,omitempty"`
|
||||
FirstOnDemand int `json:"first_on_demand,omitempty"`
|
||||
InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
|
||||
|
@ -19,10 +21,16 @@ type ResourcePipelineClusterAwsAttributes struct {
|
|||
ZoneId string `json:"zone_id,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineClusterAzureAttributesLogAnalyticsInfo struct {
|
||||
LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"`
|
||||
LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineClusterAzureAttributes struct {
|
||||
Availability string `json:"availability,omitempty"`
|
||||
FirstOnDemand int `json:"first_on_demand,omitempty"`
|
||||
SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"`
|
||||
Availability string `json:"availability,omitempty"`
|
||||
FirstOnDemand int `json:"first_on_demand,omitempty"`
|
||||
SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"`
|
||||
LogAnalyticsInfo *ResourcePipelineClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineClusterClusterLogConfDbfs struct {
|
||||
|
@ -127,8 +135,69 @@ type ResourcePipelineFilters struct {
|
|||
Include []string `json:"include,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineGatewayDefinition struct {
|
||||
ConnectionId string `json:"connection_id,omitempty"`
|
||||
GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"`
|
||||
GatewayStorageName string `json:"gateway_storage_name,omitempty"`
|
||||
GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration struct {
|
||||
PrimaryKeys []string `json:"primary_keys,omitempty"`
|
||||
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
|
||||
ScdType string `json:"scd_type,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsSchema struct {
|
||||
DestinationCatalog string `json:"destination_catalog,omitempty"`
|
||||
DestinationSchema string `json:"destination_schema,omitempty"`
|
||||
SourceCatalog string `json:"source_catalog,omitempty"`
|
||||
SourceSchema string `json:"source_schema,omitempty"`
|
||||
TableConfiguration *ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration `json:"table_configuration,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration struct {
|
||||
PrimaryKeys []string `json:"primary_keys,omitempty"`
|
||||
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
|
||||
ScdType string `json:"scd_type,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjectsTable struct {
|
||||
DestinationCatalog string `json:"destination_catalog,omitempty"`
|
||||
DestinationSchema string `json:"destination_schema,omitempty"`
|
||||
DestinationTable string `json:"destination_table,omitempty"`
|
||||
SourceCatalog string `json:"source_catalog,omitempty"`
|
||||
SourceSchema string `json:"source_schema,omitempty"`
|
||||
SourceTable string `json:"source_table,omitempty"`
|
||||
TableConfiguration *ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration `json:"table_configuration,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionObjects struct {
|
||||
Schema *ResourcePipelineIngestionDefinitionObjectsSchema `json:"schema,omitempty"`
|
||||
Table *ResourcePipelineIngestionDefinitionObjectsTable `json:"table,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinitionTableConfiguration struct {
|
||||
PrimaryKeys []string `json:"primary_keys,omitempty"`
|
||||
SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
|
||||
ScdType string `json:"scd_type,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineIngestionDefinition struct {
|
||||
ConnectionName string `json:"connection_name,omitempty"`
|
||||
IngestionGatewayId string `json:"ingestion_gateway_id,omitempty"`
|
||||
Objects []ResourcePipelineIngestionDefinitionObjects `json:"objects,omitempty"`
|
||||
TableConfiguration *ResourcePipelineIngestionDefinitionTableConfiguration `json:"table_configuration,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineLatestUpdates struct {
|
||||
CreationTime string `json:"creation_time,omitempty"`
|
||||
State string `json:"state,omitempty"`
|
||||
UpdateId string `json:"update_id,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineLibraryFile struct {
|
||||
Path string `json:"path"`
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineLibraryMaven struct {
|
||||
|
@ -138,7 +207,7 @@ type ResourcePipelineLibraryMaven struct {
|
|||
}
|
||||
|
||||
type ResourcePipelineLibraryNotebook struct {
|
||||
Path string `json:"path"`
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineLibrary struct {
|
||||
|
@ -150,28 +219,53 @@ type ResourcePipelineLibrary struct {
|
|||
}
|
||||
|
||||
type ResourcePipelineNotification struct {
|
||||
Alerts []string `json:"alerts"`
|
||||
EmailRecipients []string `json:"email_recipients"`
|
||||
Alerts []string `json:"alerts,omitempty"`
|
||||
EmailRecipients []string `json:"email_recipients,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineTriggerCron struct {
|
||||
QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"`
|
||||
TimezoneId string `json:"timezone_id,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipelineTriggerManual struct {
|
||||
}
|
||||
|
||||
type ResourcePipelineTrigger struct {
|
||||
Cron *ResourcePipelineTriggerCron `json:"cron,omitempty"`
|
||||
Manual *ResourcePipelineTriggerManual `json:"manual,omitempty"`
|
||||
}
|
||||
|
||||
type ResourcePipeline struct {
|
||||
AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"`
|
||||
Catalog string `json:"catalog,omitempty"`
|
||||
Channel string `json:"channel,omitempty"`
|
||||
Configuration map[string]string `json:"configuration,omitempty"`
|
||||
Continuous bool `json:"continuous,omitempty"`
|
||||
Development bool `json:"development,omitempty"`
|
||||
Edition string `json:"edition,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Photon bool `json:"photon,omitempty"`
|
||||
Serverless bool `json:"serverless,omitempty"`
|
||||
Storage string `json:"storage,omitempty"`
|
||||
Target string `json:"target,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Cluster []ResourcePipelineCluster `json:"cluster,omitempty"`
|
||||
Deployment *ResourcePipelineDeployment `json:"deployment,omitempty"`
|
||||
Filters *ResourcePipelineFilters `json:"filters,omitempty"`
|
||||
Library []ResourcePipelineLibrary `json:"library,omitempty"`
|
||||
Notification []ResourcePipelineNotification `json:"notification,omitempty"`
|
||||
AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"`
|
||||
Catalog string `json:"catalog,omitempty"`
|
||||
Cause string `json:"cause,omitempty"`
|
||||
Channel string `json:"channel,omitempty"`
|
||||
ClusterId string `json:"cluster_id,omitempty"`
|
||||
Configuration map[string]string `json:"configuration,omitempty"`
|
||||
Continuous bool `json:"continuous,omitempty"`
|
||||
CreatorUserName string `json:"creator_user_name,omitempty"`
|
||||
Development bool `json:"development,omitempty"`
|
||||
Edition string `json:"edition,omitempty"`
|
||||
ExpectedLastModified int `json:"expected_last_modified,omitempty"`
|
||||
Health string `json:"health,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
LastModified int `json:"last_modified,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Photon bool `json:"photon,omitempty"`
|
||||
RunAsUserName string `json:"run_as_user_name,omitempty"`
|
||||
Serverless bool `json:"serverless,omitempty"`
|
||||
State string `json:"state,omitempty"`
|
||||
Storage string `json:"storage,omitempty"`
|
||||
Target string `json:"target,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Cluster []ResourcePipelineCluster `json:"cluster,omitempty"`
|
||||
Deployment *ResourcePipelineDeployment `json:"deployment,omitempty"`
|
||||
Filters *ResourcePipelineFilters `json:"filters,omitempty"`
|
||||
GatewayDefinition *ResourcePipelineGatewayDefinition `json:"gateway_definition,omitempty"`
|
||||
IngestionDefinition *ResourcePipelineIngestionDefinition `json:"ingestion_definition,omitempty"`
|
||||
LatestUpdates []ResourcePipelineLatestUpdates `json:"latest_updates,omitempty"`
|
||||
Library []ResourcePipelineLibrary `json:"library,omitempty"`
|
||||
Notification []ResourcePipelineNotification `json:"notification,omitempty"`
|
||||
Trigger *ResourcePipelineTrigger `json:"trigger,omitempty"`
|
||||
}
|
||||
|
|
|
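An illustrative value for the new trigger block, written as if inside the same schema package; the cron expression and timezone are placeholders.

// Illustrative only: a cron-based pipeline trigger using the new structs above.
var nightlyTrigger = ResourcePipelineTrigger{
	Cron: &ResourcePipelineTriggerCron{
		QuartzCronSchedule: "0 0 2 * * ?", // placeholder: every day at 02:00
		TimezoneId:         "UTC",
	},
}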
@ -20,6 +20,12 @@ type ResourceStorageCredentialAzureServicePrincipal struct {
|
|||
DirectoryId string `json:"directory_id"`
|
||||
}
|
||||
|
||||
type ResourceStorageCredentialCloudflareApiToken struct {
|
||||
AccessKeyId string `json:"access_key_id"`
|
||||
AccountId string `json:"account_id"`
|
||||
SecretAccessKey string `json:"secret_access_key"`
|
||||
}
|
||||
|
||||
type ResourceStorageCredentialDatabricksGcpServiceAccount struct {
|
||||
CredentialId string `json:"credential_id,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
|
@ -46,6 +52,7 @@ type ResourceStorageCredential struct {
|
|||
AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"`
|
||||
AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"`
|
||||
AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"`
|
||||
CloudflareApiToken *ResourceStorageCredentialCloudflareApiToken `json:"cloudflare_api_token,omitempty"`
|
||||
DatabricksGcpServiceAccount *ResourceStorageCredentialDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"`
|
||||
GcpServiceAccountKey *ResourceStorageCredentialGcpServiceAccountKey `json:"gcp_service_account_key,omitempty"`
|
||||
}
|
||||
|
|
|
@ -59,6 +59,7 @@ type Resources struct {
|
|||
MwsVpcEndpoint map[string]any `json:"databricks_mws_vpc_endpoint,omitempty"`
|
||||
MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"`
|
||||
Notebook map[string]any `json:"databricks_notebook,omitempty"`
|
||||
NotificationDestination map[string]any `json:"databricks_notification_destination,omitempty"`
|
||||
OboToken map[string]any `json:"databricks_obo_token,omitempty"`
|
||||
OnlineTable map[string]any `json:"databricks_online_table,omitempty"`
|
||||
PermissionAssignment map[string]any `json:"databricks_permission_assignment,omitempty"`
|
||||
|
@ -160,6 +161,7 @@ func NewResources() *Resources {
|
|||
MwsVpcEndpoint: make(map[string]any),
|
||||
MwsWorkspaces: make(map[string]any),
|
||||
Notebook: make(map[string]any),
|
||||
NotificationDestination: make(map[string]any),
|
||||
OboToken: make(map[string]any),
|
||||
OnlineTable: make(map[string]any),
|
||||
PermissionAssignment: make(map[string]any),
|
||||
|
|
|
@ -21,7 +21,7 @@ type Root struct {
|
|||
|
||||
const ProviderHost = "registry.terraform.io"
|
||||
const ProviderSource = "databricks/databricks"
|
||||
const ProviderVersion = "1.49.1"
|
||||
const ProviderVersion = "1.50.0"
|
||||
|
||||
func NewRoot() *Root {
|
||||
return &Root{
|
||||
|
|
|
@ -0,0 +1,221 @@
|
|||
package libraries
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
)
|
||||
|
||||
type expand struct {
|
||||
}
|
||||
|
||||
func matchError(p dyn.Path, l []dyn.Location, message string) diag.Diagnostic {
|
||||
return diag.Diagnostic{
|
||||
Severity: diag.Error,
|
||||
Summary: message,
|
||||
Paths: []dyn.Path{
|
||||
p.Append(),
|
||||
},
|
||||
Locations: l,
|
||||
}
|
||||
}
|
||||
|
||||
func getLibDetails(v dyn.Value) (string, string, bool) {
|
||||
m := v.MustMap()
|
||||
whl, ok := m.GetByString("whl")
|
||||
if ok {
|
||||
return whl.MustString(), "whl", true
|
||||
}
|
||||
|
||||
jar, ok := m.GetByString("jar")
|
||||
if ok {
|
||||
return jar.MustString(), "jar", true
|
||||
}
|
||||
|
||||
return "", "", false
|
||||
}
|
||||
|
||||
func findMatches(b *bundle.Bundle, path string) ([]string, error) {
|
||||
matches, err := filepath.Glob(filepath.Join(b.RootPath, path))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(matches) == 0 {
|
||||
if isGlobPattern(path) {
|
||||
return nil, fmt.Errorf("no files match pattern: %s", path)
|
||||
} else {
|
||||
return nil, fmt.Errorf("file doesn't exist %s", path)
|
||||
}
|
||||
}
|
||||
|
||||
// We make the matched path relative to the root path before storing it
|
||||
// to allow upload mutator to distinguish between local and remote paths
|
||||
for i, match := range matches {
|
||||
matches[i], err = filepath.Rel(b.RootPath, match)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// Checks if the path is a glob pattern
|
||||
// It can contain *, [] or ? characters
|
||||
func isGlobPattern(path string) bool {
|
||||
return strings.ContainsAny(path, "*?[")
|
||||
}
|
||||
|
||||
func expandLibraries(b *bundle.Bundle, p dyn.Path, v dyn.Value) (diag.Diagnostics, []dyn.Value) {
|
||||
var output []dyn.Value
|
||||
var diags diag.Diagnostics
|
||||
|
||||
libs := v.MustSequence()
|
||||
for i, lib := range libs {
|
||||
lp := p.Append(dyn.Index(i))
|
||||
path, libType, supported := getLibDetails(lib)
|
||||
if !supported || !IsLibraryLocal(path) {
|
||||
output = append(output, lib)
|
||||
continue
|
||||
}
|
||||
|
||||
lp = lp.Append(dyn.Key(libType))
|
||||
|
||||
matches, err := findMatches(b, path)
|
||||
if err != nil {
|
||||
diags = diags.Append(matchError(lp, lib.Locations(), err.Error()))
|
||||
continue
|
||||
}
|
||||
|
||||
for _, match := range matches {
|
||||
output = append(output, dyn.NewValue(map[string]dyn.Value{
|
||||
libType: dyn.V(match),
|
||||
}, lib.Locations()))
|
||||
}
|
||||
}
|
||||
|
||||
return diags, output
|
||||
}
|
||||
|
||||
func expandEnvironmentDeps(b *bundle.Bundle, p dyn.Path, v dyn.Value) (diag.Diagnostics, []dyn.Value) {
|
||||
var output []dyn.Value
|
||||
var diags diag.Diagnostics
|
||||
|
||||
deps := v.MustSequence()
|
||||
for i, dep := range deps {
|
||||
lp := p.Append(dyn.Index(i))
|
||||
path := dep.MustString()
|
||||
if !IsLibraryLocal(path) {
|
||||
output = append(output, dep)
|
||||
continue
|
||||
}
|
||||
|
||||
matches, err := findMatches(b, path)
|
||||
if err != nil {
|
||||
diags = diags.Append(matchError(lp, dep.Locations(), err.Error()))
|
||||
continue
|
||||
}
|
||||
|
||||
for _, match := range matches {
|
||||
output = append(output, dyn.NewValue(match, dep.Locations()))
|
||||
}
|
||||
}
|
||||
|
||||
return diags, output
|
||||
}
|
||||
|
||||
type expandPattern struct {
|
||||
pattern dyn.Pattern
|
||||
fn func(b *bundle.Bundle, p dyn.Path, v dyn.Value) (diag.Diagnostics, []dyn.Value)
|
||||
}
|
||||
|
||||
var taskLibrariesPattern = dyn.NewPattern(
|
||||
dyn.Key("resources"),
|
||||
dyn.Key("jobs"),
|
||||
dyn.AnyKey(),
|
||||
dyn.Key("tasks"),
|
||||
dyn.AnyIndex(),
|
||||
dyn.Key("libraries"),
|
||||
)
|
||||
|
||||
var forEachTaskLibrariesPattern = dyn.NewPattern(
|
||||
dyn.Key("resources"),
|
||||
dyn.Key("jobs"),
|
||||
dyn.AnyKey(),
|
||||
dyn.Key("tasks"),
|
||||
dyn.AnyIndex(),
|
||||
dyn.Key("for_each_task"),
|
||||
dyn.Key("task"),
|
||||
dyn.Key("libraries"),
|
||||
)
|
||||
|
||||
var envDepsPattern = dyn.NewPattern(
|
||||
dyn.Key("resources"),
|
||||
dyn.Key("jobs"),
|
||||
dyn.AnyKey(),
|
||||
dyn.Key("environments"),
|
||||
dyn.AnyIndex(),
|
||||
dyn.Key("spec"),
|
||||
dyn.Key("dependencies"),
|
||||
)
|
||||
|
||||
func (e *expand) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
expanders := []expandPattern{
|
||||
{
|
||||
pattern: taskLibrariesPattern,
|
||||
fn: expandLibraries,
|
||||
},
|
||||
{
|
||||
pattern: forEachTaskLibrariesPattern,
|
||||
fn: expandLibraries,
|
||||
},
|
||||
{
|
||||
pattern: envDepsPattern,
|
||||
fn: expandEnvironmentDeps,
|
||||
},
|
||||
}
|
||||
|
||||
var diags diag.Diagnostics
|
||||
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
var err error
|
||||
for _, expander := range expanders {
|
||||
v, err = dyn.MapByPattern(v, expander.pattern, func(p dyn.Path, lv dyn.Value) (dyn.Value, error) {
|
||||
d, output := expander.fn(b, p, lv)
|
||||
diags = diags.Extend(d)
|
||||
return dyn.V(output), nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return dyn.InvalidValue, err
|
||||
}
|
||||
}
|
||||
|
||||
return v, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
diags = diags.Extend(diag.FromErr(err))
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
func (e *expand) Name() string {
|
||||
return "libraries.ExpandGlobReferences"
|
||||
}
|
||||
|
||||
// ExpandGlobReferences expands any glob references in the libraries or environments section
|
||||
// to corresponding local paths.
|
||||
// We only expand local paths (i.e. paths that are relative to the root path).
|
||||
// After expanding we make the paths relative to the root path to allow upload mutator later in the chain to
|
||||
// distinguish between local and remote paths.
|
||||
func ExpandGlobReferences() bundle.Mutator {
|
||||
return &expand{}
|
||||
}
|
|
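A standalone sketch of the expand-and-relativize step that findMatches above performs for local library globs; the root and pattern are illustrative and the helper name is made up.

package main

import (
	"fmt"
	"path/filepath"
)

// expandRelative globs a pattern under root and returns the matches relative
// to root, mirroring what findMatches does for local library paths.
func expandRelative(root, pattern string) ([]string, error) {
	matches, err := filepath.Glob(filepath.Join(root, pattern))
	if err != nil {
		return nil, err
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("no files match pattern: %s", pattern)
	}
	for i, m := range matches {
		if matches[i], err = filepath.Rel(root, m); err != nil {
			return nil, err
		}
	}
	return matches, nil
}

func main() {
	out, err := expandRelative("/tmp/bundle", "whl/*.whl") // illustrative paths
	fmt.Println(out, err)
}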
@ -0,0 +1,239 @@
|
|||
package libraries
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/cli/bundle/internal/bundletest"
|
||||
"github.com/databricks/cli/internal/testutil"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGlobReferencesExpandedForTaskLibraries(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
testutil.Touch(t, dir, "whl", "my1.whl")
|
||||
testutil.Touch(t, dir, "whl", "my2.whl")
|
||||
testutil.Touch(t, dir, "jar", "my1.jar")
|
||||
testutil.Touch(t, dir, "jar", "my2.jar")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
TaskKey: "task",
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: "whl/*.whl",
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/path/to/whl/my.whl",
|
||||
},
|
||||
{
|
||||
Jar: "./jar/*.jar",
|
||||
},
|
||||
{
|
||||
Egg: "egg/*.egg",
|
||||
},
|
||||
{
|
||||
Jar: "/Workspace/path/to/jar/*.jar",
|
||||
},
|
||||
{
|
||||
Whl: "/some/full/path/to/whl/*.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
|
||||
require.Empty(t, diags)
|
||||
|
||||
job := b.Config.Resources.Jobs["job"]
|
||||
task := job.JobSettings.Tasks[0]
|
||||
require.Equal(t, []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "my1.whl"),
|
||||
},
|
||||
{
|
||||
Whl: filepath.Join("whl", "my2.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/path/to/whl/my.whl",
|
||||
},
|
||||
{
|
||||
Jar: filepath.Join("jar", "my1.jar"),
|
||||
},
|
||||
{
|
||||
Jar: filepath.Join("jar", "my2.jar"),
|
||||
},
|
||||
{
|
||||
Egg: "egg/*.egg",
|
||||
},
|
||||
{
|
||||
Jar: "/Workspace/path/to/jar/*.jar",
|
||||
},
|
||||
{
|
||||
Whl: "/some/full/path/to/whl/*.whl",
|
||||
},
|
||||
}, task.Libraries)
|
||||
}
|
||||
|
||||
func TestGlobReferencesExpandedForForeachTaskLibraries(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
testutil.Touch(t, dir, "whl", "my1.whl")
|
||||
testutil.Touch(t, dir, "whl", "my2.whl")
|
||||
testutil.Touch(t, dir, "jar", "my1.jar")
|
||||
testutil.Touch(t, dir, "jar", "my2.jar")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
TaskKey: "task",
|
||||
ForEachTask: &jobs.ForEachTask{
|
||||
Task: jobs.Task{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: "whl/*.whl",
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/path/to/whl/my.whl",
|
||||
},
|
||||
{
|
||||
Jar: "./jar/*.jar",
|
||||
},
|
||||
{
|
||||
Egg: "egg/*.egg",
|
||||
},
|
||||
{
|
||||
Jar: "/Workspace/path/to/jar/*.jar",
|
||||
},
|
||||
{
|
||||
Whl: "/some/full/path/to/whl/*.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
|
||||
require.Empty(t, diags)
|
||||
|
||||
job := b.Config.Resources.Jobs["job"]
|
||||
task := job.JobSettings.Tasks[0].ForEachTask.Task
|
||||
require.Equal(t, []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "my1.whl"),
|
||||
},
|
||||
{
|
||||
Whl: filepath.Join("whl", "my2.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/path/to/whl/my.whl",
|
||||
},
|
||||
{
|
||||
Jar: filepath.Join("jar", "my1.jar"),
|
||||
},
|
||||
{
|
||||
Jar: filepath.Join("jar", "my2.jar"),
|
||||
},
|
||||
{
|
||||
Egg: "egg/*.egg",
|
||||
},
|
||||
{
|
||||
Jar: "/Workspace/path/to/jar/*.jar",
|
||||
},
|
||||
{
|
||||
Whl: "/some/full/path/to/whl/*.whl",
|
||||
},
|
||||
}, task.Libraries)
|
||||
}
|
||||
|
||||
func TestGlobReferencesExpandedForEnvironmentsDeps(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
testutil.Touch(t, dir, "whl", "my1.whl")
|
||||
testutil.Touch(t, dir, "whl", "my2.whl")
|
||||
testutil.Touch(t, dir, "jar", "my1.jar")
|
||||
testutil.Touch(t, dir, "jar", "my2.jar")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: dir,
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
TaskKey: "task",
|
||||
EnvironmentKey: "env",
|
||||
},
|
||||
},
|
||||
Environments: []jobs.JobEnvironment{
|
||||
{
|
||||
EnvironmentKey: "env",
|
||||
Spec: &compute.Environment{
|
||||
Dependencies: []string{
|
||||
"./whl/*.whl",
|
||||
"/Workspace/path/to/whl/my.whl",
|
||||
"./jar/*.jar",
|
||||
"/some/local/path/to/whl/*.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
|
||||
require.Empty(t, diags)
|
||||
|
||||
job := b.Config.Resources.Jobs["job"]
|
||||
env := job.JobSettings.Environments[0]
|
||||
require.Equal(t, []string{
|
||||
filepath.Join("whl", "my1.whl"),
|
||||
filepath.Join("whl", "my2.whl"),
|
||||
"/Workspace/path/to/whl/my.whl",
|
||||
filepath.Join("jar", "my1.jar"),
|
||||
filepath.Join("jar", "my2.jar"),
|
||||
"/some/local/path/to/whl/*.whl",
|
||||
}, env.Spec.Dependencies)
|
||||
}
|
|
@ -1,16 +1,24 @@
|
|||
package libraries
|
||||
|
||||
import "github.com/databricks/databricks-sdk-go/service/compute"
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
func libraryPath(library *compute.Library) string {
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
)
|
||||
|
||||
func libraryPath(library *compute.Library) (string, error) {
|
||||
if library.Whl != "" {
|
||||
return library.Whl
|
||||
return library.Whl, nil
|
||||
}
|
||||
if library.Jar != "" {
|
||||
return library.Jar
|
||||
return library.Jar, nil
|
||||
}
|
||||
if library.Egg != "" {
|
||||
return library.Egg
|
||||
return library.Egg, nil
|
||||
}
|
||||
return ""
|
||||
if library.Requirements != "" {
|
||||
return library.Requirements, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("not supported library type")
|
||||
}
|
||||
|
|
|
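A short sketch, written as if inside the libraries package, of calling the updated libraryPath, which now signals unsupported library types with an error instead of an empty string; the helper name is made up.

// localLibraryPaths collects the local library paths of a task, skipping
// entries whose type libraryPath does not support (e.g. PyPI packages).
func localLibraryPaths(task jobs.Task) []string {
	var out []string
	for _, lib := range task.Libraries {
		p, err := libraryPath(&lib)
		if err != nil {
			continue
		}
		if IsLibraryLocal(p) {
			out = append(out, p)
		}
	}
	return out
}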
@ -10,8 +10,27 @@ import (
|
|||
func TestLibraryPath(t *testing.T) {
|
||||
path := "/some/path"
|
||||
|
||||
assert.Equal(t, path, libraryPath(&compute.Library{Whl: path}))
|
||||
assert.Equal(t, path, libraryPath(&compute.Library{Jar: path}))
|
||||
assert.Equal(t, path, libraryPath(&compute.Library{Egg: path}))
|
||||
assert.Equal(t, "", libraryPath(&compute.Library{}))
|
||||
p, err := libraryPath(&compute.Library{Whl: path})
|
||||
assert.Equal(t, path, p)
|
||||
assert.Nil(t, err)
|
||||
|
||||
p, err = libraryPath(&compute.Library{Jar: path})
|
||||
assert.Equal(t, path, p)
|
||||
assert.Nil(t, err)
|
||||
|
||||
p, err = libraryPath(&compute.Library{Egg: path})
|
||||
assert.Equal(t, path, p)
|
||||
assert.Nil(t, err)
|
||||
|
||||
p, err = libraryPath(&compute.Library{Requirements: path})
|
||||
assert.Equal(t, path, p)
|
||||
assert.Nil(t, err)
|
||||
|
||||
p, err = libraryPath(&compute.Library{})
|
||||
assert.Equal(t, "", p)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}})
|
||||
assert.Equal(t, "", p)
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
|
|
@@ -35,7 +35,7 @@ func isEnvsWithLocalLibraries(envs []jobs.JobEnvironment) bool {
|
|||
}
|
||||
|
||||
for _, l := range e.Spec.Dependencies {
|
||||
if IsEnvironmentDependencyLocal(l) {
|
||||
if IsLibraryLocal(l) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@@ -67,7 +67,12 @@ func FindTasksWithLocalLibraries(b *bundle.Bundle) []jobs.Task {
|
|||
|
||||
func isTaskWithLocalLibraries(task jobs.Task) bool {
|
||||
for _, l := range task.Libraries {
|
||||
if IsLocalLibrary(&l) {
|
||||
p, err := libraryPath(&l)
|
||||
// If there's an error, skip the library because it's not of a supported type
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if IsLibraryLocal(p) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -3,9 +3,8 @@ package libraries
|
|||
import (
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
)
|
||||
|
||||
// IsLocalPath returns true if the specified path indicates that
|
||||
|
@@ -38,12 +37,16 @@ func IsLocalPath(p string) bool {
|
|||
return !path.IsAbs(p)
|
||||
}
|
||||
|
||||
// IsEnvironmentDependencyLocal returns true if the specified dependency
|
||||
// IsLibraryLocal returns true if the specified library or environment dependency
|
||||
// should be interpreted as a local path.
|
||||
// We use this to check if the dependency in environment spec is local.
|
||||
// We use this to check if the dependency in an environment spec is local or if the library itself is local.
|
||||
// We can't use IsLocalPath because environment dependencies can be
|
||||
// a pypi package name which can be misinterpreted as a local path by IsLocalPath.
|
||||
func IsEnvironmentDependencyLocal(dep string) bool {
|
||||
func IsLibraryLocal(dep string) bool {
|
||||
if dep == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
possiblePrefixes := []string{
|
||||
".",
|
||||
}
|
||||
|
@@ -54,7 +57,40 @@ func IsEnvironmentDependencyLocal(dep string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
return false
|
||||
// If the dependency is a requirements file, it's not a valid local path
|
||||
if strings.HasPrefix(dep, "-r") {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the dependency has no extension, it's a PyPI package name
|
||||
if isPackage(dep) {
|
||||
return false
|
||||
}
|
||||
|
||||
return IsLocalPath(dep)
|
||||
}
|
||||
|
||||
// ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_).
|
||||
// (\[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security].
|
||||
// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?)?: Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1).
|
||||
// Spec for package name and version specifier: https://pip.pypa.io/en/stable/reference/requirement-specifiers/
|
||||
var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?)?$`)
|
||||
|
||||
func isPackage(name string) bool {
|
||||
if packageRegex.MatchString(name) {
|
||||
return true
|
||||
}
|
||||
|
||||
return isUrlBasedLookup(name)
|
||||
}
|
||||
|
||||
func isUrlBasedLookup(name string) bool {
|
||||
parts := strings.Split(name, " @ ")
|
||||
if len(parts) != 2 {
|
||||
return false
|
||||
}
|
||||
|
||||
return packageRegex.MatchString(parts[0]) && isRemoteStorageScheme(parts[1])
|
||||
}
|
||||
|
||||
func isRemoteStorageScheme(path string) bool {
|
||||
|
@@ -67,16 +103,6 @@ func isRemoteStorageScheme(path string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// If the path starts with scheme:/ format, it's a correct remote storage scheme
|
||||
return strings.HasPrefix(path, url.Scheme+":/")
|
||||
}
|
||||
|
||||
// IsLocalLibrary returns true if the specified library refers to a local path.
|
||||
func IsLocalLibrary(library *compute.Library) bool {
|
||||
path := libraryPath(library)
|
||||
if path == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
return IsLocalPath(path)
|
||||
// If the path starts with scheme:/ format (not file), it's a correct remote storage scheme
|
||||
return strings.HasPrefix(path, url.Scheme+":/") && url.Scheme != "file"
|
||||
}
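A sketch of how IsLibraryLocal is expected to classify typical dependency strings (hypothetical helper in the same package; the cases mirror the table-driven test below):

package libraries

import "fmt"

func exampleClassifyDependencies() {
	deps := []string{
		"./whl/*.whl",                    // relative glob -> local
		"file://path/to/package/whl.whl", // file scheme -> local
		"beautifulsoup4==4.12.3",         // PyPI requirement specifier -> not local
		"pip @ https://github.com/pypa/pip/archive/22.0.2.zip", // URL-based lookup -> not local
		"-r /Workspace/my_project/requirements.txt",            // requirements file flag -> not local
		"s3://mybucket/path/to/package",                        // remote storage scheme -> not local
	}
	for _, dep := range deps {
		fmt.Printf("%-55s local=%v\n", dep, IsLibraryLocal(dep))
	}
}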
|
||||
|
|
|
@@ -3,13 +3,13 @@ package libraries
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIsLocalPath(t *testing.T) {
|
||||
// Relative paths, paths with the file scheme, and Windows paths.
|
||||
assert.True(t, IsLocalPath("some/local/path"))
|
||||
assert.True(t, IsLocalPath("./some/local/path"))
|
||||
assert.True(t, IsLocalPath("file://path/to/package"))
|
||||
assert.True(t, IsLocalPath("C:\\path\\to\\package"))
|
||||
|
@@ -30,24 +30,13 @@ func TestIsLocalPath(t *testing.T) {
|
|||
assert.False(t, IsLocalPath("abfss://path/to/package"))
|
||||
}
|
||||
|
||||
func TestIsLocalLibrary(t *testing.T) {
|
||||
// Local paths.
|
||||
assert.True(t, IsLocalLibrary(&compute.Library{Whl: "./file.whl"}))
|
||||
assert.True(t, IsLocalLibrary(&compute.Library{Jar: "../target/some.jar"}))
|
||||
|
||||
// Non-local paths.
|
||||
assert.False(t, IsLocalLibrary(&compute.Library{Whl: "/Workspace/path/to/file.whl"}))
|
||||
assert.False(t, IsLocalLibrary(&compute.Library{Jar: "s3:/bucket/path/some.jar"}))
|
||||
|
||||
// Empty.
|
||||
assert.False(t, IsLocalLibrary(&compute.Library{}))
|
||||
}
|
||||
|
||||
func TestIsEnvironmentDependencyLocal(t *testing.T) {
|
||||
func TestIsLibraryLocal(t *testing.T) {
|
||||
testCases := [](struct {
|
||||
path string
|
||||
expected bool
|
||||
}){
|
||||
{path: "local/*.whl", expected: true},
|
||||
{path: "local/test.whl", expected: true},
|
||||
{path: "./local/*.whl", expected: true},
|
||||
{path: ".\\local\\*.whl", expected: true},
|
||||
{path: "./local/mypath.whl", expected: true},
|
||||
|
@@ -58,15 +47,27 @@ func TestIsEnvironmentDependencyLocal(t *testing.T) {
|
|||
{path: ".\\..\\local\\*.whl", expected: true},
|
||||
{path: "../../local/*.whl", expected: true},
|
||||
{path: "..\\..\\local\\*.whl", expected: true},
|
||||
{path: "file://path/to/package/whl.whl", expected: true},
|
||||
{path: "", expected: false},
|
||||
{path: "pypipackage", expected: false},
|
||||
{path: "pypipackage/test.whl", expected: false},
|
||||
{path: "pypipackage/*.whl", expected: false},
|
||||
{path: "/Volumes/catalog/schema/volume/path.whl", expected: false},
|
||||
{path: "/Workspace/my_project/dist.whl", expected: false},
|
||||
{path: "-r /Workspace/my_project/requirements.txt", expected: false},
|
||||
{path: "s3://mybucket/path/to/package", expected: false},
|
||||
{path: "dbfs:/mnt/path/to/package", expected: false},
|
||||
{path: "beautifulsoup4", expected: false},
|
||||
{path: "beautifulsoup4==4.12.3", expected: false},
|
||||
{path: "beautifulsoup4 >= 4.12.3", expected: false},
|
||||
{path: "beautifulsoup4 < 4.12.3", expected: false},
|
||||
{path: "beautifulsoup4 ~= 4.12.3", expected: false},
|
||||
{path: "beautifulsoup4[security, tests]", expected: false},
|
||||
{path: "beautifulsoup4[security, tests] ~= 4.12.3", expected: false},
|
||||
{path: "https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
|
||||
{path: "pip @ https://github.com/pypa/pip/archive/22.0.2.zip", expected: false},
|
||||
{path: "requests [security] @ https://github.com/psf/requests/archive/refs/heads/main.zip", expected: false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
require.Equal(t, IsEnvironmentDependencyLocal(tc.path), tc.expected)
|
||||
for i, tc := range testCases {
|
||||
require.Equalf(t, tc.expected, IsLibraryLocal(tc.path), "failed case: %d, path: %s", i, tc.path)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,82 +0,0 @@
|
|||
package libraries
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
)
|
||||
|
||||
type match struct {
|
||||
}
|
||||
|
||||
func ValidateLocalLibrariesExist() bundle.Mutator {
|
||||
return &match{}
|
||||
}
|
||||
|
||||
func (a *match) Name() string {
|
||||
return "libraries.ValidateLocalLibrariesExist"
|
||||
}
|
||||
|
||||
func (a *match) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
for _, job := range b.Config.Resources.Jobs {
|
||||
err := validateEnvironments(job.Environments, b)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
for _, task := range job.JobSettings.Tasks {
|
||||
err := validateTaskLibraries(task.Libraries, b)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateTaskLibraries(libs []compute.Library, b *bundle.Bundle) error {
|
||||
for _, lib := range libs {
|
||||
path := libraryPath(&lib)
|
||||
if path == "" || !IsLocalPath(path) {
|
||||
continue
|
||||
}
|
||||
|
||||
matches, err := filepath.Glob(filepath.Join(b.RootPath, path))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(matches) == 0 {
|
||||
return fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libraryPath(&lib))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateEnvironments(envs []jobs.JobEnvironment, b *bundle.Bundle) error {
|
||||
for _, env := range envs {
|
||||
if env.Spec == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, dep := range env.Spec.Dependencies {
|
||||
matches, err := filepath.Glob(filepath.Join(b.RootPath, dep))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(matches) == 0 && IsEnvironmentDependencyLocal(dep) {
|
||||
return fmt.Errorf("file %s is referenced in environments section but doesn't exist on the local file system", dep)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -42,7 +42,7 @@ func TestValidateEnvironments(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist())
|
||||
diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
|
||||
require.Nil(t, diags)
|
||||
}
|
||||
|
||||
|
@@ -74,9 +74,9 @@ func TestValidateEnvironmentsNoFile(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist())
|
||||
diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
|
||||
require.Len(t, diags, 1)
|
||||
require.Equal(t, "file ./wheel.whl is referenced in environments section but doesn't exist on the local file system", diags[0].Summary)
|
||||
require.Equal(t, "file doesn't exist ./wheel.whl", diags[0].Summary)
|
||||
}
|
||||
|
||||
func TestValidateTaskLibraries(t *testing.T) {
|
||||
|
@@ -109,7 +109,7 @@ func TestValidateTaskLibraries(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist())
|
||||
diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
|
||||
require.Nil(t, diags)
|
||||
}
|
||||
|
||||
|
@@ -142,7 +142,7 @@ func TestValidateTaskLibrariesNoFile(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist())
|
||||
diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
|
||||
require.Len(t, diags, 1)
|
||||
require.Equal(t, "file ./wheel.whl is referenced in libraries section but doesn't exist on the local file system", diags[0].Summary)
|
||||
require.Equal(t, "file doesn't exist ./wheel.whl", diags[0].Summary)
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,238 @@
|
|||
package libraries
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/libs/cmdio"
|
||||
"github.com/databricks/cli/libs/diag"
|
||||
"github.com/databricks/cli/libs/dyn"
|
||||
"github.com/databricks/cli/libs/filer"
|
||||
"github.com/databricks/cli/libs/log"
|
||||
|
||||
"github.com/databricks/databricks-sdk-go"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// The Files API backend has a rate limit of 10 concurrent
|
||||
// requests and 100 QPS. We limit the number of concurrent requests to 5 to
|
||||
// avoid hitting the rate limit.
|
||||
var maxFilesRequestsInFlight = 5
|
||||
|
||||
func Upload() bundle.Mutator {
|
||||
return &upload{}
|
||||
}
|
||||
|
||||
func UploadWithClient(client filer.Filer) bundle.Mutator {
|
||||
return &upload{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
type upload struct {
|
||||
client filer.Filer
|
||||
}
|
||||
|
||||
type configLocation struct {
|
||||
configPath dyn.Path
|
||||
location dyn.Location
|
||||
}
|
||||
|
||||
// Collect all libraries from the bundle configuration and their config paths.
|
||||
// By this stage all glob references are expanded and we have a list of all libraries that need to be uploaded.
|
||||
// We collect them from task libraries, foreach task libraries, environment dependencies, and artifacts.
|
||||
// We return a map of library source to a list of config paths and locations where the library is used.
|
||||
// We use a map so we don't upload the same library multiple times.
|
||||
// Instead we upload it once and update all the config paths to point to the uploaded location.
|
||||
func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error) {
|
||||
libs := make(map[string]([]configLocation))
|
||||
|
||||
patterns := []dyn.Pattern{
|
||||
taskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("whl")),
|
||||
taskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("jar")),
|
||||
forEachTaskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("whl")),
|
||||
forEachTaskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("jar")),
|
||||
envDepsPattern.Append(dyn.AnyIndex()),
|
||||
}
|
||||
|
||||
for _, pattern := range patterns {
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
|
||||
source, ok := v.AsString()
|
||||
if !ok {
|
||||
return v, fmt.Errorf("expected string, got %s", v.Kind())
|
||||
}
|
||||
|
||||
if !IsLibraryLocal(source) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
source = filepath.Join(b.RootPath, source)
|
||||
libs[source] = append(libs[source], configLocation{
|
||||
configPath: p.Append(), // Hack to get a copy of the path
|
||||
location: v.Location(),
|
||||
})
|
||||
|
||||
return v, nil
|
||||
})
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
artifactPattern := dyn.NewPattern(
|
||||
dyn.Key("artifacts"),
|
||||
dyn.AnyKey(),
|
||||
dyn.Key("files"),
|
||||
dyn.AnyIndex(),
|
||||
)
|
||||
|
||||
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
return dyn.MapByPattern(v, artifactPattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
|
||||
file, ok := v.AsMap()
|
||||
if !ok {
|
||||
return v, fmt.Errorf("expected map, got %s", v.Kind())
|
||||
}
|
||||
|
||||
sv, ok := file.GetByString("source")
|
||||
if !ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
source, ok := sv.AsString()
|
||||
if !ok {
|
||||
return v, fmt.Errorf("expected string, got %s", v.Kind())
|
||||
}
|
||||
|
||||
libs[source] = append(libs[source], configLocation{
|
||||
configPath: p.Append(dyn.Key("remote_path")),
|
||||
location: v.Location(),
|
||||
})
|
||||
|
||||
return v, nil
|
||||
})
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return libs, nil
|
||||
}
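A hypothetical inspection sketch (not part of the change) showing how the collected map is meant to be used: each local library source maps to every config path that references it, so a wheel referenced by several tasks is uploaded once and rewritten everywhere:

package libraries

import (
	"context"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/log"
)

func exampleCollectLocalLibraries(ctx context.Context, b *bundle.Bundle) error {
	libs, err := collectLocalLibraries(b)
	if err != nil {
		return err
	}
	for source, locations := range libs {
		// e.g. "<bundle root>/dist/my.whl" referenced by two task library entries
		// and one environment dependency -> three config locations, one upload.
		log.Infof(ctx, "would upload %s (referenced at %d config path(s))", source, len(locations))
	}
	return nil
}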
|
||||
|
||||
func (u *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
|
||||
uploadPath, err := GetUploadBasePath(b)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
// If the client is not initialized, initialize it
|
||||
// We use the client field on the mutator to allow mocking the client in tests
|
||||
if u.client == nil {
|
||||
filer, err := GetFilerForLibraries(b.WorkspaceClient(), uploadPath)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
u.client = filer
|
||||
}
|
||||
|
||||
var diags diag.Diagnostics
|
||||
|
||||
libs, err := collectLocalLibraries(b)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
errs, errCtx := errgroup.WithContext(ctx)
|
||||
errs.SetLimit(maxFilesRequestsInFlight)
|
||||
|
||||
for source := range libs {
|
||||
errs.Go(func() error {
|
||||
return UploadFile(errCtx, source, u.client)
|
||||
})
|
||||
}
|
||||
|
||||
if err := errs.Wait(); err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
// Update all the config paths to point to the uploaded location
|
||||
for source, locations := range libs {
|
||||
err = b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
|
||||
remotePath := path.Join(uploadPath, filepath.Base(source))
|
||||
|
||||
// If the remote path does not start with /Workspace or /Volumes, prepend /Workspace
|
||||
if !strings.HasPrefix(remotePath, "/Workspace") && !strings.HasPrefix(remotePath, "/Volumes") {
|
||||
remotePath = "/Workspace" + remotePath
|
||||
}
|
||||
for _, location := range locations {
|
||||
v, err = dyn.SetByPath(v, location.configPath, dyn.NewValue(remotePath, []dyn.Location{location.location}))
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
}
|
||||
|
||||
return v, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
diags = diags.Extend(diag.FromErr(err))
|
||||
}
|
||||
}
|
||||
|
||||
return diags
|
||||
}
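A worked example of the remote-path rewrite above, under the same assumptions as the tests further below (workspace.artifact_path set to /foo/bar/artifacts, a local library at whl/source.whl):

package libraries

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"
)

func exampleRemotePath() {
	uploadPath := "/foo/bar/artifacts/.internal" // from GetUploadBasePath
	remotePath := path.Join(uploadPath, filepath.Base("whl/source.whl"))
	if !strings.HasPrefix(remotePath, "/Workspace") && !strings.HasPrefix(remotePath, "/Volumes") {
		remotePath = "/Workspace" + remotePath
	}
	fmt.Println(remotePath) // prints /Workspace/foo/bar/artifacts/.internal/source.whl
}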
|
||||
|
||||
func (u *upload) Name() string {
|
||||
return "libraries.Upload"
|
||||
}
|
||||
|
||||
func GetFilerForLibraries(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) {
|
||||
if isVolumesPath(uploadPath) {
|
||||
return filer.NewFilesClient(w, uploadPath)
|
||||
}
|
||||
return filer.NewWorkspaceFilesClient(w, uploadPath)
|
||||
}
|
||||
|
||||
func isVolumesPath(path string) bool {
|
||||
return strings.HasPrefix(path, "/Volumes/")
|
||||
}
|
||||
|
||||
// UploadFile uploads a file (a library, an artifact, etc.) to the Workspace or a UC volume
|
||||
func UploadFile(ctx context.Context, file string, client filer.Filer) error {
|
||||
filename := filepath.Base(file)
|
||||
cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename))
|
||||
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to open %s: %w", file, errors.Unwrap(err))
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
err = client.Write(ctx, filename, f, filer.OverwriteIfExists, filer.CreateParentDirectories)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to import %s: %w", filename, err)
|
||||
}
|
||||
|
||||
log.Infof(ctx, "Upload succeeded")
|
||||
return nil
|
||||
}
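A hypothetical end-to-end usage sketch (not part of the change; the volume path is made up): pick the filer that matches the configured upload path, then push a single wheel:

package libraries

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
)

func exampleUploadOne(ctx context.Context) error {
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		return err
	}
	// A "/Volumes/..." base path selects the Files API client; anything else uses the Workspace files client.
	f, err := GetFilerForLibraries(w, "/Volumes/main/default/artifacts/.internal")
	if err != nil {
		return err
	}
	return UploadFile(ctx, "./dist/my_project-0.1.0-py3-none-any.whl", f)
}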
|
||||
|
||||
func GetUploadBasePath(b *bundle.Bundle) (string, error) {
|
||||
artifactPath := b.Config.Workspace.ArtifactPath
|
||||
if artifactPath == "" {
|
||||
return "", fmt.Errorf("remote artifact path not configured")
|
||||
}
|
||||
|
||||
return path.Join(artifactPath, ".internal"), nil
|
||||
}
|
|
@@ -0,0 +1,331 @@
|
|||
package libraries
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
mockfiler "github.com/databricks/cli/internal/mocks/libs/filer"
|
||||
"github.com/databricks/cli/internal/testutil"
|
||||
"github.com/databricks/cli/libs/filer"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestArtifactUploadForWorkspace(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
whlFolder := filepath.Join(tmpDir, "whl")
|
||||
testutil.Touch(t, whlFolder, "source.whl")
|
||||
whlLocalPath := filepath.Join(whlFolder, "source.whl")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
ArtifactPath: "/foo/bar/artifacts",
|
||||
},
|
||||
Artifacts: config.Artifacts{
|
||||
"whl": {
|
||||
Type: config.ArtifactPythonWheel,
|
||||
Files: []config.ArtifactFile{
|
||||
{Source: whlLocalPath},
|
||||
},
|
||||
},
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ForEachTask: &jobs.ForEachTask{
|
||||
Task: jobs.Task{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Environments: []jobs.JobEnvironment{
|
||||
{
|
||||
Spec: &compute.Environment{
|
||||
Dependencies: []string{
|
||||
filepath.Join("whl", "source.whl"),
|
||||
"/Workspace/Users/foo@bar.com/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mockFiler := mockfiler.NewMockFiler(t)
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source.whl"),
|
||||
mock.AnythingOfType("*os.File"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil)
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler)))
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Test that the library paths are updated
|
||||
require.Equal(t, "/Workspace/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl)
|
||||
require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl)
|
||||
require.Equal(t, "/Workspace/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
|
||||
require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
|
||||
require.Equal(t, "/Workspace/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl)
|
||||
require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl)
|
||||
}
|
||||
|
||||
func TestArtifactUploadForVolumes(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
whlFolder := filepath.Join(tmpDir, "whl")
|
||||
testutil.Touch(t, whlFolder, "source.whl")
|
||||
whlLocalPath := filepath.Join(whlFolder, "source.whl")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
ArtifactPath: "/Volumes/foo/bar/artifacts",
|
||||
},
|
||||
Artifacts: config.Artifacts{
|
||||
"whl": {
|
||||
Type: config.ArtifactPythonWheel,
|
||||
Files: []config.ArtifactFile{
|
||||
{Source: whlLocalPath},
|
||||
},
|
||||
},
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Volumes/some/path/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ForEachTask: &jobs.ForEachTask{
|
||||
Task: jobs.Task{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Volumes/some/path/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Environments: []jobs.JobEnvironment{
|
||||
{
|
||||
Spec: &compute.Environment{
|
||||
Dependencies: []string{
|
||||
filepath.Join("whl", "source.whl"),
|
||||
"/Volumes/some/path/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mockFiler := mockfiler.NewMockFiler(t)
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source.whl"),
|
||||
mock.AnythingOfType("*os.File"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil)
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler)))
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Test that the library paths are updated
|
||||
require.Equal(t, "/Volumes/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl)
|
||||
require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl)
|
||||
require.Equal(t, "/Volumes/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
|
||||
require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
|
||||
require.Equal(t, "/Volumes/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl)
|
||||
require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl)
|
||||
}
|
||||
|
||||
func TestArtifactUploadWithNoLibraryReference(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
whlFolder := filepath.Join(tmpDir, "whl")
|
||||
testutil.Touch(t, whlFolder, "source.whl")
|
||||
whlLocalPath := filepath.Join(whlFolder, "source.whl")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
ArtifactPath: "/Workspace/foo/bar/artifacts",
|
||||
},
|
||||
Artifacts: config.Artifacts{
|
||||
"whl": {
|
||||
Type: config.ArtifactPythonWheel,
|
||||
Files: []config.ArtifactFile{
|
||||
{Source: whlLocalPath},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mockFiler := mockfiler.NewMockFiler(t)
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source.whl"),
|
||||
mock.AnythingOfType("*os.File"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil)
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler)))
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
require.Equal(t, "/Workspace/foo/bar/artifacts/.internal/source.whl", b.Config.Artifacts["whl"].Files[0].RemotePath)
|
||||
}
|
||||
|
||||
func TestUploadMultipleLibraries(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
whlFolder := filepath.Join(tmpDir, "whl")
|
||||
testutil.Touch(t, whlFolder, "source1.whl")
|
||||
testutil.Touch(t, whlFolder, "source2.whl")
|
||||
testutil.Touch(t, whlFolder, "source3.whl")
|
||||
testutil.Touch(t, whlFolder, "source4.whl")
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
Config: config.Root{
|
||||
Workspace: config.Workspace{
|
||||
ArtifactPath: "/foo/bar/artifacts",
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
Libraries: []compute.Library{
|
||||
{
|
||||
Whl: filepath.Join("whl", "*.whl"),
|
||||
},
|
||||
{
|
||||
Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Environments: []jobs.JobEnvironment{
|
||||
{
|
||||
Spec: &compute.Environment{
|
||||
Dependencies: []string{
|
||||
filepath.Join("whl", "*.whl"),
|
||||
"/Workspace/Users/foo@bar.com/mywheel.whl",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mockFiler := mockfiler.NewMockFiler(t)
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source1.whl"),
|
||||
mock.AnythingOfType("*os.File"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil).Once()
|
||||
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source2.whl"),
|
||||
mock.AnythingOfType("*os.File"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil).Once()
|
||||
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source3.whl"),
|
||||
mock.AnythingOfType("*os.File"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil).Once()
|
||||
|
||||
mockFiler.EXPECT().Write(
|
||||
mock.Anything,
|
||||
filepath.Join("source4.whl"),
|
||||
mock.AnythingOfType("*os.File"),
|
||||
filer.OverwriteIfExists,
|
||||
filer.CreateParentDirectories,
|
||||
).Return(nil).Once()
|
||||
|
||||
diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler)))
|
||||
require.NoError(t, diags.Error())
|
||||
|
||||
// Test that the library paths are updated
|
||||
require.Len(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, 5)
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/foo/bar/artifacts/.internal/source1.whl"})
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/foo/bar/artifacts/.internal/source2.whl"})
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/foo/bar/artifacts/.internal/source3.whl"})
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/foo/bar/artifacts/.internal/source4.whl"})
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/Users/foo@bar.com/mywheel.whl"})
|
||||
|
||||
require.Len(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, 5)
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/foo/bar/artifacts/.internal/source1.whl")
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/foo/bar/artifacts/.internal/source2.whl")
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/foo/bar/artifacts/.internal/source3.whl")
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/foo/bar/artifacts/.internal/source4.whl")
|
||||
require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/Users/foo@bar.com/mywheel.whl")
|
||||
}
|
|
@@ -29,8 +29,8 @@ func IsWorkspacePath(path string) bool {
|
|||
|
||||
// IsWorkspaceLibrary returns true if the specified library refers to a workspace path.
|
||||
func IsWorkspaceLibrary(library *compute.Library) bool {
|
||||
path := libraryPath(library)
|
||||
if path == "" {
|
||||
path, err := libraryPath(library)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
|
@@ -148,9 +148,9 @@ func Deploy() bundle.Mutator {
|
|||
terraform.StatePull(),
|
||||
deploy.StatePull(),
|
||||
mutator.ValidateGitDetails(),
|
||||
libraries.ValidateLocalLibrariesExist(),
|
||||
artifacts.CleanUp(),
|
||||
artifacts.UploadAll(),
|
||||
libraries.ExpandGlobReferences(),
|
||||
libraries.Upload(),
|
||||
python.TransformWheelTask(),
|
||||
files.Upload(),
|
||||
deploy.StateUpdate(),
|
||||
|
|
|
@@ -5,6 +5,7 @@ import (
|
|||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/mutator"
|
||||
pythonmutator "github.com/databricks/cli/bundle/config/mutator/python"
|
||||
"github.com/databricks/cli/bundle/config/validate"
|
||||
"github.com/databricks/cli/bundle/deploy/metadata"
|
||||
"github.com/databricks/cli/bundle/deploy/terraform"
|
||||
"github.com/databricks/cli/bundle/permissions"
|
||||
|
@@ -19,7 +20,19 @@ func Initialize() bundle.Mutator {
|
|||
return newPhase(
|
||||
"initialize",
|
||||
[]bundle.Mutator{
|
||||
validate.AllResourcesHaveValues(),
|
||||
|
||||
// Update all path fields in the sync block to be relative to the bundle root path.
|
||||
mutator.RewriteSyncPaths(),
|
||||
|
||||
// Configure the default sync path to equal the bundle root if not explicitly configured.
|
||||
// By default, this means all files in the bundle root directory are synchronized.
|
||||
mutator.SyncDefaultPath(),
|
||||
|
||||
// Figure out if the sync root path is identical or an ancestor of the bundle root path.
|
||||
// If it is an ancestor, this updates all paths to be relative to the sync root path.
|
||||
mutator.SyncInferRoot(),
|
||||
|
||||
mutator.MergeJobClusters(),
|
||||
mutator.MergeJobParameters(),
|
||||
mutator.MergeJobTasks(),
|
||||
|
@@ -45,6 +58,7 @@ func Initialize() bundle.Mutator {
|
|||
mutator.SetRunAs(),
|
||||
mutator.OverrideCompute(),
|
||||
mutator.ProcessTargetMode(),
|
||||
mutator.ApplyPresets(),
|
||||
mutator.DefaultQueueing(),
|
||||
mutator.ExpandPipelineGlobPaths(),
|
||||
|
||||
|
|
|
@@ -2,7 +2,6 @@ package python
|
|||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
|
@@ -18,11 +17,15 @@ func TestNoTransformByDefault(t *testing.T) {
|
|||
tmpDir := t.TempDir()
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
RootPath: filepath.Join(tmpDir, "parent", "my_bundle"),
|
||||
SyncRootPath: filepath.Join(tmpDir, "parent"),
|
||||
Config: config.Root{
|
||||
Bundle: config.Bundle{
|
||||
Target: "development",
|
||||
},
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/Workspace/files",
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job1": {
|
||||
|
@@ -63,11 +66,15 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) {
|
|||
tmpDir := t.TempDir()
|
||||
|
||||
b := &bundle.Bundle{
|
||||
RootPath: tmpDir,
|
||||
RootPath: filepath.Join(tmpDir, "parent", "my_bundle"),
|
||||
SyncRootPath: filepath.Join(tmpDir, "parent"),
|
||||
Config: config.Root{
|
||||
Bundle: config.Bundle{
|
||||
Target: "development",
|
||||
},
|
||||
Workspace: config.Workspace{
|
||||
FilePath: "/Workspace/files",
|
||||
},
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job1": {
|
||||
|
@@ -102,14 +109,7 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) {
|
|||
task := b.Config.Resources.Jobs["job1"].Tasks[0]
|
||||
require.Nil(t, task.PythonWheelTask)
|
||||
require.NotNil(t, task.NotebookTask)
|
||||
|
||||
dir, err := b.InternalDir(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
internalDirRel, err := filepath.Rel(b.RootPath, dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, path.Join(filepath.ToSlash(internalDirRel), "notebook_job1_key1"), task.NotebookTask.NotebookPath)
|
||||
require.Equal(t, "/Workspace/files/my_bundle/.databricks/bundle/development/.internal/notebook_job1_key1", task.NotebookTask.NotebookPath)
|
||||
|
||||
require.Len(t, task.Libraries, 1)
|
||||
require.Equal(t, "/Workspace/Users/test@test.com/bundle/dist/test.jar", task.Libraries[0].Jar)
|
||||
|
|
|
@@ -7,7 +7,6 @@ import (
|
|||
|
||||
"github.com/databricks/cli/bundle"
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/bundle/config/paths"
|
||||
"github.com/databricks/cli/bundle/config/resources"
|
||||
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||
|
@@ -124,9 +123,6 @@ func TestNoPanicWithNoPythonWheelTasks(t *testing.T) {
|
|||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"test": {
|
||||
Paths: paths.Paths{
|
||||
ConfigFilePath: tmpDir,
|
||||
},
|
||||
JobSettings: &jobs.JobSettings{
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
|
|
|
@@ -223,6 +223,17 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
|
|||
{Whl: "./dist/test.whl"},
|
||||
},
|
||||
},
|
||||
{
|
||||
TaskKey: "key7",
|
||||
PythonWheelTask: &jobs.PythonWheelTask{},
|
||||
ExistingClusterId: "test-key-2",
|
||||
Libraries: []compute.Library{
|
||||
{Whl: "signol_lib-0.4.4-20240822+prod-py3-none-any.whl"},
|
||||
{Pypi: &compute.PythonPyPiLibrary{
|
||||
Package: "requests==2.25.1",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@@ -241,6 +252,46 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
|
|||
require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
|
||||
}
|
||||
|
||||
func TestTasksWithPyPiPackageAreCompatible(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
Resources: config.Resources{
|
||||
Jobs: map[string]*resources.Job{
|
||||
"job1": {
|
||||
JobSettings: &jobs.JobSettings{
|
||||
JobClusters: []jobs.JobCluster{
|
||||
{
|
||||
JobClusterKey: "cluster1",
|
||||
NewCluster: compute.ClusterSpec{
|
||||
SparkVersion: "12.2.x-scala2.12",
|
||||
},
|
||||
},
|
||||
},
|
||||
Tasks: []jobs.Task{
|
||||
{
|
||||
TaskKey: "key1",
|
||||
PythonWheelTask: &jobs.PythonWheelTask{},
|
||||
ExistingClusterId: "test-key-2",
|
||||
Libraries: []compute.Library{
|
||||
{Pypi: &compute.PythonPyPiLibrary{
|
||||
Package: "requests==2.25.1",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
m := mocks.NewMockWorkspaceClient(t)
|
||||
b.SetWorkpaceClient(m.WorkspaceClient)
|
||||
|
||||
require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
|
||||
}
|
||||
|
||||
func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) {
|
||||
b := &bundle.Bundle{
|
||||
Config: config.Root{
|
||||
|
|
|
@@ -53,7 +53,7 @@ func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId string, u
|
|||
// Otherwise, for long-lived pipelines, there can be a lot of unnecessary
|
||||
// latency due to the multiple pagination API calls needed under the hood for
|
||||
// ListPipelineEventsAll
|
||||
res, err := w.Pipelines.Impl().ListPipelineEvents(ctx, pipelines.ListPipelineEventsRequest{
|
||||
events, err := w.Pipelines.ListPipelineEventsAll(ctx, pipelines.ListPipelineEventsRequest{
|
||||
Filter: `level='ERROR'`,
|
||||
MaxResults: 100,
|
||||
PipelineId: pipelineId,
|
||||
|
@@ -61,7 +61,7 @@ func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId string, u
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updateEvents := filterEventsByUpdateId(res.Events, updateId)
|
||||
updateEvents := filterEventsByUpdateId(events, updateId)
|
||||
// The events API returns most recent events first. We iterate in a reverse order
|
||||
// to print the events chronologically
|
||||
for i := len(updateEvents) - 1; i >= 0; i-- {
|
||||
|
|
|
@@ -78,7 +78,7 @@ func (l *UpdateTracker) Events(ctx context.Context) ([]ProgressEvent, error) {
|
|||
}
|
||||
|
||||
// we only check the most recent 100 events for progress
|
||||
response, err := l.w.Pipelines.Impl().ListPipelineEvents(ctx, pipelines.ListPipelineEventsRequest{
|
||||
events, err := l.w.Pipelines.ListPipelineEventsAll(ctx, pipelines.ListPipelineEventsRequest{
|
||||
PipelineId: l.PipelineId,
|
||||
MaxResults: 100,
|
||||
Filter: filter,
|
||||
|
@@ -89,8 +89,8 @@ func (l *UpdateTracker) Events(ctx context.Context) ([]ProgressEvent, error) {
|
|||
|
||||
result := make([]ProgressEvent, 0)
|
||||
// we iterate in reverse to return events in chronological order
|
||||
for i := len(response.Events) - 1; i >= 0; i-- {
|
||||
event := response.Events[i]
|
||||
for i := len(events) - 1; i >= 0; i-- {
|
||||
event := events[i]
|
||||
// filter to only include update_progress and flow_progress events
|
||||
if event.EventType == "flow_progress" || event.EventType == "update_progress" {
|
||||
result = append(result, ProgressEvent(event))
|
||||
|
|
|
@@ -9,7 +9,6 @@ import (
|
|||
|
||||
"github.com/databricks/cli/bundle/config"
|
||||
"github.com/databricks/cli/libs/jsonschema"
|
||||
"github.com/databricks/databricks-sdk-go/openapi"
|
||||
)
|
||||
|
||||
// A subset of Schema struct
|
||||
|
@@ -63,7 +62,7 @@ func UpdateBundleDescriptions(openapiSpecPath string) (*Docs, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
spec := &openapi.Specification{}
|
||||
spec := &Specification{}
|
||||
err = json.Unmarshal(openapiSpec, spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@@ -85,6 +85,12 @@
|
|||
"enabled": {
|
||||
"description": ""
|
||||
},
|
||||
"import": {
|
||||
"description": "",
|
||||
"items": {
|
||||
"description": ""
|
||||
}
|
||||
},
|
||||
"venv_path": {
|
||||
"description": ""
|
||||
}
|
||||
|
@@ -130,6 +136,29 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"presets": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
"jobs_max_concurrent_runs": {
|
||||
"description": ""
|
||||
},
|
||||
"name_prefix": {
|
||||
"description": ""
|
||||
},
|
||||
"pipelines_development": {
|
||||
"description": ""
|
||||
},
|
||||
"tags": {
|
||||
"description": "",
|
||||
"additionalproperties": {
|
||||
"description": ""
|
||||
}
|
||||
},
|
||||
"trigger_pause_status": {
|
||||
"description": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"description": "Collection of Databricks resources to deploy.",
|
||||
"properties": {
|
||||
|
@@ -218,7 +247,7 @@
|
|||
}
|
||||
},
|
||||
"description": {
|
||||
"description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding."
|
||||
"description": "An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding."
|
||||
},
|
||||
"edit_mode": {
|
||||
"description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified."
|
||||
|
@@ -935,7 +964,7 @@
|
|||
}
|
||||
},
|
||||
"egg": {
|
||||
"description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
|
||||
"description": "Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above."
|
||||
},
|
||||
"jar": {
|
||||
"description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
|
||||
|
@@ -1827,13 +1856,16 @@
|
|||
}
|
||||
},
|
||||
"external_model": {
|
||||
"description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n",
|
||||
"description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model,\nit cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later.\nThe task type of all external models within an endpoint must be the same.\n",
|
||||
"properties": {
|
||||
"ai21labs_config": {
|
||||
"description": "AI21Labs Config. Only required if the provider is 'ai21labs'.",
|
||||
"properties": {
|
||||
"ai21labs_api_key": {
|
||||
"description": "The Databricks secret key reference for an AI21Labs API key."
|
||||
"description": "The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`."
|
||||
},
|
||||
"ai21labs_api_key_plaintext": {
|
||||
"description": "An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@@ -1841,13 +1873,19 @@
|
|||
"description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.",
|
||||
"properties": {
|
||||
"aws_access_key_id": {
|
||||
"description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services."
|
||||
"description": "The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`."
|
||||
},
|
||||
"aws_access_key_id_plaintext": {
|
||||
"description": "An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`."
|
||||
},
|
||||
"aws_region": {
|
||||
"description": "The AWS region to use. Bedrock has to be enabled there."
|
||||
},
|
||||
"aws_secret_access_key": {
|
||||
"description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services."
|
||||
"description": "The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`."
|
||||
},
|
||||
"aws_secret_access_key_plaintext": {
|
||||
"description": "An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`."
|
||||
},
|
||||
"bedrock_provider": {
|
||||
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon."
|
||||
|
@@ -1858,15 +1896,24 @@
|
|||
"description": "Anthropic Config. Only required if the provider is 'anthropic'.",
|
||||
"properties": {
|
||||
"anthropic_api_key": {
|
||||
"description": "The Databricks secret key reference for an Anthropic API key."
|
||||
"description": "The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`."
|
||||
},
|
||||
"anthropic_api_key_plaintext": {
|
||||
"description": "The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`."
|
||||
}
|
||||
}
|
||||
},
|
||||
"cohere_config": {
|
||||
"description": "Cohere Config. Only required if the provider is 'cohere'.",
|
||||
"properties": {
|
||||
"cohere_api_base": {
|
||||
"description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n"
|
||||
},
|
||||
"cohere_api_key": {
|
||||
"description": "The Databricks secret key reference for a Cohere API key."
|
||||
"description": "The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`."
|
||||
},
|
||||
"cohere_api_key_plaintext": {
|
||||
"description": "The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@@ -1874,13 +1921,33 @@
|
|||
"description": "Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'.",
|
||||
"properties": {
|
||||
"databricks_api_token": {
|
||||
"description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n"
|
||||
"description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\nIf you prefer to paste your API key directly, see `databricks_api_token_plaintext`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n"
|
||||
},
|
||||
"databricks_api_token_plaintext": {
|
||||
"description": "The Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `databricks_api_token`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n"
|
||||
},
|
||||
"databricks_workspace_url": {
|
||||
"description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"google_cloud_vertex_ai_config": {
|
||||
"description": "Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'.",
|
||||
"properties": {
|
||||
"private_key": {
|
||||
"description": "The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`"
|
||||
},
|
||||
"private_key_plaintext": {
|
||||
"description": "The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`."
|
||||
},
|
||||
"project_id": {
|
||||
"description": "This is the Google Cloud project id that the service account is associated with."
|
||||
},
|
||||
"region": {
|
||||
"description": "This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions."
|
||||
}
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the external model."
|
||||
},
|
||||
|
@@ -1891,16 +1958,22 @@
|
|||
"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n"
|
||||
},
|
||||
"microsoft_entra_client_secret": {
|
||||
"description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n"
|
||||
"description": "The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.\nIf you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n"
|
||||
},
|
||||
"microsoft_entra_client_secret_plaintext": {
|
||||
"description": "The client secret used for Microsoft Entra ID authentication provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n"
|
||||
},
|
||||
"microsoft_entra_tenant_id": {
|
||||
"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n"
|
||||
},
|
||||
"openai_api_base": {
|
||||
"description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n"
|
||||
"description": "This is a field to provide a customized base URl for the OpenAI API.\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\nFor other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.\n"
|
||||
},
|
||||
"openai_api_key": {
|
||||
"description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key."
|
||||
"description": "The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`."
|
||||
},
|
||||
"openai_api_key_plaintext": {
|
||||
"description": "The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`."
|
||||
},
|
||||
"openai_api_type": {
|
||||
"description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n"
|
||||
|
@ -1920,12 +1993,15 @@
|
|||
"description": "PaLM Config. Only required if the provider is 'palm'.",
|
||||
"properties": {
|
||||
"palm_api_key": {
|
||||
"description": "The Databricks secret key reference for a PaLM API key."
|
||||
"description": "The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`."
|
||||
},
|
||||
"palm_api_key_plaintext": {
|
||||
"description": "The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`."
|
||||
}
|
||||
}
|
||||
},
|
||||
"provider": {
|
||||
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n"
|
||||
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n"
|
||||
},
|
||||
"task": {
|
||||
"description": "The task type of the external model."
|
||||
|
@@ -2331,6 +2407,9 @@
"driver_node_type_id": {
"description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above."
},
"enable_local_disk_encryption": {
"description": "Whether to enable local disk encryption for the cluster."
},
"gcp_attributes": {
"description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.",
"properties": {
@@ -2525,7 +2604,7 @@
"description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location."
},
"gateway_storage_name": {
"description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n"
"description": "Optional. The Unity Catalog-compatible name for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n"
},
"gateway_storage_schema": {
"description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location."
@@ -2565,7 +2644,7 @@
"description": "Required. Schema name in the source database."
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.",
"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
@@ -2605,7 +2684,7 @@
"description": "Required. Table name in the source database."
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.",
"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
@@ -2685,6 +2764,9 @@
"description": "The absolute path of the notebook."
}
}
},
"whl": {
"description": "URI of the whl to be installed."
}
}
}
@@ -2955,6 +3037,49 @@
}
}
}
},
"schemas": {
"description": "",
"additionalproperties": {
"description": "",
"properties": {
"catalog_name": {
"description": ""
},
"comment": {
"description": ""
},
"grants": {
"description": "",
"items": {
"description": "",
"properties": {
"principal": {
"description": ""
},
"privileges": {
"description": "",
"items": {
"description": ""
}
}
}
}
},
"name": {
"description": ""
},
"properties": {
"description": "",
"additionalproperties": {
"description": ""
}
},
"storage_root": {
"description": ""
}
}
}
}
}
},
@@ -2983,6 +3108,12 @@
"items": {
"description": ""
}
},
"paths": {
"description": "",
"items": {
"description": ""
}
}
}
},
@@ -3106,6 +3237,29 @@
}
}
},
"presets": {
"description": "",
"properties": {
"jobs_max_concurrent_runs": {
"description": ""
},
"name_prefix": {
"description": ""
},
"pipelines_development": {
"description": ""
},
"tags": {
"description": "",
"additionalproperties": {
"description": ""
}
},
"trigger_pause_status": {
"description": ""
}
}
},
"resources": {
"description": "Collection of Databricks resources to deploy.",
"properties": {
@@ -3194,7 +3348,7 @@
}
},
"description": {
"description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding."
"description": "An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding."
},
"edit_mode": {
"description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified."
@@ -3911,7 +4065,7 @@
}
},
"egg": {
"description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
"description": "Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above."
},
"jar": {
"description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
@@ -4803,13 +4957,16 @@
}
},
"external_model": {
"description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n",
"description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model,\nit cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later.\nThe task type of all external models within an endpoint must be the same.\n",
"properties": {
"ai21labs_config": {
"description": "AI21Labs Config. Only required if the provider is 'ai21labs'.",
"properties": {
"ai21labs_api_key": {
"description": "The Databricks secret key reference for an AI21Labs API key."
"description": "The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`."
},
"ai21labs_api_key_plaintext": {
"description": "An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`."
}
}
},
@@ -4817,13 +4974,19 @@
"description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.",
"properties": {
"aws_access_key_id": {
"description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services."
"description": "The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`."
},
"aws_access_key_id_plaintext": {
"description": "An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`."
},
"aws_region": {
"description": "The AWS region to use. Bedrock has to be enabled there."
},
"aws_secret_access_key": {
"description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services."
"description": "The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`."
},
"aws_secret_access_key_plaintext": {
"description": "An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`."
},
"bedrock_provider": {
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon."
@@ -4834,15 +4997,24 @@
"description": "Anthropic Config. Only required if the provider is 'anthropic'.",
"properties": {
"anthropic_api_key": {
"description": "The Databricks secret key reference for an Anthropic API key."
"description": "The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`."
},
"anthropic_api_key_plaintext": {
"description": "The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`."
}
}
},
"cohere_config": {
"description": "Cohere Config. Only required if the provider is 'cohere'.",
"properties": {
"cohere_api_base": {
"description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n"
},
"cohere_api_key": {
"description": "The Databricks secret key reference for a Cohere API key."
"description": "The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`."
},
"cohere_api_key_plaintext": {
"description": "The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`."
}
}
},
@@ -4850,13 +5022,33 @@
"description": "Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'.",
"properties": {
"databricks_api_token": {
"description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n"
"description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\nIf you prefer to paste your API key directly, see `databricks_api_token_plaintext`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n"
},
"databricks_api_token_plaintext": {
"description": "The Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `databricks_api_token`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n"
},
"databricks_workspace_url": {
"description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n"
}
}
},
"google_cloud_vertex_ai_config": {
"description": "Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'.",
"properties": {
"private_key": {
"description": "The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`"
},
"private_key_plaintext": {
"description": "The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`."
},
"project_id": {
"description": "This is the Google Cloud project id that the service account is associated with."
},
"region": {
"description": "This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions."
}
}
},
"name": {
"description": "The name of the external model."
},
@@ -4867,16 +5059,22 @@
"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n"
},
"microsoft_entra_client_secret": {
"description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n"
"description": "The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.\nIf you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n"
},
"microsoft_entra_client_secret_plaintext": {
"description": "The client secret used for Microsoft Entra ID authentication provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n"
},
"microsoft_entra_tenant_id": {
"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n"
},
"openai_api_base": {
"description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n"
"description": "This is a field to provide a customized base URl for the OpenAI API.\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\nFor other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.\n"
},
"openai_api_key": {
"description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key."
"description": "The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`."
},
"openai_api_key_plaintext": {
"description": "The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`."
},
"openai_api_type": {
"description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n"
@@ -4896,12 +5094,15 @@
"description": "PaLM Config. Only required if the provider is 'palm'.",
"properties": {
"palm_api_key": {
"description": "The Databricks secret key reference for a PaLM API key."
"description": "The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`."
},
"palm_api_key_plaintext": {
"description": "The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`."
}
}
},
"provider": {
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n"
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n"
},
"task": {
"description": "The task type of the external model."
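Taken together, the descriptions above say that every provider block accepts either a Databricks secret reference or a *_plaintext variant, but not both. Purely as an illustrative sketch (not part of this commit), the Go snippet below builds one such payload using the secret-reference form; the wrapping key name "openai_config", the model name, the task value, and the {{secrets/...}} reference syntax are assumptions for the example, only the leaf field names come from the schema above.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical external-model payload; field names mirror the schema
	// descriptions above, values are placeholders.
	externalModel := map[string]any{
		"name":     "gpt-4o-mini",
		"provider": "openai",
		"task":     "llm/v1/chat",
		"openai_config": map[string]any{
			// Secret reference form; openai_api_key_plaintext would be used
			// instead if the key were pasted directly.
			"openai_api_key": "{{secrets/my-scope/openai-key}}",
		},
	}

	out, _ := json.MarshalIndent(externalModel, "", "  ")
	fmt.Println(string(out))
}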
@@ -5307,6 +5508,9 @@
"driver_node_type_id": {
"description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above."
},
"enable_local_disk_encryption": {
"description": "Whether to enable local disk encryption for the cluster."
},
"gcp_attributes": {
"description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.",
"properties": {
@@ -5501,7 +5705,7 @@
"description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location."
},
"gateway_storage_name": {
"description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n"
"description": "Optional. The Unity Catalog-compatible name for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n"
},
"gateway_storage_schema": {
"description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location."
@@ -5541,7 +5745,7 @@
"description": "Required. Schema name in the source database."
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.",
"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
@@ -5581,7 +5785,7 @@
"description": "Required. Table name in the source database."
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.",
"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
@@ -5661,6 +5865,9 @@
"description": "The absolute path of the notebook."
}
}
},
"whl": {
"description": "URI of the whl to be installed."
}
}
}
@@ -5931,6 +6138,49 @@
}
}
}
},
"schemas": {
"description": "",
"additionalproperties": {
"description": "",
"properties": {
"catalog_name": {
"description": ""
},
"comment": {
"description": ""
},
"grants": {
"description": "",
"items": {
"description": "",
"properties": {
"principal": {
"description": ""
},
"privileges": {
"description": "",
"items": {
"description": ""
}
}
}
}
},
"name": {
"description": ""
},
"properties": {
"description": "",
"additionalproperties": {
"description": ""
}
},
"storage_root": {
"description": ""
}
}
}
}
}
},
@@ -5959,6 +6209,12 @@
"items": {
"description": ""
}
},
"paths": {
"description": "",
"items": {
"description": ""
}
}
}
},
@@ -6010,6 +6266,9 @@
"description": ""
}
}
},
"type": {
"description": ""
}
}
}
@@ -6115,6 +6374,9 @@
"description": ""
}
}
},
"type": {
"description": ""
}
}
}
@@ -6,12 +6,11 @@ import (
"strings"

"github.com/databricks/cli/libs/jsonschema"
"github.com/databricks/databricks-sdk-go/openapi"
)

type OpenapiReader struct {
// OpenAPI spec to read schemas from.
OpenapiSpec *openapi.Specification
OpenapiSpec *Specification

// In-memory cache of schemas read from the OpenAPI spec.
memo map[string]jsonschema.Schema
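To make the change above concrete: the reader now holds the package-local Specification instead of the SDK's openapi.Specification. A hedged sketch (not part of this commit) of how code in the same schema package might construct it from raw spec JSON; loadReader is a hypothetical helper name:

package schema

import (
	"encoding/json"

	"github.com/databricks/cli/libs/jsonschema"
)

// loadReader decodes raw OpenAPI JSON into the package-local Specification
// and returns a reader with an empty memo cache.
func loadReader(raw []byte) (*OpenapiReader, error) {
	spec := &Specification{}
	if err := json.Unmarshal(raw, spec); err != nil {
		return nil, err
	}
	return &OpenapiReader{
		OpenapiSpec: spec,
		memo:        make(map[string]jsonschema.Schema),
	}, nil
}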
@@ -5,7 +5,6 @@ import (
"testing"

"github.com/databricks/cli/libs/jsonschema"
"github.com/databricks/databricks-sdk-go/openapi"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -45,7 +44,7 @@ func TestReadSchemaForObject(t *testing.T) {
}
}
`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -103,7 +102,7 @@ func TestReadSchemaForArray(t *testing.T) {
}
}
}`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -149,7 +148,7 @@ func TestReadSchemaForMap(t *testing.T) {
}
}
}`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -198,7 +197,7 @@ func TestRootReferenceIsResolved(t *testing.T) {
}
}
}`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -248,7 +247,7 @@ func TestSelfReferenceLoopErrors(t *testing.T) {
}
}
}`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -282,7 +281,7 @@ func TestCrossReferenceLoopErrors(t *testing.T) {
}
}
}`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -327,7 +326,7 @@ func TestReferenceResolutionForMapInObject(t *testing.T) {
}
}
}`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -397,7 +396,7 @@ func TestReferenceResolutionForArrayInObject(t *testing.T) {
}
}
}`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -460,7 +459,7 @@ func TestReferenceResolutionDoesNotOverwriteDescriptions(t *testing.T) {
}
}
}`
spec := &openapi.Specification{}
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
@@ -0,0 +1,11 @@
package schema

import "github.com/databricks/cli/libs/jsonschema"

type Specification struct {
Components *Components `json:"components"`
}

type Components struct {
Schemas map[string]*jsonschema.Schema `json:"schemas,omitempty"`
}
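As an illustration of the new types (a sketch, not part of this commit), a document shaped like the test fixtures above decodes directly into Specification, and component schemas can then be looked up by name:

package schema

import (
	"encoding/json"
	"fmt"
)

// ExampleSpecification decodes a minimal spec document into the new
// package-local types and checks that a named schema is present.
func ExampleSpecification() {
	raw := []byte(`{"components": {"schemas": {"foo": {"type": "number"}}}}`)

	spec := &Specification{}
	if err := json.Unmarshal(raw, spec); err != nil {
		panic(err)
	}

	// Components.Schemas maps schema names to *jsonschema.Schema values.
	_, ok := spec.Components.Schemas["foo"]
	fmt.Println(ok)
	// Output: true
}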
@@ -18,6 +18,6 @@ func TestEnvironmentKeyProvidedAndNoPanic(t *testing.T) {
b, diags := loadTargetWithDiags("./environment_key_only", "default")
require.Empty(t, diags)

diags = bundle.Apply(context.Background(), b, libraries.ValidateLocalLibrariesExist())
diags = bundle.Apply(context.Background(), b, libraries.ExpandGlobReferences())
require.Empty(t, diags)
}
@@ -1,7 +1,6 @@
package config_tests

import (
"path/filepath"
"testing"

"github.com/databricks/cli/bundle/config"
@@ -15,7 +14,6 @@ func TestJobAndPipelineDevelopmentWithEnvironment(t *testing.T) {
assert.Len(t, b.Config.Resources.Pipelines, 1)

p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"]
assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath))
assert.Equal(t, b.Config.Bundle.Mode, config.Development)
assert.True(t, p.Development)
require.Len(t, p.Libraries, 1)
@@ -29,7 +27,6 @@ func TestJobAndPipelineStagingWithEnvironment(t *testing.T) {
assert.Len(t, b.Config.Resources.Pipelines, 1)

p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"]
assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath))
assert.False(t, p.Development)
require.Len(t, p.Libraries, 1)
assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path)
@@ -42,14 +39,12 @@ func TestJobAndPipelineProductionWithEnvironment(t *testing.T) {
assert.Len(t, b.Config.Resources.Pipelines, 1)

p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"]
assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath))
assert.False(t, p.Development)
require.Len(t, p.Libraries, 1)
assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path)
assert.Equal(t, "nyc_taxi_production", p.Target)

j := b.Config.Resources.Jobs["pipeline_schedule"]
assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(j.ConfigFilePath))
assert.Equal(t, "Daily refresh of production pipeline", j.Name)
require.Len(t, j.Tasks, 1)
assert.NotEmpty(t, j.Tasks[0].PipelineTask.PipelineId)