mirror of https://github.com/databricks/cli.git

commit d0d875b0db
Merge branch 'main' into feature/apps
@@ -45,7 +45,6 @@ jobs:
          echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV
          echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
          go install gotest.tools/gotestsum@latest
-         go install honnef.co/go/tools/cmd/staticcheck@latest

      - name: Pull external libraries
        run: |
@@ -53,7 +52,7 @@ jobs:
          pip3 install wheel

      - name: Run tests
-       run: make test
+       run: make testonly

      - name: Publish test coverage
        uses: codecov/codecov-action@v4
@@ -90,6 +89,20 @@ jobs:
      # Exit with status code 1 if there are differences (i.e. unformatted files)
      git diff --exit-code

+ golangci:
+   name: lint
+   runs-on: ubuntu-latest
+   steps:
+     - uses: actions/checkout@v4
+     - uses: actions/setup-go@v5
+       with:
+         go-version: 1.23.2
+     - name: golangci-lint
+       uses: golangci/golangci-lint-action@v6
+       with:
+         version: v1.62.2
+         args: --timeout=15m
+
  validate-bundle-schema:
    runs-on: ubuntu-latest
@@ -5,6 +5,7 @@ on:
    branches:
      - "main"
      - "demo-*"
+     - "bugbash-*"

  # Confirm that snapshot builds work if this file is modified.
  pull_request:
@@ -0,0 +1,19 @@
linters:
  disable-all: true
  enable:
    # errcheck and govet are part of default setup and should be included but give too many errors now
    # once errors are fixed, they should be enabled here:
    #- errcheck
    - gosimple
    #- govet
    - ineffassign
    - staticcheck
    - unused
    - gofmt
linters-settings:
  gofmt:
    rewrite-rules:
      - pattern: 'a[b:len(a)]'
        replacement: 'a[b:]'
issues:
  exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/
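
The gofmt rewrite rule configured above drops a redundant `len(a)` from slice expressions. A minimal standalone illustration (not part of this commit; all names are hypothetical):

package main

import "fmt"

func main() {
    data := []int{1, 2, 3, 4}
    i := 2
    tail := data[i:len(data)] // gofmt -r 'a[b:len(a)] -> a[b:]' rewrites this ...
    same := data[i:]          // ... into this equivalent, shorter form.
    fmt.Println(tail, same)   // [3 4] [3 4]
}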
@@ -3,6 +3,10 @@
    "editor.insertSpaces": false,
    "editor.formatOnSave": true
  },
+ "go.lintTool": "golangci-lint",
+ "go.lintFlags": [
+   "--fast"
+ ],
  "files.trimTrailingWhitespace": true,
  "files.insertFinalNewline": true,
  "files.trimFinalNewlines": true,
Makefile (12 changes)
@@ -7,10 +7,16 @@ fmt:
	@gofmt -w $(shell find . -type f -name '*.go' -not -path "./vendor/*")

lint: vendor
-	@echo "✓ Linting source code with https://staticcheck.io/ ..."
-	@staticcheck ./...
+	@echo "✓ Linting source code with https://golangci-lint.run/ ..."
+	@golangci-lint run ./...

-test: lint
+lintfix: vendor
+	@echo "✓ Linting source code with 'golangci-lint run --fix' ..."
+	@golangci-lint run --fix ./...
+
+test: lint testonly
+
+testonly:
	@echo "✓ Running tests ..."
	@gotestsum --format pkgname-and-test-fails --no-summary=skipped --raw-command go test -v -json -short -coverprofile=coverage.txt ./...
@@ -21,18 +21,13 @@ func (m *cleanUp) Name() string {
}

func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-    uploadPath, err := libraries.GetUploadBasePath(b)
-    if err != nil {
-        return diag.FromErr(err)
-    }
-
-    client, err := libraries.GetFilerForLibraries(b.WorkspaceClient(), uploadPath)
-    if err != nil {
-        return diag.FromErr(err)
+    client, uploadPath, diags := libraries.GetFilerForLibraries(ctx, b)
+    if diags.HasError() {
+        return diags
    }

    // We intentionally ignore the error because it is not critical to the deployment
-    err = client.Delete(ctx, ".", filer.DeleteRecursively)
+    err := client.Delete(ctx, ".", filer.DeleteRecursively)
    if err != nil {
        log.Errorf(ctx, "failed to delete %s: %v", uploadPath, err)
    }
@@ -48,6 +48,10 @@ type Bundle struct {
    // Exclusively use this field for filesystem operations.
    SyncRoot vfs.Path

+   // Path to the root of git worktree containing the bundle.
+   // https://git-scm.com/docs/git-worktree
+   WorktreeRoot vfs.Path
+
    // Config contains the bundle configuration.
    // It is loaded from the bundle configuration files and mutators may update it.
    Config config.Root
@@ -32,6 +32,10 @@ func (r ReadOnlyBundle) SyncRoot() vfs.Path {
    return r.b.SyncRoot
}

+func (r ReadOnlyBundle) WorktreeRoot() vfs.Path {
+    return r.b.WorktreeRoot
+}
+
func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient {
    return r.b.WorkspaceClient()
}
@@ -49,4 +49,8 @@ type Bundle struct {

    // Databricks CLI version constraints required to run the bundle.
    DatabricksCliVersion string `json:"databricks_cli_version,omitempty"`
+
+   // A stable generated UUID for the bundle. This is normally serialized by
+   // Databricks first party template when a user runs bundle init.
+   Uuid string `json:"uuid,omitempty"`
}
@@ -73,7 +73,7 @@ func TestApplyPresetsPrefix(t *testing.T) {
    }
}

-func TestApplyPresetsPrefixForUcSchema(t *testing.T) {
+func TestApplyPresetsPrefixForSchema(t *testing.T) {
    tests := []struct {
        name   string
        prefix string
@@ -129,6 +129,36 @@ func TestApplyPresetsPrefixForUcSchema(t *testing.T) {
    }
}

+func TestApplyPresetsVolumesShouldNotBePrefixed(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Resources: config.Resources{
+                Volumes: map[string]*resources.Volume{
+                    "volume1": {
+                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
+                            Name:        "volume1",
+                            CatalogName: "catalog1",
+                            SchemaName:  "schema1",
+                        },
+                    },
+                },
+            },
+            Presets: config.Presets{
+                NamePrefix: "[prefix]",
+            },
+        },
+    }
+
+    ctx := context.Background()
+    diag := bundle.Apply(ctx, b, mutator.ApplyPresets())
+
+    if diag.HasError() {
+        t.Fatalf("unexpected error: %v", diag)
+    }
+
+    require.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name)
+}
+
func TestApplyPresetsTags(t *testing.T) {
    tests := []struct {
        name string
@@ -0,0 +1,44 @@
package mutator

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
)

type configureVolumeDefaults struct{}

func ConfigureVolumeDefaults() bundle.Mutator {
    return &configureVolumeDefaults{}
}

func (m *configureVolumeDefaults) Name() string {
    return "ConfigureVolumeDefaults"
}

func (m *configureVolumeDefaults) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    var diags diag.Diagnostics

    pattern := dyn.NewPattern(
        dyn.Key("resources"),
        dyn.Key("volumes"),
        dyn.AnyKey(),
    )

    // Configure defaults for all volumes.
    err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
        return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
            var err error
            v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("volume_type")), dyn.V("MANAGED"))
            if err != nil {
                return dyn.InvalidValue, err
            }
            return v, nil
        })
    })

    diags = diags.Extend(diag.FromErr(err))
    return diags
}
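
A hedged usage sketch for the mutator above: applied to a loaded bundle, it fills in `volume_type: MANAGED` only where the key is absent. The variables `ctx` and `b` are assumed to come from the caller's setup; this is not part of the commit.

// Sketch: applying the mutator during bundle initialization.
diags := bundle.Apply(ctx, b, mutator.ConfigureVolumeDefaults())
if diags.HasError() {
    return diags
}
// After this point, resources.volumes.*.volume_type is "MANAGED" unless the
// configuration already set a value (including an explicit empty string,
// which setIfNotExists leaves untouched).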
@@ -0,0 +1,75 @@
package mutator_test

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/mutator"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/bundle/internal/bundletest"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/databricks-sdk-go/service/catalog"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestConfigureVolumeDefaultsVolumeType(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Resources: config.Resources{
                Volumes: map[string]*resources.Volume{
                    "v1": {
                        // Empty string is skipped.
                        // See below for how it is set.
                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
                            VolumeType: "",
                        },
                    },
                    "v2": {
                        // Non-empty string is skipped.
                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
                            VolumeType: "already-set",
                        },
                    },
                    "v3": {
                        // No volume type set.
                    },
                    "v4": nil,
                },
            },
        },
    }

    // We can't set an empty string in the typed configuration.
    // Do it on the dyn.Value directly.
    bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
        return dyn.Set(v, "resources.volumes.v1.volume_type", dyn.V(""))
    })

    diags := bundle.Apply(context.Background(), b, mutator.ConfigureVolumeDefaults())
    require.NoError(t, diags.Error())

    var v dyn.Value
    var err error

    // Set to empty string; unchanged.
    v, err = dyn.Get(b.Config.Value(), "resources.volumes.v1.volume_type")
    require.NoError(t, err)
    assert.Equal(t, "", v.MustString())

    // Set to non-empty string; unchanged.
    v, err = dyn.Get(b.Config.Value(), "resources.volumes.v2.volume_type")
    require.NoError(t, err)
    assert.Equal(t, "already-set", v.MustString())

    // Not set; set to default.
    v, err = dyn.Get(b.Config.Value(), "resources.volumes.v3.volume_type")
    require.NoError(t, err)
    assert.Equal(t, "MANAGED", v.MustString())

    // No valid volume; No change.
    _, err = dyn.Get(b.Config.Value(), "resources.volumes.v4.volume_type")
    assert.True(t, dyn.IsCannotTraverseNilError(err))
}
@@ -7,7 +7,7 @@ import (
    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/git"
-   "github.com/databricks/cli/libs/log"
+   "github.com/databricks/cli/libs/vfs"
)

type loadGitDetails struct{}
@@ -21,50 +21,40 @@ func (m *loadGitDetails) Name() string {
}

func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-    // Load relevant git repository
-    repo, err := git.NewRepository(b.BundleRoot)
+    var diags diag.Diagnostics
+    info, err := git.FetchRepositoryInfo(ctx, b.BundleRoot.Native(), b.WorkspaceClient())
    if err != nil {
-        return diag.FromErr(err)
+        diags = append(diags, diag.WarningFromErr(err)...)
    }

-    // Read branch name of current checkout
-    branch, err := repo.CurrentBranch()
-    if err == nil {
-        b.Config.Bundle.Git.ActualBranch = branch
-        if b.Config.Bundle.Git.Branch == "" {
-            // Only load branch if there's no user defined value
-            b.Config.Bundle.Git.Inferred = true
-            b.Config.Bundle.Git.Branch = branch
-        }
+    if info.WorktreeRoot == "" {
+        b.WorktreeRoot = b.BundleRoot
    } else {
-        log.Warnf(ctx, "failed to load current branch: %s", err)
+        b.WorktreeRoot = vfs.MustNew(info.WorktreeRoot)
    }

+    b.Config.Bundle.Git.ActualBranch = info.CurrentBranch
+    if b.Config.Bundle.Git.Branch == "" {
+        // Only load branch if there's no user defined value
+        b.Config.Bundle.Git.Inferred = true
+        b.Config.Bundle.Git.Branch = info.CurrentBranch
+    }
+
    // load commit hash if undefined
    if b.Config.Bundle.Git.Commit == "" {
-        commit, err := repo.LatestCommit()
-        if err != nil {
-            log.Warnf(ctx, "failed to load latest commit: %s", err)
-        } else {
-            b.Config.Bundle.Git.Commit = commit
-        }
-    }
-    // load origin url if undefined
-    if b.Config.Bundle.Git.OriginURL == "" {
-        remoteUrl := repo.OriginUrl()
-        b.Config.Bundle.Git.OriginURL = remoteUrl
+        b.Config.Bundle.Git.Commit = info.LatestCommit
    }

-    // Compute relative path of the bundle root from the Git repo root.
-    absBundlePath, err := filepath.Abs(b.BundleRootPath)
-    if err != nil {
-        return diag.FromErr(err)
+    // load origin url if undefined
+    if b.Config.Bundle.Git.OriginURL == "" {
+        b.Config.Bundle.Git.OriginURL = info.OriginURL
    }
-    // repo.Root() returns the absolute path of the repo
-    relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath)

+    relBundlePath, err := filepath.Rel(b.WorktreeRoot.Native(), b.BundleRoot.Native())
    if err != nil {
-        return diag.FromErr(err)
+        diags = append(diags, diag.FromErr(err)...)
+    } else {
+        b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath)
    }
-    b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath)
-    return nil
+    return diags
}
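
A worked example of the relative-path computation above, using hypothetical paths. filepath.Rel is the standard-library call the new code relies on:

// Given a worktree root of /repo and a bundle root of /repo/projects/my_bundle:
rel, err := filepath.Rel("/repo", "/repo/projects/my_bundle")
// err == nil and rel == "projects/my_bundle", so Git.BundleRootPath becomes
// filepath.ToSlash(rel) == "projects/my_bundle".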
@@ -26,7 +26,6 @@ func DefaultMutators() []bundle.Mutator {
        ComputeIdToClusterId(),
        InitializeVariables(),
        DefineDefaultTarget(),
-       LoadGitDetails(),
        pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoad),

        // Note: This mutator must run before the target overrides are merged.
@@ -4,7 +4,7 @@ import (
    "context"
    "reflect"
    "runtime"
-   "strings"
+   "slices"
    "testing"

    "github.com/databricks/cli/bundle"
@@ -132,6 +132,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
            Schemas: map[string]*resources.Schema{
                "schema1": {CreateSchema: &catalog.CreateSchema{Name: "schema1"}},
            },
+           Volumes: map[string]*resources.Volume{
+               "volume1": {CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{Name: "volume1"}},
+           },
            Clusters: map[string]*resources.Cluster{
                "cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
            },
@@ -319,6 +322,8 @@ func TestProcessTargetModeDefault(t *testing.T) {
    assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
    assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
    assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
    assert.Equal(t, "schema1", b.Config.Resources.Schemas["schema1"].Name)
+   assert.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name)
    assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}
@@ -363,6 +368,8 @@ func TestProcessTargetModeProduction(t *testing.T) {
    assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
    assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
    assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
    assert.Equal(t, "schema1", b.Config.Resources.Schemas["schema1"].Name)
+   assert.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name)
    assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}
@@ -396,10 +403,17 @@ func TestAllResourcesMocked(t *testing.T) {
    }
}

-// Make sure that we at least rename all resources
-func TestAllResourcesRenamed(t *testing.T) {
+// Make sure that we at least rename all non-UC resources
+func TestAllNonUcResourcesAreRenamed(t *testing.T) {
    b := mockBundle(config.Development)

+   // UC resources should not have a prefix added to their name. Right now
+   // this list only contains the Volume resource since we have yet to remove
+   // prefixing support for UC schemas and registered models.
+   ucFields := []reflect.Type{
+       reflect.TypeOf(&resources.Volume{}),
+   }
+
    m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
    diags := bundle.Apply(context.Background(), b, m)
    require.NoError(t, diags.Error())
@@ -419,14 +433,14 @@ func TestAllResourcesRenamed(t *testing.T) {
            continue
        }

-       if nameField.IsValid() && nameField.Kind() == reflect.String {
-           assert.True(
-               t,
-               strings.Contains(nameField.String(), "dev"),
-               "process_target_mode should rename '%s' in '%s'",
-               key,
-               resources.Type().Field(i).Name,
-           )
+       if !nameField.IsValid() || nameField.Kind() != reflect.String {
+           continue
+       }
+
+       if slices.Contains(ucFields, resource.Type()) {
+           assert.NotContains(t, nameField.String(), "dev", "process_target_mode should not rename '%s' in '%s'", key, resources.Type().Field(i).Name)
+       } else {
+           assert.Contains(t, nameField.String(), "dev", "process_target_mode should rename '%s' in '%s'", key, resources.Type().Field(i).Name)
        }
    }
}
@@ -43,6 +43,7 @@ func allResourceTypes(t *testing.T) []string {
            "quality_monitors",
            "registered_models",
            "schemas",
+           "volumes",
        },
        resourceTypes,
    )
@@ -142,6 +143,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
        "registered_models",
        "experiments",
        "schemas",
+       "volumes",
    }

    base := config.Root{
@@ -20,6 +20,7 @@ type Resources struct {
    RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"`
    QualityMonitors  map[string]*resources.QualityMonitor  `json:"quality_monitors,omitempty"`
    Schemas          map[string]*resources.Schema          `json:"schemas,omitempty"`
+   Volumes          map[string]*resources.Volume          `json:"volumes,omitempty"`
    Clusters         map[string]*resources.Cluster         `json:"clusters,omitempty"`
    Dashboards       map[string]*resources.Dashboard       `json:"dashboards,omitempty"`
    Apps             map[string]*resources.App             `json:"apps,omitempty"`
@@ -86,6 +87,7 @@ func (r *Resources) AllResources() []ResourceGroup {
        collectResourceMap(descriptions["schemas"], r.Schemas),
        collectResourceMap(descriptions["clusters"], r.Clusters),
        collectResourceMap(descriptions["dashboards"], r.Dashboards),
+       collectResourceMap(descriptions["volumes"], r.Volumes),
        collectResourceMap(descriptions["apps"], r.Apps),
    }
}
@@ -191,11 +193,17 @@ func SupportedResources() map[string]ResourceDescription {
            SingularTitle: "Dashboard",
            PluralTitle:   "Dashboards",
        },
-       "apps": {
+       "volumes": {
+           SingularName:  "volume",
+           PluralName:    "volumes",
+           SingularTitle: "Volume",
+           PluralTitle:   "Volumes",
+       },
+       "apps": {
            SingularName:  "app",
            PluralName:    "apps",
            SingularTitle: "App",
            PluralTitle:   "Apps",
        },
    }
}
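
A small hedged sketch of what the new entry enables, assuming the config package import; the fmt usage is illustrative only and not part of the commit:

// Sketch: "volumes" now appears when enumerating supported resource types.
for name, desc := range config.SupportedResources() {
    fmt.Printf("%s: %s / %s\n", name, desc.SingularName, desc.PluralTitle)
    // e.g. "volumes: volume / Volumes"
}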
@@ -0,0 +1,62 @@
package resources

import (
    "context"
    "fmt"
    "net/url"
    "strings"

    "github.com/databricks/databricks-sdk-go"
    "github.com/databricks/databricks-sdk-go/marshal"
    "github.com/databricks/databricks-sdk-go/service/catalog"
)

type Volume struct {
    // List of grants to apply on this volume.
    Grants []Grant `json:"grants,omitempty"`

    // Full name of the volume (catalog_name.schema_name.volume_name). This value is read from
    // the terraform state after deployment succeeds.
    ID string `json:"id,omitempty" bundle:"readonly"`

    *catalog.CreateVolumeRequestContent

    ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
    URL            string         `json:"url,omitempty" bundle:"internal"`
}

func (v *Volume) UnmarshalJSON(b []byte) error {
    return marshal.Unmarshal(b, v)
}

func (v Volume) MarshalJSON() ([]byte, error) {
    return marshal.Marshal(v)
}

func (v *Volume) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
    return false, fmt.Errorf("volume.Exists() is not supported")
}

func (v *Volume) TerraformResourceName() string {
    return "databricks_volume"
}

func (v *Volume) InitializeURL(baseURL url.URL) {
    if v.ID == "" {
        return
    }
    baseURL.Path = fmt.Sprintf("explore/data/volumes/%s", strings.ReplaceAll(v.ID, ".", "/"))
    v.URL = baseURL.String()
}

func (v *Volume) GetURL() string {
    return v.URL
}

func (v *Volume) GetName() string {
    return v.Name
}

func (v *Volume) IsNil() bool {
    return v.CreateVolumeRequestContent == nil
}
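
A hedged example of the URL derivation in InitializeURL above: the fully qualified volume ID maps onto the catalog explorer path by replacing dots with slashes. The workspace host is hypothetical:

v := resources.Volume{ID: "main.my_schema.my_volume"}
base, _ := url.Parse("https://my-workspace.cloud.databricks.com")
v.InitializeURL(*base)
// v.GetURL() ==
//   "https://my-workspace.cloud.databricks.com/explore/data/volumes/main/my_schema/my_volume"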
@@ -44,6 +44,7 @@ func setupBundleForFilesToSyncTest(t *testing.T) *bundle.Bundle {
        BundleRoot:   vfs.MustNew(dir),
        SyncRootPath: dir,
        SyncRoot:     vfs.MustNew(dir),
+       WorktreeRoot: vfs.MustNew(dir),
        Config: config.Root{
            Bundle: config.Bundle{
                Target: "default",
@@ -28,10 +28,11 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp
    }

    opts := &sync.SyncOptions{
-       LocalRoot: rb.SyncRoot(),
-       Paths:     rb.Config().Sync.Paths,
-       Include:   includes,
-       Exclude:   rb.Config().Sync.Exclude,
+       WorktreeRoot: rb.WorktreeRoot(),
+       LocalRoot:    rb.SyncRoot(),
+       Paths:        rb.Config().Sync.Paths,
+       Include:      includes,
+       Exclude:      rb.Config().Sync.Exclude,

        RemotePath: rb.Config().Workspace.FilePath,
        Host:       rb.WorkspaceClient().Config.Host,
@@ -166,6 +166,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
            }
            cur.ID = instance.Attributes.ID
            config.Resources.Schemas[resource.Name] = cur
+       case "databricks_volume":
+           if config.Resources.Volumes == nil {
+               config.Resources.Volumes = make(map[string]*resources.Volume)
+           }
+           cur := config.Resources.Volumes[resource.Name]
+           if cur == nil {
+               cur = &resources.Volume{ModifiedStatus: resources.ModifiedStatusDeleted}
+           }
+           cur.ID = instance.Attributes.ID
+           config.Resources.Volumes[resource.Name] = cur
        case "databricks_cluster":
            if config.Resources.Clusters == nil {
                config.Resources.Clusters = make(map[string]*resources.Cluster)
@@ -245,6 +255,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
            src.ModifiedStatus = resources.ModifiedStatusCreated
        }
    }
+   for _, src := range config.Resources.Volumes {
+       if src.ModifiedStatus == "" && src.ID == "" {
+           src.ModifiedStatus = resources.ModifiedStatusCreated
+       }
+   }
    for _, src := range config.Resources.Clusters {
        if src.ModifiedStatus == "" && src.ID == "" {
            src.ModifiedStatus = resources.ModifiedStatusCreated
@@ -671,6 +671,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
                {Attributes: stateInstanceAttributes{ID: "1"}},
            },
        },
+       {
+           Type: "databricks_volume",
+           Mode: "managed",
+           Name: "test_volume",
+           Instances: []stateResourceInstance{
+               {Attributes: stateInstanceAttributes{ID: "1"}},
+           },
+       },
        {
            Type: "databricks_cluster",
            Mode: "managed",
@@ -724,6 +732,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
    assert.Equal(t, "1", config.Resources.Schemas["test_schema"].ID)
    assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Schemas["test_schema"].ModifiedStatus)

+   assert.Equal(t, "1", config.Resources.Volumes["test_volume"].ID)
+   assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Volumes["test_volume"].ModifiedStatus)
+
    assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
    assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus)
@@ -795,6 +806,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
                },
            },
        },
+       Volumes: map[string]*resources.Volume{
+           "test_volume": {
+               CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
+                   Name: "test_volume",
+               },
+           },
+       },
        Clusters: map[string]*resources.Cluster{
            "test_cluster": {
                ClusterSpec: &compute.ClusterSpec{
@@ -848,6 +866,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
    assert.Equal(t, "", config.Resources.Schemas["test_schema"].ID)
    assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema"].ModifiedStatus)

+   assert.Equal(t, "", config.Resources.Volumes["test_volume"].ID)
+   assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Volumes["test_volume"].ModifiedStatus)
+
    assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID)
    assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus)
@@ -959,6 +980,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
                },
            },
        },
+       Volumes: map[string]*resources.Volume{
+           "test_volume": {
+               CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
+                   Name: "test_volume",
+               },
+           },
+           "test_volume_new": {
+               CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
+                   Name: "test_volume_new",
+               },
+           },
+       },
        Clusters: map[string]*resources.Cluster{
            "test_cluster": {
                ClusterSpec: &compute.ClusterSpec{
@@ -1127,6 +1160,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
                {Attributes: stateInstanceAttributes{ID: "2"}},
            },
        },
+       {
+           Type: "databricks_volume",
+           Mode: "managed",
+           Name: "test_volume",
+           Instances: []stateResourceInstance{
+               {Attributes: stateInstanceAttributes{ID: "1"}},
+           },
+       },
+       {
+           Type: "databricks_volume",
+           Mode: "managed",
+           Name: "test_volume_old",
+           Instances: []stateResourceInstance{
+               {Attributes: stateInstanceAttributes{ID: "2"}},
+           },
+       },
        {
            Type: "databricks_cluster",
            Mode: "managed",
@@ -1236,6 +1285,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
    assert.Equal(t, "", config.Resources.Schemas["test_schema_new"].ID)
    assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema_new"].ModifiedStatus)

+   assert.Equal(t, "1", config.Resources.Volumes["test_volume"].ID)
+   assert.Equal(t, "", config.Resources.Volumes["test_volume"].ModifiedStatus)
+   assert.Equal(t, "2", config.Resources.Volumes["test_volume_old"].ID)
+   assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Volumes["test_volume_old"].ModifiedStatus)
+   assert.Equal(t, "", config.Resources.Volumes["test_volume_new"].ID)
+   assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Volumes["test_volume_new"].ModifiedStatus)
+
    assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
    assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ModifiedStatus)
    assert.Equal(t, "2", config.Resources.Clusters["test_cluster_old"].ID)
@@ -56,7 +56,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
    buf := bytes.NewBuffer(nil)
    tf.SetStdout(buf)

-   //lint:ignore SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file
+   //nolint:staticcheck // SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file
    changed, err := tf.Plan(ctx, tfexec.State(tmpState), tfexec.Target(importAddress))
    if err != nil {
        return diag.Errorf("terraform plan: %v", err)
@@ -58,6 +58,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D
            path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...)
        case dyn.Key("schemas"):
            path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...)
+       case dyn.Key("volumes"):
+           path = dyn.NewPath(dyn.Key("databricks_volume")).Append(path[2:]...)
        case dyn.Key("clusters"):
            path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...)
        case dyn.Key("dashboards"):
@@ -31,6 +31,7 @@ func TestInterpolate(t *testing.T) {
                "other_model_serving":    "${resources.model_serving_endpoints.other_model_serving.id}",
                "other_registered_model": "${resources.registered_models.other_registered_model.id}",
                "other_schema":           "${resources.schemas.other_schema.id}",
+               "other_volume":           "${resources.volumes.other_volume.id}",
                "other_cluster":          "${resources.clusters.other_cluster.id}",
                "other_dashboard":        "${resources.dashboards.other_dashboard.id}",
                "other_app":              "${resources.apps.other_app.id}",
@@ -70,6 +71,7 @@ func TestInterpolate(t *testing.T) {
    assert.Equal(t, "${databricks_model_serving.other_model_serving.id}", j.Tags["other_model_serving"])
    assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"])
    assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])
+   assert.Equal(t, "${databricks_volume.other_volume.id}", j.Tags["other_volume"])
    assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"])
    assert.Equal(t, "${databricks_dashboard.other_dashboard.id}", j.Tags["other_dashboard"])
    assert.Equal(t, "${databricks_app.other_app.id}", j.Tags["other_app"])
@@ -0,0 +1,45 @@
package tfdyn

import (
    "context"
    "fmt"

    "github.com/databricks/cli/bundle/internal/tf/schema"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/dyn/convert"
    "github.com/databricks/cli/libs/log"
)

func convertVolumeResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
    // Normalize the output value to the target schema.
    vout, diags := convert.Normalize(schema.ResourceVolume{}, vin)
    for _, diag := range diags {
        log.Debugf(ctx, "volume normalization diagnostic: %s", diag.Summary)
    }

    return vout, nil
}

type volumeConverter struct{}

func (volumeConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
    vout, err := convertVolumeResource(ctx, vin)
    if err != nil {
        return err
    }

    // Add the converted resource to the output.
    out.Volume[key] = vout.AsAny()

    // Configure grants for this resource.
    if grants := convertGrantsResource(ctx, vin); grants != nil {
        grants.Volume = fmt.Sprintf("${databricks_volume.%s.id}", key)
        out.Grants["volume_"+key] = grants
    }

    return nil
}

func init() {
    registerConverter("volumes", volumeConverter{})
}
@@ -0,0 +1,70 @@
package tfdyn

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/bundle/internal/tf/schema"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/dyn/convert"
    "github.com/databricks/databricks-sdk-go/service/catalog"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestConvertVolume(t *testing.T) {
    var src = resources.Volume{
        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
            CatalogName:     "catalog",
            Comment:         "comment",
            Name:            "name",
            SchemaName:      "schema",
            StorageLocation: "s3://bucket/path",
            VolumeType:      "EXTERNAL",
        },
        Grants: []resources.Grant{
            {
                Privileges: []string{"READ_VOLUME"},
                Principal:  "jack@gmail.com",
            },
            {
                Privileges: []string{"WRITE_VOLUME"},
                Principal:  "jane@gmail.com",
            },
        },
    }

    vin, err := convert.FromTyped(src, dyn.NilValue)
    require.NoError(t, err)

    ctx := context.Background()
    out := schema.NewResources()
    err = volumeConverter{}.Convert(ctx, "my_volume", vin, out)
    require.NoError(t, err)

    // Assert equality on the volume
    require.Equal(t, map[string]any{
        "catalog_name":     "catalog",
        "comment":          "comment",
        "name":             "name",
        "schema_name":      "schema",
        "storage_location": "s3://bucket/path",
        "volume_type":      "EXTERNAL",
    }, out.Volume["my_volume"])

    // Assert equality on the grants
    assert.Equal(t, &schema.ResourceGrants{
        Volume: "${databricks_volume.my_volume.id}",
        Grant: []schema.ResourceGrantsGrant{
            {
                Privileges: []string{"READ_VOLUME"},
                Principal:  "jack@gmail.com",
            },
            {
                Privileges: []string{"WRITE_VOLUME"},
                Principal:  "jane@gmail.com",
            },
        },
    }, out.Grants["volume_my_volume"])
}
@@ -93,6 +93,24 @@ func removeJobsFields(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
    return s
}

+// While volume_type is required in the volume create API, DABs automatically sets
+// its value to "MANAGED" if it's not provided. Thus, we make it optional
+// in the bundle schema.
+func makeVolumeTypeOptional(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
+    if typ != reflect.TypeOf(resources.Volume{}) {
+        return s
+    }
+
+    res := []string{}
+    for _, r := range s.Required {
+        if r != "volume_type" {
+            res = append(res, r)
+        }
+    }
+    s.Required = res
+    return s
+}
+
func main() {
    if len(os.Args) != 2 {
        fmt.Println("Usage: go run main.go <output-file>")
@@ -118,6 +136,7 @@ func main() {
        p.addDescriptions,
        p.addEnums,
        removeJobsFields,
+       makeVolumeTypeOptional,
        addInterpolationPatterns,
    })
    if err != nil {
@@ -0,0 +1,10 @@
bundle:
  name: volume with incorrect type

resources:
  volumes:
    foo:
      catalog_name: main
      name: my_volume
      schema_name: myschema
      volume_type: incorrect_type
@@ -0,0 +1,9 @@
bundle:
  name: a volume

resources:
  volumes:
    foo:
      catalog_name: main
      name: my_volume
      schema_name: myschema
@@ -1,3 +1,3 @@
package schema

-const ProviderVersion = "1.58.0"
+const ProviderVersion = "1.59.0"
@@ -21,7 +21,7 @@ type Root struct {

const ProviderHost = "registry.terraform.io"
const ProviderSource = "databricks/databricks"
-const ProviderVersion = "1.58.0"
+const ProviderVersion = "1.59.0"

func NewRoot() *Root {
    return &Root{
@@ -0,0 +1,32 @@
package libraries

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/filer"
)

// We upload artifacts to the workspace in a directory named ".internal" to have
// a well defined location for artifacts that have been uploaded by the DABs.
const InternalDirName = ".internal"

// This function returns a filer for uploading artifacts to the configured location.
// Supported locations:
// 1. WSFS
// 2. UC volumes
func GetFilerForLibraries(ctx context.Context, b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) {
    artifactPath := b.Config.Workspace.ArtifactPath
    if artifactPath == "" {
        return nil, "", diag.Errorf("remote artifact path not configured")
    }

    switch {
    case IsVolumesPath(artifactPath):
        return filerForVolume(ctx, b)

    default:
        return filerForWorkspace(b)
    }
}
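
A hedged sketch of a caller of the function above (not part of this commit): the returned filer is rooted at <artifact_path>/.internal for both the WSFS and UC volume backends. `ctx` and `b` are assumed from the caller's setup, and "dist/my_lib.whl" is a hypothetical path:

client, uploadPath, diags := libraries.GetFilerForLibraries(ctx, b)
if diags.HasError() {
    return diags
}
log.Infof(ctx, "uploading artifacts to %s", uploadPath)
if err := libraries.UploadFile(ctx, "dist/my_lib.whl", client); err != nil {
    return diag.FromErr(err)
}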
@@ -0,0 +1,63 @@
package libraries

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/libs/filer"
    sdkconfig "github.com/databricks/databricks-sdk-go/config"
    "github.com/databricks/databricks-sdk-go/experimental/mocks"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
)

func TestGetFilerForLibrariesValidWsfs(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/foo/bar/artifacts",
            },
        },
    }

    client, uploadPath, diags := GetFilerForLibraries(context.Background(), b)
    require.NoError(t, diags.Error())
    assert.Equal(t, "/foo/bar/artifacts/.internal", uploadPath)

    assert.IsType(t, &filer.WorkspaceFilesClient{}, client)
}

func TestGetFilerForLibrariesValidUcVolume(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volumes/main/my_schema/my_volume",
            },
        },
    }

    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(nil)
    b.SetWorkpaceClient(m.WorkspaceClient)

    client, uploadPath, diags := GetFilerForLibraries(context.Background(), b)
    require.NoError(t, diags.Error())
    assert.Equal(t, "/Volumes/main/my_schema/my_volume/.internal", uploadPath)

    assert.IsType(t, &filer.FilesClient{}, client)
}

func TestGetFilerForLibrariesRemotePathNotSet(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{},
        },
    }

    _, _, diags := GetFilerForLibraries(context.Background(), b)
    require.EqualError(t, diags.Error(), "remote artifact path not configured")
}
@@ -0,0 +1,132 @@
package libraries

import (
    "context"
    "errors"
    "fmt"
    "path"
    "strings"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/dyn/dynvar"
    "github.com/databricks/cli/libs/filer"
    "github.com/databricks/databricks-sdk-go/apierr"
)

func extractVolumeFromPath(artifactPath string) (string, string, string, error) {
    if !IsVolumesPath(artifactPath) {
        return "", "", "", fmt.Errorf("expected artifact_path to start with /Volumes/, got %s", artifactPath)
    }

    parts := strings.Split(artifactPath, "/")
    volumeFormatErr := fmt.Errorf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", artifactPath)

    // Incorrect format.
    if len(parts) < 5 {
        return "", "", "", volumeFormatErr
    }

    catalogName := parts[2]
    schemaName := parts[3]
    volumeName := parts[4]

    // Incorrect format.
    if catalogName == "" || schemaName == "" || volumeName == "" {
        return "", "", "", volumeFormatErr
    }

    return catalogName, schemaName, volumeName, nil
}

// This function returns a filer for ".internal" folder inside the directory configured
// at `workspace.artifact_path`.
// This function also checks if the UC volume exists in the workspace and then:
//  1. If the UC volume exists in the workspace:
//     Returns a filer for the UC volume.
//  2. If the UC volume does not exist in the workspace but is (with high confidence) defined in
//     the bundle configuration:
//     Returns an error and a warning that instructs the user to deploy the
//     UC volume before using it in the artifact path.
//  3. If the UC volume does not exist in the workspace and is not defined in the bundle configuration:
//     Returns an error.
func filerForVolume(ctx context.Context, b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) {
    artifactPath := b.Config.Workspace.ArtifactPath
    w := b.WorkspaceClient()

    catalogName, schemaName, volumeName, err := extractVolumeFromPath(artifactPath)
    if err != nil {
        return nil, "", diag.Diagnostics{
            {
                Severity:  diag.Error,
                Summary:   err.Error(),
                Locations: b.Config.GetLocations("workspace.artifact_path"),
                Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
            },
        }
    }

    // Check if the UC volume exists in the workspace.
    volumePath := fmt.Sprintf("/Volumes/%s/%s/%s", catalogName, schemaName, volumeName)
    err = w.Files.GetDirectoryMetadataByDirectoryPath(ctx, volumePath)

    // If the volume exists already, directly return the filer for the path to
    // upload the artifacts to.
    if err == nil {
        uploadPath := path.Join(artifactPath, InternalDirName)
        f, err := filer.NewFilesClient(w, uploadPath)
        return f, uploadPath, diag.FromErr(err)
    }

    baseErr := diag.Diagnostic{
        Severity:  diag.Error,
        Summary:   fmt.Sprintf("unable to determine if volume at %s exists: %s", volumePath, err),
        Locations: b.Config.GetLocations("workspace.artifact_path"),
        Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
    }

    if errors.Is(err, apierr.ErrNotFound) {
        // Since the API returned a 404, the volume does not exist.
        // Modify the error message to provide more context.
        baseErr.Summary = fmt.Sprintf("volume %s does not exist: %s", volumePath, err)

        // If the volume is defined in the bundle, provide a more helpful error diagnostic,
        // with more details and location information.
        path, locations, ok := findVolumeInBundle(b, catalogName, schemaName, volumeName)
        if !ok {
            return nil, "", diag.Diagnostics{baseErr}
        }
        baseErr.Detail = `You are using a volume in your artifact_path that is managed by
this bundle but which has not been deployed yet. Please first deploy
the volume using 'bundle deploy' and then switch over to using it in
the artifact_path.`
        baseErr.Paths = append(baseErr.Paths, path)
        baseErr.Locations = append(baseErr.Locations, locations...)
    }

    return nil, "", diag.Diagnostics{baseErr}
}

func findVolumeInBundle(b *bundle.Bundle, catalogName, schemaName, volumeName string) (dyn.Path, []dyn.Location, bool) {
    volumes := b.Config.Resources.Volumes
    for k, v := range volumes {
        if v.CatalogName != catalogName || v.Name != volumeName {
            continue
        }
        // UC schemas can be defined in the bundle itself, and thus might be interpolated
        // at runtime via the ${resources.schemas.<name>} syntax. Thus we match the volume
        // definition if the schema name is the same as the one in the bundle, or if the
        // schema name is interpolated.
        // We only have to check for ${resources.schemas...} references because any
        // other valid reference (like ${var.foo}) would have been interpolated by this point.
        p, ok := dynvar.PureReferenceToPath(v.SchemaName)
        isSchemaDefinedInBundle := ok && p.HasPrefix(dyn.Path{dyn.Key("resources"), dyn.Key("schemas")})
        if v.SchemaName != schemaName && !isSchemaDefinedInBundle {
            continue
        }
        pathString := fmt.Sprintf("resources.volumes.%s", k)
        return dyn.MustPathFromString(pathString), b.Config.GetLocations(pathString), true
    }
    return nil, nil, false
}
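
An illustration of the interpolation check used in findVolumeInBundle above, assuming the dynvar and dyn imports from this file (the string values are hypothetical):

p, ok := dynvar.PureReferenceToPath("${resources.schemas.my_schema.name}")
// ok == true and p.HasPrefix(dyn.Path{dyn.Key("resources"), dyn.Key("schemas")})
// holds, so the schema is treated as bundle-managed and the volume matches.
_, ok = dynvar.PureReferenceToPath("${foo.bar.baz}")
// Also a pure reference (ok == true), but the prefix check fails, so no match;
// this is the case exercised by TestFindVolumeInBundle below.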
@@ -0,0 +1,275 @@
package libraries

import (
    "context"
    "fmt"
    "path"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/bundle/internal/bundletest"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/cli/libs/filer"
    "github.com/databricks/databricks-sdk-go/apierr"
    sdkconfig "github.com/databricks/databricks-sdk-go/config"
    "github.com/databricks/databricks-sdk-go/experimental/mocks"
    "github.com/databricks/databricks-sdk-go/service/catalog"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
)

func TestFindVolumeInBundle(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Resources: config.Resources{
                Volumes: map[string]*resources.Volume{
                    "foo": {
                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
                            CatalogName: "main",
                            Name:        "my_volume",
                            SchemaName:  "my_schema",
                        },
                    },
                },
            },
        },
    }

    bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{
        {
            File:   "volume.yml",
            Line:   1,
            Column: 2,
        },
    })

    // volume is in DAB.
    path, locations, ok := findVolumeInBundle(b, "main", "my_schema", "my_volume")
    assert.True(t, ok)
    assert.Equal(t, []dyn.Location{{
        File:   "volume.yml",
        Line:   1,
        Column: 2,
    }}, locations)
    assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path)

    // wrong volume name
    _, _, ok = findVolumeInBundle(b, "main", "my_schema", "doesnotexist")
    assert.False(t, ok)

    // wrong schema name
    _, _, ok = findVolumeInBundle(b, "main", "doesnotexist", "my_volume")
    assert.False(t, ok)

    // wrong catalog name
    _, _, ok = findVolumeInBundle(b, "doesnotexist", "my_schema", "my_volume")
    assert.False(t, ok)

    // schema name is interpolated but does not have the right prefix. In this case
    // we should not match the volume.
    b.Config.Resources.Volumes["foo"].SchemaName = "${foo.bar.baz}"
    _, _, ok = findVolumeInBundle(b, "main", "my_schema", "my_volume")
    assert.False(t, ok)

    // schema name is interpolated.
    b.Config.Resources.Volumes["foo"].SchemaName = "${resources.schemas.my_schema.name}"
    path, locations, ok = findVolumeInBundle(b, "main", "valuedoesnotmatter", "my_volume")
    assert.True(t, ok)
    assert.Equal(t, []dyn.Location{{
        File:   "volume.yml",
        Line:   1,
        Column: 2,
    }}, locations)
    assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path)
}

func TestFilerForVolumeForErrorFromAPI(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volumes/main/my_schema/my_volume",
            },
        },
    }

    bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})

    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(fmt.Errorf("error from API"))
    b.SetWorkpaceClient(m.WorkspaceClient)

    _, _, diags := filerForVolume(context.Background(), b)
    assert.Equal(t, diag.Diagnostics{
        {
            Severity:  diag.Error,
            Summary:   "unable to determine if volume at /Volumes/main/my_schema/my_volume exists: error from API",
            Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
        }}, diags)
}

func TestFilerForVolumeWithVolumeNotFound(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volumes/main/my_schema/doesnotexist",
            },
        },
    }

    bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})

    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/doesnotexist").Return(apierr.NotFound("some error message"))
    b.SetWorkpaceClient(m.WorkspaceClient)

    _, _, diags := filerForVolume(context.Background(), b)
    assert.Equal(t, diag.Diagnostics{
        {
            Severity:  diag.Error,
            Summary:   "volume /Volumes/main/my_schema/doesnotexist does not exist: some error message",
            Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
        }}, diags)
}

func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volumes/main/my_schema/my_volume",
            },
            Resources: config.Resources{
                Volumes: map[string]*resources.Volume{
                    "foo": {
                        CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{
                            CatalogName: "main",
                            Name:        "my_volume",
                            VolumeType:  "MANAGED",
                            SchemaName:  "my_schema",
                        },
                    },
                },
            },
        },
    }

    bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})
    bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{{File: "volume.yml", Line: 1, Column: 2}})

    m := mocks.NewMockWorkspaceClient(t)
    m.WorkspaceClient.Config = &sdkconfig.Config{}
    m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(apierr.NotFound("error from API"))
    b.SetWorkpaceClient(m.WorkspaceClient)

    _, _, diags := GetFilerForLibraries(context.Background(), b)
    assert.Equal(t, diag.Diagnostics{
        {
            Severity:  diag.Error,
            Summary:   "volume /Volumes/main/my_schema/my_volume does not exist: error from API",
            Locations: []dyn.Location{{"config.yml", 1, 2}, {"volume.yml", 1, 2}},
            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path"), dyn.MustPathFromString("resources.volumes.foo")},
            Detail: `You are using a volume in your artifact_path that is managed by
this bundle but which has not been deployed yet. Please first deploy
the volume using 'bundle deploy' and then switch over to using it in
the artifact_path.`,
        },
    }, diags)
}

func invalidVolumePaths() []string {
    return []string{
        "/Volumes/",
        "/Volumes/main",
        "/Volumes/main/",
        "/Volumes/main//",
        "/Volumes/main//my_schema",
        "/Volumes/main/my_schema",
        "/Volumes/main/my_schema/",
        "/Volumes/main/my_schema//",
        "/Volumes//my_schema/my_volume",
    }
}

func TestFilerForVolumeWithInvalidVolumePaths(t *testing.T) {
    for _, p := range invalidVolumePaths() {
        b := &bundle.Bundle{
            Config: config.Root{
                Workspace: config.Workspace{
                    ArtifactPath: p,
                },
            },
        }

        bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}})

        _, _, diags := GetFilerForLibraries(context.Background(), b)
        require.Equal(t, diags, diag.Diagnostics{{
            Severity:  diag.Error,
            Summary:   fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p),
            Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
            Paths:     []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
        }})
    }
}

func TestFilerForVolumeWithInvalidPrefix(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Volume/main/my_schema/my_volume",
            },
        },
    }

    _, _, diags := filerForVolume(context.Background(), b)
    require.EqualError(t, diags.Error(), "expected artifact_path to start with /Volumes/, got /Volume/main/my_schema/my_volume")
}

func TestFilerForVolumeWithValidVolumePaths(t *testing.T) {
    validPaths := []string{
        "/Volumes/main/my_schema/my_volume",
        "/Volumes/main/my_schema/my_volume/",
        "/Volumes/main/my_schema/my_volume/a/b/c",
        "/Volumes/main/my_schema/my_volume/a/a/a",
    }

    for _, p := range validPaths {
        b := &bundle.Bundle{
            Config: config.Root{
                Workspace: config.Workspace{
                    ArtifactPath: p,
                },
            },
        }

        m := mocks.NewMockWorkspaceClient(t)
        m.WorkspaceClient.Config = &sdkconfig.Config{}
        m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(nil)
        b.SetWorkpaceClient(m.WorkspaceClient)

        client, uploadPath, diags := filerForVolume(context.Background(), b)
        require.NoError(t, diags.Error())
        assert.Equal(t, path.Join(p, ".internal"), uploadPath)
        assert.IsType(t, &filer.FilesClient{}, client)
    }
}

func TestExtractVolumeFromPath(t *testing.T) {
    catalogName, schemaName, volumeName, err := extractVolumeFromPath("/Volumes/main/my_schema/my_volume")
    require.NoError(t, err)
    assert.Equal(t, "main", catalogName)
    assert.Equal(t, "my_schema", schemaName)
    assert.Equal(t, "my_volume", volumeName)

    for _, p := range invalidVolumePaths() {
        _, _, _, err := extractVolumeFromPath(p)
        assert.EqualError(t, err, fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p))
    }
}
@@ -0,0 +1,15 @@
package libraries

import (
    "path"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/filer"
)

func filerForWorkspace(b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) {
    uploadPath := path.Join(b.Config.Workspace.ArtifactPath, InternalDirName)
    f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath)
    return f, uploadPath, diag.FromErr(err)
}
@@ -0,0 +1,27 @@
package libraries

import (
    "path"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/libs/filer"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestFilerForWorkspace(t *testing.T) {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                ArtifactPath: "/Workspace/Users/shreyas.goenka@databricks.com/a/b/c",
            },
        },
    }

    client, uploadPath, diags := filerForWorkspace(b)
    require.NoError(t, diags.Error())
    assert.Equal(t, path.Join("/Workspace/Users/shreyas.goenka@databricks.com/a/b/c/.internal"), uploadPath)
    assert.IsType(t, &filer.WorkspaceFilesClient{}, client)
}
@@ -16,8 +16,6 @@ import (
	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/cli/libs/log"

	"github.com/databricks/databricks-sdk-go"

	"golang.org/x/sync/errgroup"
)

@@ -130,24 +128,17 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error
}

func (u *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	uploadPath, err := GetUploadBasePath(b)
	if err != nil {
		return diag.FromErr(err)
	client, uploadPath, diags := GetFilerForLibraries(ctx, b)
	if diags.HasError() {
		return diags
	}

	// If the client is not initialized, initialize it
	// We use client field in mutator to allow for mocking client in testing
	// Only set the filer client if it's not already set. We use the client field
	// in the mutator to mock the filer client in testing
	if u.client == nil {
		filer, err := GetFilerForLibraries(b.WorkspaceClient(), uploadPath)
		if err != nil {
			return diag.FromErr(err)
		}

		u.client = filer
		u.client = client
	}

	var diags diag.Diagnostics

	libs, err := collectLocalLibraries(b)
	if err != nil {
		return diag.FromErr(err)

@@ -197,17 +188,6 @@ func (u *upload) Name() string {
	return "libraries.Upload"
}

func GetFilerForLibraries(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) {
	if isVolumesPath(uploadPath) {
		return filer.NewFilesClient(w, uploadPath)
	}
	return filer.NewWorkspaceFilesClient(w, uploadPath)
}

func isVolumesPath(path string) bool {
	return strings.HasPrefix(path, "/Volumes/")
}

// Function to upload a file (a library, an artifact, etc.) to the Workspace or a UC volume
func UploadFile(ctx context.Context, file string, client filer.Filer) error {
	filename := filepath.Base(file)
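
Editorial note: the prefix dispatch removed above moves behind the new GetFilerForLibraries(ctx, b), which derives the upload path itself and reports problems as diagnostics rather than a bare error. A minimal sketch of a caller under that contract, assuming a hypothetical mutator (the real caller is (*upload).Apply in the hunk above); diag.Diagnostics is a slice, so append composes results:

	// Sketch only: consuming the new three-value contract.
	func (m *exampleMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		client, uploadPath, diags := libraries.GetFilerForLibraries(ctx, b)
		if diags.HasError() {
			return diags // validation problems carry locations and paths, not just a message
		}

		// Upload a local artifact into the derived ".internal" directory via the filer.
		// "dist/my_lib.whl" is an illustrative path, not from this commit.
		if err := libraries.UploadFile(ctx, "dist/my_lib.whl", client); err != nil {
			return append(diags, diag.FromErr(err)...)
		}

		_ = uploadPath // e.g. rewrite library references to point at uploadPath
		return diags
	}
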
@@ -227,12 +207,3 @@ func UploadFile(ctx context.Context, file string, client filer.Filer) error {
	log.Infof(ctx, "Upload succeeded")
	return nil
}

func GetUploadBasePath(b *bundle.Bundle) (string, error) {
	artifactPath := b.Config.Workspace.ArtifactPath
	if artifactPath == "" {
		return "", fmt.Errorf("remote artifact path not configured")
	}

	return path.Join(artifactPath, ".internal"), nil
}

@@ -11,6 +11,8 @@ import (
	mockfiler "github.com/databricks/cli/internal/mocks/libs/filer"
	"github.com/databricks/cli/internal/testutil"
	"github.com/databricks/cli/libs/filer"
	sdkconfig "github.com/databricks/databricks-sdk-go/config"
	"github.com/databricks/databricks-sdk-go/experimental/mocks"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/mock"

@@ -181,6 +183,11 @@ func TestArtifactUploadForVolumes(t *testing.T) {
		filer.CreateParentDirectories,
	).Return(nil)

	m := mocks.NewMockWorkspaceClient(t)
	m.WorkspaceClient.Config = &sdkconfig.Config{}
	m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/foo/bar/artifacts").Return(nil)
	b.SetWorkpaceClient(m.WorkspaceClient)

	diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler)))
	require.NoError(t, diags.Error())

@@ -23,10 +23,10 @@ import (
	tfjson "github.com/hashicorp/terraform-json"
)

func parseTerraformActions(changes []*tfjson.ResourceChange, toInclude func(typ string, actions tfjson.Actions) bool) []terraformlib.Action {
func filterDeleteOrRecreateActions(changes []*tfjson.ResourceChange, resourceType string) []terraformlib.Action {
	res := make([]terraformlib.Action, 0)
	for _, rc := range changes {
		if !toInclude(rc.Type, rc.Change.Actions) {
		if rc.Type != resourceType {
			continue
		}

@@ -37,7 +37,7 @@ func parseTerraformActions(changes []*tfjson.ResourceChange, toInclude func(typ
		case rc.Change.Actions.Replace():
			actionType = terraformlib.ActionTypeRecreate
		default:
			// No use case for other action types yet.
			// Filter out other action types.
			continue
		}

@@ -63,30 +63,12 @@ func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) {
		return false, err
	}

	schemaActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool {
		// Filter in only UC schema resources.
		if typ != "databricks_schema" {
			return false
		}

		// We only display prompts for destructive actions like deleting or
		// recreating a schema.
		return actions.Delete() || actions.Replace()
	})

	dltActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool {
		// Filter in only DLT pipeline resources.
		if typ != "databricks_pipeline" {
			return false
		}

		// Recreating a DLT pipeline leads to metadata loss and for a transient period
		// the underlying tables will be unavailable.
		return actions.Replace() || actions.Delete()
	})
	schemaActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_schema")
	dltActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_pipeline")
	volumeActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_volume")

	// We don't need to display any prompts in this case.
	if len(dltActions) == 0 && len(schemaActions) == 0 {
	if len(schemaActions) == 0 && len(dltActions) == 0 && len(volumeActions) == 0 {
		return true, nil
	}

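Editorial note: the hunks above show the filter's head and its classification switch but not the whole function. A hedged reconstruction of the helper as it reads after this commit, assuming the unchanged middle of the function matches the pre-commit switch and that the field names on terraformlib.Action are as written here (they are an assumption, not taken from this diff):

	// Sketch, not verbatim from the commit: keep only destructive
	// (delete or recreate) actions for a single resource type.
	func filterDeleteOrRecreateActionsSketch(changes []*tfjson.ResourceChange, resourceType string) []terraformlib.Action {
		res := make([]terraformlib.Action, 0)
		for _, rc := range changes {
			if rc.Type != resourceType {
				continue
			}

			var actionType terraformlib.ActionType
			switch {
			case rc.Change.Actions.Delete():
				actionType = terraformlib.ActionTypeDelete
			case rc.Change.Actions.Replace():
				actionType = terraformlib.ActionTypeRecreate
			default:
				// Filter out other action types.
				continue
			}

			res = append(res, terraformlib.Action{
				Action:       actionType,
				ResourceType: rc.Type,
				ResourceName: rc.Name,
			})
		}
		return res
	}
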
@@ -111,6 +93,19 @@ properties such as the 'catalog' or 'storage' are changed:`
		}
	}

	// One or more volumes are being deleted or recreated.
	if len(volumeActions) != 0 {
		msg := `
This action will result in the deletion or recreation of the following volumes.
For managed volumes, the files stored in the volume are also deleted from your
cloud tenant within 30 days. For external volumes, the metadata about the volume
is removed from the catalog, but the underlying files are not deleted:`
		cmdio.LogString(ctx, msg)
		for _, action := range volumeActions {
			cmdio.Log(ctx, action)
		}
	}

	if b.AutoApprove {
		return true, nil
	}

@@ -40,17 +40,7 @@ func TestParseTerraformActions(t *testing.T) {
		},
	}

	res := parseTerraformActions(changes, func(typ string, actions tfjson.Actions) bool {
		if typ != "databricks_pipeline" {
			return false
		}

		if actions.Delete() || actions.Replace() {
			return true
		}

		return false
	})
	res := filterDeleteOrRecreateActions(changes, "databricks_pipeline")

	assert.Equal(t, []terraformlib.Action{
		{

@@ -42,6 +42,7 @@ func Initialize() bundle.Mutator {

		mutator.InitializeWorkspaceClient(),
		mutator.PopulateCurrentUser(),
		mutator.LoadGitDetails(),

		mutator.DefineDefaultWorkspaceRoot(),
		mutator.ExpandWorkspaceRoot(),

@@ -70,6 +71,7 @@ func Initialize() bundle.Mutator {
		mutator.SetRunAs(),
		mutator.OverrideCompute(),
		mutator.ConfigureDashboardDefaults(),
		mutator.ConfigureVolumeDefaults(),
		mutator.ProcessTargetMode(),
		mutator.ApplyPresets(),
		mutator.DefaultQueueing(),

@@ -791,6 +791,51 @@
        }
      ]
    },
    "resources.Volume": {
      "anyOf": [
        {
          "type": "object",
          "properties": {
            "catalog_name": {
              "description": "The name of the catalog where the schema and the volume are",
              "$ref": "#/$defs/string"
            },
            "comment": {
              "description": "The comment attached to the volume",
              "$ref": "#/$defs/string"
            },
            "grants": {
              "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Grant"
            },
            "name": {
              "description": "The name of the volume",
              "$ref": "#/$defs/string"
            },
            "schema_name": {
              "description": "The name of the schema where the volume is",
              "$ref": "#/$defs/string"
            },
            "storage_location": {
              "description": "The storage location on the cloud",
              "$ref": "#/$defs/string"
            },
            "volume_type": {
              "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.VolumeType"
            }
          },
          "additionalProperties": false,
          "required": [
            "catalog_name",
            "name",
            "schema_name"
          ]
        },
        {
          "type": "string",
          "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
        }
      ]
    },
    "variable.Lookup": {
      "anyOf": [
        {

@@ -963,6 +1008,9 @@
        },
        "name": {
          "$ref": "#/$defs/string"
        },
        "uuid": {
          "$ref": "#/$defs/string"
        }
      },
      "additionalProperties": false,

@@ -1157,6 +1205,9 @@
        },
        "schemas": {
          "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema"
        },
        "volumes": {
          "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Volume"
        }
      },
      "additionalProperties": false

@@ -1558,6 +1609,13 @@
        }
      ]
    },
    "catalog.VolumeType": {
      "type": "string",
      "enum": [
        "EXTERNAL",
        "MANAGED"
      ]
    },
    "compute.Adlsgen2Info": {
      "anyOf": [
        {

@@ -5565,6 +5623,20 @@
        }
      ]
    },
    "resources.Volume": {
      "anyOf": [
        {
          "type": "object",
          "additionalProperties": {
            "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Volume"
          }
        },
        {
          "type": "string",
          "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
        }
      ]
    },
    "variable.TargetVariable": {
      "anyOf": [
        {

@@ -1,15 +1,19 @@
package config_tests

import (
	"context"
	"fmt"
	"strings"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/stretchr/testify/assert"
)

func TestGitAutoLoadWithEnvironment(t *testing.T) {
	b := load(t, "./environments_autoload_git")
	bundle.Apply(context.Background(), b, mutator.LoadGitDetails())
	assert.True(t, b.Config.Bundle.Git.Inferred)
	validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
	assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))

@@ -17,6 +21,7 @@ func TestGitAutoLoadWithEnvironment(t *testing.T) {

func TestGitManuallySetBranchWithEnvironment(t *testing.T) {
	b := loadTarget(t, "./environments_autoload_git", "production")
	bundle.Apply(context.Background(), b, mutator.LoadGitDetails())
	assert.False(t, b.Config.Bundle.Git.Inferred)
	assert.Equal(t, "main", b.Config.Bundle.Git.Branch)
	validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")

|
@ -14,6 +14,7 @@ import (
|
|||
|
||||
func TestGitAutoLoad(t *testing.T) {
|
||||
b := load(t, "./autoload_git")
|
||||
bundle.Apply(context.Background(), b, mutator.LoadGitDetails())
|
||||
assert.True(t, b.Config.Bundle.Git.Inferred)
|
||||
validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
|
||||
assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
|
||||
|
@ -21,6 +22,7 @@ func TestGitAutoLoad(t *testing.T) {
|
|||
|
||||
func TestGitManuallySetBranch(t *testing.T) {
|
||||
b := loadTarget(t, "./autoload_git", "production")
|
||||
bundle.Apply(context.Background(), b, mutator.LoadGitDetails())
|
||||
assert.False(t, b.Config.Bundle.Git.Inferred)
|
||||
assert.Equal(t, "main", b.Config.Bundle.Git.Branch)
|
||||
validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
|
||||
|
@ -34,6 +36,7 @@ func TestGitBundleBranchValidation(t *testing.T) {
|
|||
})
|
||||
|
||||
b := load(t, "./git_branch_validation")
|
||||
bundle.Apply(context.Background(), b, mutator.LoadGitDetails())
|
||||
assert.False(t, b.Config.Bundle.Git.Inferred)
|
||||
assert.Equal(t, "feature-a", b.Config.Bundle.Git.Branch)
|
||||
assert.Equal(t, "feature-b", b.Config.Bundle.Git.ActualBranch)
|
||||
|
|
|
@@ -12,6 +12,8 @@ import (
	"github.com/databricks/cli/bundle/deploy/files"
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/cli/libs/git"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/sync"
	"github.com/databricks/cli/libs/vfs"
	"github.com/spf13/cobra"

@@ -37,6 +39,7 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b *

	opts.Full = f.full
	opts.PollInterval = f.interval
	opts.WorktreeRoot = b.WorktreeRoot
	return opts, nil
}

@@ -60,11 +63,30 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn
		}
	}

	ctx := cmd.Context()
	client := root.WorkspaceClient(ctx)

	localRoot := vfs.MustNew(args[0])
	info, err := git.FetchRepositoryInfo(ctx, localRoot.Native(), client)

	if err != nil {
		log.Warnf(ctx, "Failed to read git info: %s", err)
	}

	var worktreeRoot vfs.Path

	if info.WorktreeRoot == "" {
		worktreeRoot = localRoot
	} else {
		worktreeRoot = vfs.MustNew(info.WorktreeRoot)
	}

	opts := sync.SyncOptions{
		LocalRoot: vfs.MustNew(args[0]),
		Paths:     []string{"."},
		Include:   nil,
		Exclude:   nil,
		WorktreeRoot: worktreeRoot,
		LocalRoot:    localRoot,
		Paths:        []string{"."},
		Include:      nil,
		Exclude:      nil,

		RemotePath: args[1],
		Full:       f.full,

@@ -75,7 +97,7 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn
		// The sync code will automatically create this directory if it doesn't
		// exist and add it to the `.gitignore` file in the root.
		SnapshotBasePath: filepath.Join(args[0], ".databricks"),
		WorkspaceClient:  root.WorkspaceClient(cmd.Context()),
		WorkspaceClient:  client,

		OutputHandler: outputHandler,
	}

@@ -1,6 +1,7 @@
package bundle

import (
	"fmt"
	"os"
	"path"
	"path/filepath"

@@ -13,8 +14,11 @@ import (
	"github.com/databricks/cli/bundle/libraries"
	"github.com/databricks/cli/internal"
	"github.com/databricks/cli/internal/acc"
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -225,3 +229,84 @@ func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) {
		b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl,
	)
}

func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) {
	ctx, wt := acc.UcWorkspaceTest(t)
	w := wt.W

	schemaName := internal.RandomName("schema-")

	_, err := w.Schemas.Create(ctx, catalog.CreateSchema{
		CatalogName: "main",
		Comment:     "test schema",
		Name:        schemaName,
	})
	require.NoError(t, err)

	t.Cleanup(func() {
		err = w.Schemas.DeleteByFullName(ctx, "main."+schemaName)
		require.NoError(t, err)
	})

	bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
		"unique_id":   uuid.New().String(),
		"schema_name": schemaName,
		"volume_name": "doesnotexist",
	})
	require.NoError(t, err)

	t.Setenv("BUNDLE_ROOT", bundleRoot)
	stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy")

	assert.Error(t, err)
	assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/doesnotexist does not exist: Not Found
  at workspace.artifact_path
  in databricks.yml:6:18

`, schemaName), stdout.String())
	assert.Equal(t, "", stderr.String())
}

func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) {
	ctx, wt := acc.UcWorkspaceTest(t)
	w := wt.W

	schemaName := internal.RandomName("schema-")

	_, err := w.Schemas.Create(ctx, catalog.CreateSchema{
		CatalogName: "main",
		Comment:     "test schema",
		Name:        schemaName,
	})
	require.NoError(t, err)

	t.Cleanup(func() {
		err = w.Schemas.DeleteByFullName(ctx, "main."+schemaName)
		require.NoError(t, err)
	})

	bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{
		"unique_id":   uuid.New().String(),
		"schema_name": schemaName,
		"volume_name": "my_volume",
	})
	require.NoError(t, err)

	t.Setenv("BUNDLE_ROOT", bundleRoot)
	stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy")

	assert.Error(t, err)
	assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/my_volume does not exist: Not Found
  at workspace.artifact_path
     resources.volumes.foo
  in databricks.yml:6:18
     databricks.yml:11:7

You are using a volume in your artifact_path that is managed by
this bundle but which has not been deployed yet. Please first deploy
the volume using 'bundle deploy' and then switch over to using it in
the artifact_path.

`, schemaName), stdout.String())
	assert.Equal(t, "", stderr.String())
}

@@ -0,0 +1,16 @@
{
  "properties": {
    "unique_id": {
      "type": "string",
      "description": "Unique ID for job name"
    },
    "schema_name": {
      "type": "string",
      "description": "schema name to use in the artifact_path"
    },
    "volume_name": {
      "type": "string",
      "description": "volume name to use in the artifact_path"
    }
  }
}

@@ -0,0 +1,14 @@
bundle:
  name: artifact_path_with_volume

workspace:
  root_path: "~/.bundle/{{.unique_id}}"
  artifact_path: /Volumes/main/{{.schema_name}}/{{.volume_name}}

resources:
  volumes:
    foo:
      catalog_name: main
      name: my_volume
      schema_name: {{.schema_name}}
      volume_type: MANAGED

@@ -0,0 +1,8 @@
{
  "properties": {
    "unique_id": {
      "type": "string",
      "description": "Unique ID for the schema names"
    }
  }
}

@@ -0,0 +1,31 @@
bundle:
  name: test-uc-volumes-{{.unique_id}}

variables:
  schema_name:
    default: ${resources.schemas.schema1.name}

resources:
  schemas:
    schema1:
      name: schema1-{{.unique_id}}
      catalog_name: main
      comment: This schema was created from DABs

    schema2:
      name: schema2-{{.unique_id}}
      catalog_name: main
      comment: This schema was created from DABs

  volumes:
    foo:
      catalog_name: main
      name: my_volume
      schema_name: ${var.schema_name}
      volume_type: MANAGED
      comment: This volume was created from DABs.

      grants:
        - principal: account users
          privileges:
            - WRITE_VOLUME

@@ -0,0 +1,2 @@
-- Databricks notebook source
select 1

@@ -243,3 +243,73 @@ func TestAccDeployBasicBundleLogs(t *testing.T) {
	}, "\n"), stderr)
	assert.Equal(t, "", stdout)
}

func TestAccDeployUcVolume(t *testing.T) {
	ctx, wt := acc.UcWorkspaceTest(t)
	w := wt.W

	uniqueId := uuid.New().String()
	bundleRoot, err := initTestTemplate(t, ctx, "volume", map[string]any{
		"unique_id": uniqueId,
	})
	require.NoError(t, err)

	err = deployBundle(t, ctx, bundleRoot)
	require.NoError(t, err)

	t.Cleanup(func() {
		destroyBundle(t, ctx, bundleRoot)
	})

	// Assert the volume is created successfully
	catalogName := "main"
	schemaName := "schema1-" + uniqueId
	volumeName := "my_volume"
	fullName := fmt.Sprintf("%s.%s.%s", catalogName, schemaName, volumeName)
	volume, err := w.Volumes.ReadByName(ctx, fullName)
	require.NoError(t, err)
	require.Equal(t, volume.Name, volumeName)
	require.Equal(t, catalogName, volume.CatalogName)
	require.Equal(t, schemaName, volume.SchemaName)

	// Assert that the grants were successfully applied.
	grants, err := w.Grants.GetBySecurableTypeAndFullName(ctx, catalog.SecurableTypeVolume, fullName)
	require.NoError(t, err)
	assert.Len(t, grants.PrivilegeAssignments, 1)
	assert.Equal(t, "account users", grants.PrivilegeAssignments[0].Principal)
	assert.Equal(t, []catalog.Privilege{catalog.PrivilegeWriteVolume}, grants.PrivilegeAssignments[0].Privileges)

	// Recreation of the volume without --auto-approve should fail since prompting is not possible
	t.Setenv("TERM", "dumb")
	t.Setenv("BUNDLE_ROOT", bundleRoot)
	stdout, stderr, err := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run()
	assert.Error(t, err)
	assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following volumes.
For managed volumes, the files stored in the volume are also deleted from your
cloud tenant within 30 days. For external volumes, the metadata about the volume
is removed from the catalog, but the underlying files are not deleted:
  recreate volume foo`)
	assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")

	// Successfully recreate the volume with --auto-approve
	t.Setenv("TERM", "dumb")
	t.Setenv("BUNDLE_ROOT", bundleRoot)
	_, _, err = internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run()
	assert.NoError(t, err)

	// Assert the volume is updated successfully
	schemaName = "schema2-" + uniqueId
	fullName = fmt.Sprintf("%s.%s.%s", catalogName, schemaName, volumeName)
	volume, err = w.Volumes.ReadByName(ctx, fullName)
	require.NoError(t, err)
	require.Equal(t, volume.Name, volumeName)
	require.Equal(t, catalogName, volume.CatalogName)
	require.Equal(t, schemaName, volume.SchemaName)

	// assert that the grants were applied / retained on recreate.
	grants, err = w.Grants.GetBySecurableTypeAndFullName(ctx, catalog.SecurableTypeVolume, fullName)
	require.NoError(t, err)
	assert.Len(t, grants.PrivilegeAssignments, 1)
	assert.Equal(t, "account users", grants.PrivilegeAssignments[0].Principal)
	assert.Equal(t, []catalog.Privilege{catalog.PrivilegeWriteVolume}, grants.PrivilegeAssignments[0].Privileges)
}

@@ -0,0 +1,172 @@
package internal

import (
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"testing"

	"github.com/databricks/cli/internal/acc"
	"github.com/databricks/cli/libs/dbr"
	"github.com/databricks/cli/libs/git"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const examplesRepoUrl = "https://github.com/databricks/bundle-examples"
const examplesRepoProvider = "gitHub"

func assertFullGitInfo(t *testing.T, expectedRoot string, info git.RepositoryInfo) {
	assert.Equal(t, "main", info.CurrentBranch)
	assert.NotEmpty(t, info.LatestCommit)
	assert.Equal(t, examplesRepoUrl, info.OriginURL)
	assert.Equal(t, expectedRoot, info.WorktreeRoot)
}

func assertEmptyGitInfo(t *testing.T, info git.RepositoryInfo) {
	assertSparseGitInfo(t, "", info)
}

func assertSparseGitInfo(t *testing.T, expectedRoot string, info git.RepositoryInfo) {
	assert.Equal(t, "", info.CurrentBranch)
	assert.Equal(t, "", info.LatestCommit)
	assert.Equal(t, "", info.OriginURL)
	assert.Equal(t, expectedRoot, info.WorktreeRoot)
}

func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) {
	ctx, wt := acc.WorkspaceTest(t)
	me, err := wt.W.CurrentUser.Me(ctx)
	require.NoError(t, err)

	targetPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "/testing-clone-bundle-examples-"))
	stdout, stderr := RequireSuccessfulRun(t, "repos", "create", examplesRepoUrl, examplesRepoProvider, "--path", targetPath)
	t.Cleanup(func() {
		RequireSuccessfulRun(t, "repos", "delete", targetPath)
	})

	assert.Empty(t, stderr.String())
	assert.NotEmpty(t, stdout.String())
	ctx = dbr.MockRuntime(ctx, true)

	for _, inputPath := range []string{
		path.Join(targetPath, "knowledge_base/dashboard_nyc_taxi"),
		targetPath,
	} {
		t.Run(inputPath, func(t *testing.T) {
			info, err := git.FetchRepositoryInfo(ctx, inputPath, wt.W)
			assert.NoError(t, err)
			assertFullGitInfo(t, targetPath, info)
		})
	}
}

func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) {
	ctx, wt := acc.WorkspaceTest(t)
	me, err := wt.W.CurrentUser.Me(ctx)
	require.NoError(t, err)

	rootPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "testing-nonrepo-"))
	_, stderr := RequireSuccessfulRun(t, "workspace", "mkdirs", path.Join(rootPath, "a/b/c"))
	t.Cleanup(func() {
		RequireSuccessfulRun(t, "workspace", "delete", "--recursive", rootPath)
	})

	assert.Empty(t, stderr.String())
	ctx = dbr.MockRuntime(ctx, true)

	tests := []struct {
		input string
		msg   string
	}{
		{
			input: path.Join(rootPath, "a/b/c"),
			msg:   "",
		},
		{
			input: rootPath,
			msg:   "",
		},
		{
			input: path.Join(rootPath, "/non-existent"),
			msg:   "doesn't exist",
		},
	}

	for _, test := range tests {
		t.Run(test.input+" <==> "+test.msg, func(t *testing.T) {
			info, err := git.FetchRepositoryInfo(ctx, test.input, wt.W)
			if test.msg == "" {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), test.msg)
			}
			assertEmptyGitInfo(t, info)
		})
	}
}

func TestAccFetchRepositoryInfoDotGit_FromGitRepo(t *testing.T) {
	ctx, wt := acc.WorkspaceTest(t)

	repo := cloneRepoLocally(t, examplesRepoUrl)

	for _, inputPath := range []string{
		filepath.Join(repo, "knowledge_base/dashboard_nyc_taxi"),
		repo,
	} {
		t.Run(inputPath, func(t *testing.T) {
			info, err := git.FetchRepositoryInfo(ctx, inputPath, wt.W)
			assert.NoError(t, err)
			assertFullGitInfo(t, repo, info)
		})
	}
}

func cloneRepoLocally(t *testing.T, repoUrl string) string {
	tempDir := t.TempDir()
	localRoot := filepath.Join(tempDir, "repo")

	cmd := exec.Command("git", "clone", "--depth=1", examplesRepoUrl, localRoot)
	err := cmd.Run()
	require.NoError(t, err)
	return localRoot
}

func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) {
	ctx, wt := acc.WorkspaceTest(t)

	tempDir := t.TempDir()
	root := filepath.Join(tempDir, "repo")
	require.NoError(t, os.MkdirAll(filepath.Join(root, "a/b/c"), 0700))

	tests := []string{
		filepath.Join(root, "a/b/c"),
		root,
		filepath.Join(root, "/non-existent"),
	}

	for _, input := range tests {
		t.Run(input, func(t *testing.T) {
			info, err := git.FetchRepositoryInfo(ctx, input, wt.W)
			assert.NoError(t, err)
			assertEmptyGitInfo(t, info)
		})
	}
}

func TestAccFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) {
	ctx, wt := acc.WorkspaceTest(t)

	tempDir := t.TempDir()
	root := filepath.Join(tempDir, "repo")
	path := filepath.Join(root, "a/b/c")
	require.NoError(t, os.MkdirAll(path, 0700))
	require.NoError(t, os.WriteFile(filepath.Join(root, ".git"), []byte(""), 0000))

	info, err := git.FetchRepositoryInfo(ctx, path, wt.W)
	assert.NoError(t, err)
	assertSparseGitInfo(t, root, info)
}

@@ -250,7 +250,7 @@ func (t *cobraTestRunner) RunBackground() {
	// Reset context on command for the next test.
	// These commands are globals so we have to clean up to the best of our ability after each run.
	// See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066
	//lint:ignore SA1012 cobra sets the context and doesn't clear it
	//nolint:staticcheck // cobra sets the context and doesn't clear it
	cli.SetContext(nil)

	// Make caller aware of error.

@@ -53,6 +53,19 @@ func FromErr(err error) Diagnostics {
	}
}

// WarningFromErr returns a new warning diagnostic from the specified error, if any.
func WarningFromErr(err error) Diagnostics {
	if err == nil {
		return nil
	}
	return []Diagnostic{
		{
			Severity: Warning,
			Summary:  err.Error(),
		},
	}
}

// Warningf creates a new warning diagnostic.
func Warningf(format string, args ...any) Diagnostics {
	return []Diagnostic{

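Editorial note: a quick usage sketch for the new helper at a hypothetical call site ("loadGitMetadata" is illustrative, not from this commit). When a failure should not abort the operation, the caller downgrades it to a warning instead of returning diag.FromErr:

	// Sketch: surface a non-fatal failure as a warning diagnostic.
	func exampleApply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		var diags diag.Diagnostics
		if err := loadGitMetadata(ctx, b); err != nil {
			// Deployment can proceed without this metadata; warn instead of failing.
			diags = append(diags, diag.WarningFromErr(err)...)
		}
		return diags
	}
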
@@ -71,3 +71,23 @@ func (v ref) references() []string {
func IsPureVariableReference(s string) bool {
	return len(s) > 0 && re.FindString(s) == s
}

// If s is a pure variable reference, this function returns the corresponding
// dyn.Path. Otherwise, it returns nil and false.
func PureReferenceToPath(s string) (dyn.Path, bool) {
	ref, ok := newRef(dyn.V(s))
	if !ok {
		return nil, false
	}

	if !ref.isPure() {
		return nil, false
	}

	p, err := dyn.NewPathFromString(ref.references()[0])
	if err != nil {
		return nil, false
	}

	return p, true
}

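Editorial note: a small usage sketch, assuming the package is imported as dynvar (the package alias is an assumption; the identifiers match the function above and its tests below):

	// Resolve a pure reference to a dyn.Path; mixed strings are rejected.
	p, ok := dynvar.PureReferenceToPath("${resources.schemas.schema1.name}")
	// ok == true, p equals dyn.MustPathFromString("resources.schemas.schema1.name")

	_, ok = dynvar.PureReferenceToPath("prefix ${foo.bar}")
	// ok == false: the string is not a pure reference.
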
@@ -51,3 +51,34 @@ func TestIsPureVariableReference(t *testing.T) {
	assert.False(t, IsPureVariableReference("prefix ${foo.bar}"))
	assert.True(t, IsPureVariableReference("${foo.bar}"))
}

func TestPureReferenceToPath(t *testing.T) {
	for _, tc := range []struct {
		in  string
		out string
		ok  bool
	}{
		{"${foo.bar}", "foo.bar", true},
		{"${foo.bar.baz}", "foo.bar.baz", true},
		{"${foo.bar.baz[0]}", "foo.bar.baz[0]", true},
		{"${foo.bar.baz[0][1]}", "foo.bar.baz[0][1]", true},
		{"${foo.bar.baz[0][1].qux}", "foo.bar.baz[0][1].qux", true},

		{"${foo.one}${foo.two}", "", false},
		{"prefix ${foo.bar}", "", false},
		{"${foo.bar} suffix", "", false},
		{"${foo.bar", "", false},
		{"foo.bar}", "", false},
		{"foo.bar", "", false},
		{"{foo.bar}", "", false},
		{"", "", false},
	} {
		path, ok := PureReferenceToPath(tc.in)
		if tc.ok {
			assert.True(t, ok)
			assert.Equal(t, dyn.MustPathFromString(tc.out), path)
		} else {
			assert.False(t, ok)
		}
	}
}

@@ -114,7 +114,7 @@ type apiClient interface {

// NOTE: This API is available for files under /Repos if a workspace has files-in-repos enabled.
// It can access any workspace path if files-in-workspace is enabled.
type workspaceFilesClient struct {
type WorkspaceFilesClient struct {
	workspaceClient *databricks.WorkspaceClient
	apiClient       apiClient

@@ -128,7 +128,7 @@ func NewWorkspaceFilesClient(w *databricks.WorkspaceClient, root string) (Filer,
		return nil, err
	}

	return &workspaceFilesClient{
	return &WorkspaceFilesClient{
		workspaceClient: w,
		apiClient:       apiClient,

@@ -136,7 +136,7 @@ func NewWorkspaceFilesClient(w *databricks.WorkspaceClient, root string) (Filer,
	}, nil
}

func (w *workspaceFilesClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error {
func (w *WorkspaceFilesClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error {
	absPath, err := w.root.Join(name)
	if err != nil {
		return err

@@ -214,7 +214,7 @@ func (w *workspaceFilesClient) Write(ctx context.Context, name string, reader io
	return err
}

func (w *workspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCloser, error) {
func (w *WorkspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCloser, error) {
	absPath, err := w.root.Join(name)
	if err != nil {
		return nil, err

@@ -238,7 +238,7 @@ func (w *workspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCl
	return w.workspaceClient.Workspace.Download(ctx, absPath)
}

func (w *workspaceFilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error {
func (w *WorkspaceFilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error {
	absPath, err := w.root.Join(name)
	if err != nil {
		return err

@@ -282,7 +282,7 @@ func (w *workspaceFilesClient) Delete(ctx context.Context, name string, mode ...
	return err
}

func (w *workspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) {
func (w *WorkspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) {
	absPath, err := w.root.Join(name)
	if err != nil {
		return nil, err

@@ -315,7 +315,7 @@ func (w *workspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.D
	return wsfsDirEntriesFromObjectInfos(objects), nil
}

func (w *workspaceFilesClient) Mkdir(ctx context.Context, name string) error {
func (w *WorkspaceFilesClient) Mkdir(ctx context.Context, name string) error {
	dirPath, err := w.root.Join(name)
	if err != nil {
		return err

@@ -325,7 +325,7 @@ func (w *workspaceFilesClient) Mkdir(ctx context.Context, name string) error {
	})
}

func (w *workspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
	absPath, err := w.root.Join(name)
	if err != nil {
		return nil, err

@@ -174,7 +174,7 @@ func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) {
		"return_export_info": "true",
	}, mock.AnythingOfType("*filer.wsfsFileInfo"), []func(*http.Request) error(nil)).Return(nil, statNotebook)

	workspaceFilesClient := workspaceFilesClient{
	workspaceFilesClient := WorkspaceFilesClient{
		workspaceClient: mockedWorkspaceClient.WorkspaceClient,
		apiClient:       &mockedApiClient,
		root:            NewWorkspaceRootPath("/dir"),

@@ -13,10 +13,10 @@ type FileSet struct {
	view *View
}

// NewFileSet returns [FileSet] for the Git repository located at `root`.
func NewFileSet(root vfs.Path, paths ...[]string) (*FileSet, error) {
// NewFileSet returns [FileSet] for the directory `root` which is contained within Git worktree located at `worktreeRoot`.
func NewFileSet(worktreeRoot, root vfs.Path, paths ...[]string) (*FileSet, error) {
	fs := fileset.New(root, paths...)
	v, err := NewView(root)
	v, err := NewView(worktreeRoot, root)
	if err != nil {
		return nil, err
	}

@@ -27,6 +27,10 @@ func NewFileSet(root vfs.Path, paths ...[]string) (*FileSet, error) {
	}, nil
}

func NewFileSetAtRoot(root vfs.Path, paths ...[]string) (*FileSet, error) {
	return NewFileSet(root, root, paths...)
}

func (f *FileSet) IgnoreFile(file string) (bool, error) {
	return f.view.IgnoreFile(file)
}

@@ -12,8 +12,8 @@ import (
	"github.com/stretchr/testify/require"
)

func testFileSetAll(t *testing.T, root string) {
	fileSet, err := NewFileSet(vfs.MustNew(root))
func testFileSetAll(t *testing.T, worktreeRoot, root string) {
	fileSet, err := NewFileSet(vfs.MustNew(worktreeRoot), vfs.MustNew(root))
	require.NoError(t, err)
	files, err := fileSet.Files()
	require.NoError(t, err)

@@ -24,18 +24,28 @@ func testFileSetAll(t *testing.T, root string) {
}

func TestFileSetListAllInRepo(t *testing.T) {
	testFileSetAll(t, "./testdata")
	testFileSetAll(t, "./testdata", "./testdata")
}

func TestFileSetListAllInRepoDifferentRoot(t *testing.T) {
	testFileSetAll(t, ".", "./testdata")
}

func TestFileSetListAllInTempDir(t *testing.T) {
	testFileSetAll(t, copyTestdata(t, "./testdata"))
	dir := copyTestdata(t, "./testdata")
	testFileSetAll(t, dir, dir)
}

func TestFileSetListAllInTempDirDifferentRoot(t *testing.T) {
	dir := copyTestdata(t, "./testdata")
	testFileSetAll(t, filepath.Dir(dir), dir)
}

func TestFileSetNonCleanRoot(t *testing.T) {
	// Test what happens if the root directory can be simplified.
	// Path simplification is done by most filepath functions.
	// This should yield the same result as above test.
	fileSet, err := NewFileSet(vfs.MustNew("./testdata/../testdata"))
	fileSet, err := NewFileSetAtRoot(vfs.MustNew("./testdata/../testdata"))
	require.NoError(t, err)
	files, err := fileSet.Files()
	require.NoError(t, err)

@@ -44,7 +54,7 @@ func TestFileSetNonCleanRoot(t *testing.T) {

func TestFileSetAddsCacheDirToGitIgnore(t *testing.T) {
	projectDir := t.TempDir()
	fileSet, err := NewFileSet(vfs.MustNew(projectDir))
	fileSet, err := NewFileSetAtRoot(vfs.MustNew(projectDir))
	require.NoError(t, err)
	fileSet.EnsureValidGitIgnoreExists()

@@ -59,7 +69,7 @@ func TestFileSetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) {
	projectDir := t.TempDir()
	gitIgnorePath := filepath.Join(projectDir, ".gitignore")

	fileSet, err := NewFileSet(vfs.MustNew(projectDir))
	fileSet, err := NewFileSetAtRoot(vfs.MustNew(projectDir))
	require.NoError(t, err)
	err = os.WriteFile(gitIgnorePath, []byte(".databricks"), 0o644)
	require.NoError(t, err)

@@ -0,0 +1,161 @@
package git

import (
	"context"
	"errors"
	"io/fs"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/databricks/cli/libs/dbr"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/vfs"
	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/client"
)

type RepositoryInfo struct {
	// Various metadata about the repo. Each could be "" if it could not be read. No error is returned in such a case.
	OriginURL     string
	LatestCommit  string
	CurrentBranch string

	// Absolute path to the determined worktree root, or "" if the worktree root could not be determined.
	WorktreeRoot string
}

type gitInfo struct {
	Branch       string `json:"branch"`
	HeadCommitID string `json:"head_commit_id"`
	Path         string `json:"path"`
	URL          string `json:"url"`
}

type response struct {
	GitInfo *gitInfo `json:"git_info,omitempty"`
}

// Fetch repository information either by querying .git or by fetching it from the API (for the dabs-in-workspace case).
// - In case we could not find a git repository, all string fields of RepositoryInfo will be "" and err will be nil.
// - If there were any errors when trying to determine the git root (e.g. the API call returned an error or there were
//   permission issues reading the file system), all string fields of RepositoryInfo will be "" and err will be non-nil.
// - If we could determine the git worktree root but there were errors when reading metadata (origin, branch, commit),
//   those errors will be logged as warnings; RepositoryInfo is guaranteed to have a non-empty WorktreeRoot and the
//   other fields on a best-effort basis.
// - In the successful case, all fields are set to proper git repository metadata.
func FetchRepositoryInfo(ctx context.Context, path string, w *databricks.WorkspaceClient) (RepositoryInfo, error) {
	if strings.HasPrefix(path, "/Workspace/") && dbr.RunsOnRuntime(ctx) {
		return fetchRepositoryInfoAPI(ctx, path, w)
	} else {
		return fetchRepositoryInfoDotGit(ctx, path)
	}
}

func fetchRepositoryInfoAPI(ctx context.Context, path string, w *databricks.WorkspaceClient) (RepositoryInfo, error) {
	result := RepositoryInfo{}

	apiClient, err := client.New(w.Config)
	if err != nil {
		return result, err
	}

	var response response
	const apiEndpoint = "/api/2.0/workspace/get-status"

	err = apiClient.Do(
		ctx,
		http.MethodGet,
		apiEndpoint,
		nil,
		map[string]string{
			"path":            path,
			"return_git_info": "true",
		},
		&response,
	)

	if err != nil {
		return result, err
	}

	// Check if GitInfo is present and extract relevant fields
	gi := response.GitInfo
	if gi != nil {
		fixedPath := ensureWorkspacePrefix(gi.Path)
		result.OriginURL = gi.URL
		result.LatestCommit = gi.HeadCommitID
		result.CurrentBranch = gi.Branch
		result.WorktreeRoot = fixedPath
	} else {
		log.Warnf(ctx, "Failed to load git info from %s", apiEndpoint)
	}

	return result, nil
}

func ensureWorkspacePrefix(p string) string {
	if !strings.HasPrefix(p, "/Workspace/") {
		return path.Join("/Workspace", p)
	}
	return p
}

func fetchRepositoryInfoDotGit(ctx context.Context, path string) (RepositoryInfo, error) {
	result := RepositoryInfo{}

	rootDir, err := findLeafInTree(path, GitDirectoryName)
	if rootDir == "" {
		return result, err
	}

	result.WorktreeRoot = rootDir

	repo, err := NewRepository(vfs.MustNew(rootDir))
	if err != nil {
		log.Warnf(ctx, "failed to read .git: %s", err)

		// return early since operations below won't work
		return result, nil
	}

	result.OriginURL = repo.OriginUrl()

	result.CurrentBranch, err = repo.CurrentBranch()
	if err != nil {
		log.Warnf(ctx, "failed to load current branch: %s", err)
	}

	result.LatestCommit, err = repo.LatestCommit()
	if err != nil {
		log.Warnf(ctx, "failed to load latest commit: %s", err)
	}

	return result, nil
}

func findLeafInTree(p string, leafName string) (string, error) {
	var err error
	for i := 0; i < 10000; i++ {
		_, err = os.Stat(filepath.Join(p, leafName))

		if err == nil {
			// Found [leafName] in p
			return p, nil
		}

		// ErrNotExist means we continue traversal up the tree.
		if errors.Is(err, fs.ErrNotExist) {
			parent := filepath.Dir(p)
			if parent == p {
				return "", nil
			}
			p = parent
			continue
		}
		break
	}

	return "", err
}

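Editorial note: a usage sketch for the traversal above, with hypothetical paths. findLeafInTree distinguishes "not found" (empty root, nil error) from a real I/O failure (empty root, non-nil error):

	// Walk up from a nested directory to find the worktree root.
	root, err := findLeafInTree("/home/user/repo/a/b/c", GitDirectoryName)
	switch {
	case err != nil:
		// e.g. a permission error while stat-ing; the caller treats this as fatal
	case root == "":
		// no .git anywhere up the tree; not a repository
	default:
		// root == "/home/user/repo" (the directory containing .git)
	}
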
@@ -1,9 +1,7 @@
package git

import (
	"errors"
	"fmt"
	"io/fs"
	"net/url"
	"path"
	"path/filepath"

@@ -204,17 +202,7 @@ func (r *Repository) Ignore(relPath string) (bool, error) {
	return false, nil
}

func NewRepository(path vfs.Path) (*Repository, error) {
	rootDir, err := vfs.FindLeafInTree(path, GitDirectoryName)
	if err != nil {
		if !errors.Is(err, fs.ErrNotExist) {
			return nil, err
		}
		// Cannot find `.git` directory.
		// Treat the specified path as a potential repository root checkout.
		rootDir = path
	}

func NewRepository(rootDir vfs.Path) (*Repository, error) {
	// Derive $GIT_DIR and $GIT_COMMON_DIR paths if this is a real repository.
	// If it isn't a real repository, they'll point to the (non-existent) `.git` directory.
	gitDir, gitCommonDir, err := resolveGitDirs(rootDir)

@@ -72,8 +72,8 @@ func (v *View) IgnoreDirectory(dir string) (bool, error) {
	return v.Ignore(dir + "/")
}

func NewView(root vfs.Path) (*View, error) {
	repo, err := NewRepository(root)
func NewView(worktreeRoot, root vfs.Path) (*View, error) {
	repo, err := NewRepository(worktreeRoot)
	if err != nil {
		return nil, err
	}

@@ -96,6 +96,10 @@ func NewView(root vfs.Path) (*View, error) {
	}, nil
}

func NewViewAtRoot(root vfs.Path) (*View, error) {
	return NewView(root, root)
}

func (v *View) EnsureValidGitIgnoreExists() error {
	ign, err := v.IgnoreDirectory(".databricks")
	if err != nil {

@@ -90,19 +90,19 @@ func testViewAtRoot(t *testing.T, tv testView) {
}

func TestViewRootInBricksRepo(t *testing.T) {
	v, err := NewView(vfs.MustNew("./testdata"))
	v, err := NewViewAtRoot(vfs.MustNew("./testdata"))
	require.NoError(t, err)
	testViewAtRoot(t, testView{t, v})
}

func TestViewRootInTempRepo(t *testing.T) {
	v, err := NewView(vfs.MustNew(createFakeRepo(t, "testdata")))
	v, err := NewViewAtRoot(vfs.MustNew(createFakeRepo(t, "testdata")))
	require.NoError(t, err)
	testViewAtRoot(t, testView{t, v})
}

func TestViewRootInTempDir(t *testing.T) {
	v, err := NewView(vfs.MustNew(copyTestdata(t, "testdata")))
	v, err := NewViewAtRoot(vfs.MustNew(copyTestdata(t, "testdata")))
	require.NoError(t, err)
	testViewAtRoot(t, testView{t, v})
}

@@ -125,20 +125,21 @@ func testViewAtA(t *testing.T, tv testView) {
}

func TestViewAInBricksRepo(t *testing.T) {
	v, err := NewView(vfs.MustNew("./testdata/a"))
	v, err := NewView(vfs.MustNew("."), vfs.MustNew("./testdata/a"))
	require.NoError(t, err)
	testViewAtA(t, testView{t, v})
}

func TestViewAInTempRepo(t *testing.T) {
	v, err := NewView(vfs.MustNew(filepath.Join(createFakeRepo(t, "testdata"), "a")))
	repo := createFakeRepo(t, "testdata")
	v, err := NewView(vfs.MustNew(repo), vfs.MustNew(filepath.Join(repo, "a")))
	require.NoError(t, err)
	testViewAtA(t, testView{t, v})
}

func TestViewAInTempDir(t *testing.T) {
	// Since this is not a fake repo it should not traverse up the tree.
	v, err := NewView(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a")))
	v, err := NewViewAtRoot(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a")))
	require.NoError(t, err)
	tv := testView{t, v}

@@ -175,20 +176,21 @@ func testViewAtAB(t *testing.T, tv testView) {
}

func TestViewABInBricksRepo(t *testing.T) {
	v, err := NewView(vfs.MustNew("./testdata/a/b"))
	v, err := NewView(vfs.MustNew("."), vfs.MustNew("./testdata/a/b"))
	require.NoError(t, err)
	testViewAtAB(t, testView{t, v})
}

func TestViewABInTempRepo(t *testing.T) {
	v, err := NewView(vfs.MustNew(filepath.Join(createFakeRepo(t, "testdata"), "a", "b")))
	repo := createFakeRepo(t, "testdata")
	v, err := NewView(vfs.MustNew(repo), vfs.MustNew(filepath.Join(repo, "a", "b")))
	require.NoError(t, err)
	testViewAtAB(t, testView{t, v})
}

func TestViewABInTempDir(t *testing.T) {
	// Since this is not a fake repo it should not traverse up the tree.
	v, err := NewView(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a", "b")))
	v, err := NewViewAtRoot(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a", "b")))
	tv := testView{t, v}
	require.NoError(t, err)

@@ -215,7 +217,7 @@ func TestViewDoesNotChangeGitignoreIfCacheDirAlreadyIgnoredAtRoot(t *testing.T)

	// Since root .gitignore already has .databricks, there should be no edits
	// to root .gitignore
	v, err := NewView(vfs.MustNew(repoPath))
	v, err := NewViewAtRoot(vfs.MustNew(repoPath))
	require.NoError(t, err)

	err = v.EnsureValidGitIgnoreExists()

@@ -235,7 +237,7 @@ func TestViewDoesNotChangeGitignoreIfCacheDirAlreadyIgnoredInSubdir(t *testing.T

	// Since root .gitignore already has .databricks, there should be no edits
	// to a/.gitignore
	v, err := NewView(vfs.MustNew(filepath.Join(repoPath, "a")))
	v, err := NewView(vfs.MustNew(repoPath), vfs.MustNew(filepath.Join(repoPath, "a")))
	require.NoError(t, err)

	err = v.EnsureValidGitIgnoreExists()

@@ -253,7 +255,7 @@ func TestViewAddsGitignoreWithCacheDir(t *testing.T) {
	assert.NoError(t, err)

	// Since root .gitignore was deleted, new view adds .databricks to root .gitignore
	v, err := NewView(vfs.MustNew(repoPath))
	v, err := NewViewAtRoot(vfs.MustNew(repoPath))
	require.NoError(t, err)

	err = v.EnsureValidGitIgnoreExists()

@@ -271,7 +273,7 @@ func TestViewAddsGitignoreWithCacheDirAtSubdir(t *testing.T) {
	require.NoError(t, err)

	// Since root .gitignore was deleted, new view adds .databricks to a/.gitignore
	v, err := NewView(vfs.MustNew(filepath.Join(repoPath, "a")))
	v, err := NewView(vfs.MustNew(repoPath), vfs.MustNew(filepath.Join(repoPath, "a")))
	require.NoError(t, err)

	err = v.EnsureValidGitIgnoreExists()

@@ -288,7 +290,7 @@ func TestViewAddsGitignoreWithCacheDirAtSubdir(t *testing.T) {
func TestViewAlwaysIgnoresCacheDir(t *testing.T) {
	repoPath := createFakeRepo(t, "testdata")

	v, err := NewView(vfs.MustNew(repoPath))
	v, err := NewViewAtRoot(vfs.MustNew(repoPath))
	require.NoError(t, err)

	err = v.EnsureValidGitIgnoreExists()

@@ -30,7 +30,7 @@ func TestDiff(t *testing.T) {

	// Create temp project dir
	projectDir := t.TempDir()
	fileSet, err := git.NewFileSet(vfs.MustNew(projectDir))
	fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir))
	require.NoError(t, err)
	state := Snapshot{
		SnapshotState: &SnapshotState{

@@ -94,7 +94,7 @@ func TestSymlinkDiff(t *testing.T) {

	// Create temp project dir
	projectDir := t.TempDir()
	fileSet, err := git.NewFileSet(vfs.MustNew(projectDir))
	fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir))
	require.NoError(t, err)
	state := Snapshot{
		SnapshotState: &SnapshotState{

@@ -125,7 +125,7 @@ func TestFolderDiff(t *testing.T) {

	// Create temp project dir
	projectDir := t.TempDir()
	fileSet, err := git.NewFileSet(vfs.MustNew(projectDir))
	fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir))
	require.NoError(t, err)
	state := Snapshot{
		SnapshotState: &SnapshotState{

@@ -170,7 +170,7 @@ func TestPythonNotebookDiff(t *testing.T) {

	// Create temp project dir
	projectDir := t.TempDir()
	fileSet, err := git.NewFileSet(vfs.MustNew(projectDir))
	fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir))
	require.NoError(t, err)
	state := Snapshot{
		SnapshotState: &SnapshotState{

@@ -245,7 +245,7 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) {

	// Create temp project dir
	projectDir := t.TempDir()
	fileSet, err := git.NewFileSet(vfs.MustNew(projectDir))
	fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir))
	require.NoError(t, err)
	state := Snapshot{
		SnapshotState: &SnapshotState{

@@ -282,7 +282,7 @@ func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) {

	// Create temp project dir
	projectDir := t.TempDir()
	fileSet, err := git.NewFileSet(vfs.MustNew(projectDir))
	fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir))
	require.NoError(t, err)
	state := Snapshot{
		SnapshotState: &SnapshotState{

@@ -19,10 +19,11 @@ import (
type OutputHandler func(context.Context, <-chan Event)

type SyncOptions struct {
	LocalRoot vfs.Path
	Paths     []string
	Include   []string
	Exclude   []string
	WorktreeRoot vfs.Path
	LocalRoot    vfs.Path
	Paths        []string
	Include      []string
	Exclude      []string

	RemotePath string

@@ -62,7 +63,7 @@ type Sync struct {

// New initializes and returns a new [Sync] instance.
func New(ctx context.Context, opts SyncOptions) (*Sync, error) {
	fileSet, err := git.NewFileSet(opts.LocalRoot, opts.Paths)
	fileSet, err := git.NewFileSet(opts.WorktreeRoot, opts.LocalRoot, opts.Paths)
	if err != nil {
		return nil, err
	}

@@ -37,7 +37,7 @@ func TestGetFileSet(t *testing.T) {

	dir := setupFiles(t)
	root := vfs.MustNew(dir)
	fileSet, err := git.NewFileSet(root)
	fileSet, err := git.NewFileSetAtRoot(root)
	require.NoError(t, err)

	err = fileSet.EnsureValidGitIgnoreExists()

@@ -103,7 +103,7 @@ func TestRecursiveExclude(t *testing.T) {

	dir := setupFiles(t)
	root := vfs.MustNew(dir)
	fileSet, err := git.NewFileSet(root)
	fileSet, err := git.NewFileSetAtRoot(root)
	require.NoError(t, err)

	err = fileSet.EnsureValidGitIgnoreExists()

@@ -133,7 +133,7 @@ func TestNegateExclude(t *testing.T) {

	dir := setupFiles(t)
	root := vfs.MustNew(dir)
	fileSet, err := git.NewFileSet(root)
	fileSet, err := git.NewFileSetAtRoot(root)
	require.NoError(t, err)

	err = fileSet.EnsureValidGitIgnoreExists()

@@ -35,6 +35,13 @@ var cachedUser *iam.User
var cachedIsServicePrincipal *bool
var cachedCatalog *string

// UUID that is stable for the duration of the template execution. This can be used
// to populate the `bundle.uuid` field in databricks.yml by template authors.
//
// It's automatically logged in our telemetry logs when `databricks bundle init`
// is run and can be used to attribute DBU revenue to bundle templates.
var bundleUuid = uuid.New().String()

func loadHelpers(ctx context.Context) template.FuncMap {
	w := root.WorkspaceClient(ctx)
	return template.FuncMap{

@@ -57,6 +64,9 @@ func loadHelpers(ctx context.Context) template.FuncMap {
		"uuid": func() string {
			return uuid.New().String()
		},
		"bundle_uuid": func() string {
			return bundleUuid
		},
		// A key value pair. This is used with the map function to generate maps
		// to use inside a template
		"pair": func(k string, v any) pair {

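Editorial note: the bundle_uuid helper relies on a plain Go property, namely that a FuncMap closure over a package-level variable returns the same value on every expansion and across every Execute call. A standalone sketch (not CLI code) demonstrating that property:

	package main

	import (
		"os"
		"text/template"

		"github.com/google/uuid"
	)

	// Initialized once per process, mirroring how bundleUuid behaves above.
	var stableUuid = uuid.New().String()

	func main() {
		funcs := template.FuncMap{
			"bundle_uuid": func() string { return stableUuid },
		}
		t := template.Must(template.New("demo").Funcs(funcs).Parse("{{bundle_uuid}} {{bundle_uuid}}\n"))
		// Both expansions print the same UUID, even across separate Execute calls.
		_ = t.Execute(os.Stdout, nil)
		_ = t.Execute(os.Stdout, nil)
	}
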
@@ -32,6 +32,29 @@ func TestTemplatePrintStringWithoutProcessing(t *testing.T) {
	assert.Equal(t, `{{ fail "abc" }}`, cleanContent)
}

func TestTemplateBundleUuidFunction(t *testing.T) {
	ctx := context.Background()

	ctx = root.SetWorkspaceClient(ctx, nil)
	helpers := loadHelpers(ctx)
	r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/bundle-uuid/template", "./testdata/bundle-uuid/library")
	require.NoError(t, err)

	err = r.walk()
	assert.NoError(t, err)

	// We test the content of two generated files to ensure that the same UUID
	// is used across all files generated by the template. Each file content is
	// generated by a separate (*template.Template).Execute call, so testing different
	// files ensures that the UUID is stable across all template execution calls.
	copy := strings.Clone(bundleUuid)
	assert.Len(t, r.files, 2)
	c1 := strings.Trim(string(r.files[0].(*inMemoryFile).content), "\n\r")
	assert.Equal(t, strings.Repeat(copy, 3), c1)
	c2 := strings.Trim(string(r.files[1].(*inMemoryFile).content), "\n\r")
	assert.Equal(t, strings.Repeat(copy, 5), c2)
}

func TestTemplateRegexpCompileFunction(t *testing.T) {
	ctx := context.Background()

@@ -0,0 +1 @@
{{bundle_uuid}}{{bundle_uuid}}{{bundle_uuid}}

@@ -0,0 +1 @@
{{bundle_uuid}}{{bundle_uuid}}{{bundle_uuid}}{{bundle_uuid}}{{bundle_uuid}}