Merge remote-tracking branch 'databricks/main' into cp-better-errors

Lennart Kats 2024-06-02 13:17:52 +02:00
commit fc07725307
No known key found for this signature in database
GPG Key ID: 1EB8B57673197023
213 changed files with 7155 additions and 1892 deletions


@@ -1 +1 @@
-94684175b8bd65f8701f89729351f8069e8309c9
+7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92


@@ -39,6 +39,7 @@ import (
{{define "service"}}
{{- $excludeMethods := list "put-secret" -}}
{{- $hideService := .IsPrivatePreview }}
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
@@ -57,7 +58,7 @@ func New() *cobra.Command {
"package": "{{ .Package.Name }}",
},
{{- end }}
-{{- if .IsPrivatePreview }}
+{{- if $hideService }}
// This service is being previewed; hide from help output.
Hidden: true,
@@ -151,6 +152,7 @@ func new{{.PascalName}}() *cobra.Command {
"provider-exchanges delete"
"provider-exchanges delete-listing-from-exchange"
"provider-exchanges list-exchanges-for-listing"
"provider-exchanges list-listings-for-exchange"
-}}
{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
@@ -189,7 +191,8 @@ func new{{.PascalName}}() *cobra.Command {
{{- end -}}
`
{{- end }}
-{{- if .IsPrivatePreview }}
+{{/* Don't hide commands if the service itself is already hidden. */}}
+{{- if and (not $hideService) .IsPrivatePreview }}
// This command is being previewed; hide from help output.
cmd.Hidden = true
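A standalone sketch (not part of the commit; field names are illustrative) of the guard the new template line expresses: a command is marked hidden only when it is in private preview and its parent service is not already hidden.

```go
package main

import (
	"os"
	"text/template"
)

// Illustrates `and (not $hideService) .IsPrivatePreview` with hypothetical
// boolean fields standing in for the template variables above.
const guard = `hideService={{ .HideService }} preview={{ .IsPrivatePreview }} -> ` +
	`{{ if and (not .HideService) .IsPrivatePreview }}hidden{{ else }}visible{{ end }}
`

func main() {
	t := template.Must(template.New("guard").Parse(guard))
	for _, tc := range []struct{ HideService, IsPrivatePreview bool }{
		{false, true},  // preview command under a visible service: hide it
		{true, true},   // the whole service is already hidden: don't hide it again
		{false, false}, // nothing in preview: stays visible
	} {
		_ = t.Execute(os.Stdout, tc)
	}
}
```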

.gitattributes

@@ -37,6 +37,7 @@ cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true
cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
cmd/workspace/clusters/clusters.go linguist-generated=true
cmd/workspace/cmd.go linguist-generated=true
cmd/workspace/compliance-security-profile/compliance-security-profile.go linguist-generated=true
cmd/workspace/connections/connections.go linguist-generated=true
cmd/workspace/consumer-fulfillments/consumer-fulfillments.go linguist-generated=true
cmd/workspace/consumer-installations/consumer-installations.go linguist-generated=true
@@ -44,13 +45,12 @@ cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true
cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true
cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true
cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true
-cmd/workspace/csp-enablement/csp-enablement.go linguist-generated=true
cmd/workspace/current-user/current-user.go linguist-generated=true
cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
cmd/workspace/dashboards/dashboards.go linguist-generated=true
cmd/workspace/data-sources/data-sources.go linguist-generated=true
cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
-cmd/workspace/esm-enablement/esm-enablement.go linguist-generated=true
+cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
cmd/workspace/experiments/experiments.go linguist-generated=true
cmd/workspace/external-locations/external-locations.go linguist-generated=true
cmd/workspace/functions/functions.go linguist-generated=true
@@ -62,7 +62,6 @@ cmd/workspace/instance-pools/instance-pools.go linguist-generated=true
cmd/workspace/instance-profiles/instance-profiles.go linguist-generated=true
cmd/workspace/ip-access-lists/ip-access-lists.go linguist-generated=true
cmd/workspace/jobs/jobs.go linguist-generated=true
-cmd/workspace/lakehouse-monitors/lakehouse-monitors.go linguist-generated=true
cmd/workspace/lakeview/lakeview.go linguist-generated=true
cmd/workspace/libraries/libraries.go linguist-generated=true
cmd/workspace/metastores/metastores.go linguist-generated=true
@@ -81,6 +80,7 @@ cmd/workspace/provider-personalization-requests/provider-personalization-request
cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true
cmd/workspace/provider-providers/provider-providers.go linguist-generated=true
cmd/workspace/providers/providers.go linguist-generated=true
cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true
cmd/workspace/queries/queries.go linguist-generated=true
cmd/workspace/query-history/query-history.go linguist-generated=true
cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true


@@ -1,5 +1,81 @@
# Version changelog
## 0.220.0
CLI:
* Add line about Docker installation to README.md ([#1363](https://github.com/databricks/cli/pull/1363)).
* Improve token refresh flow ([#1434](https://github.com/databricks/cli/pull/1434)).
Bundles:
* Upgrade Terraform provider to v1.42.0 ([#1418](https://github.com/databricks/cli/pull/1418)).
* Upgrade Terraform provider to v1.43.0 ([#1429](https://github.com/databricks/cli/pull/1429)).
* Don't merge-in remote resources during deployments ([#1432](https://github.com/databricks/cli/pull/1432)).
* Remove dependency on `ConfigFilePath` from path translation mutator ([#1437](https://github.com/databricks/cli/pull/1437)).
* Add `merge.Override` transform ([#1428](https://github.com/databricks/cli/pull/1428)).
* Fixed panic when loading incorrectly defined jobs ([#1402](https://github.com/databricks/cli/pull/1402)).
* Add more tests for `merge.Override` ([#1439](https://github.com/databricks/cli/pull/1439)).
* Fixed seg fault when specifying environment key for tasks ([#1443](https://github.com/databricks/cli/pull/1443)).
* Fix conversion of zero valued scalar pointers to a dynamic value ([#1433](https://github.com/databricks/cli/pull/1433)).
Internal:
* Don't hide commands of services that are already hidden ([#1438](https://github.com/databricks/cli/pull/1438)).
API Changes:
* Renamed `lakehouse-monitors` command group to `quality-monitors`.
* Added `apps` command group.
* Renamed `csp-enablement` command group to `compliance-security-profile`.
* Renamed `esm-enablement` command group to `enhanced-security-monitoring`.
* Added `databricks vector-search-indexes scan-index` command.
OpenAPI commit 7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92 (2024-05-21)
Dependency updates:
* Bump golang.org/x/text from 0.14.0 to 0.15.0 ([#1419](https://github.com/databricks/cli/pull/1419)).
* Bump golang.org/x/oauth2 from 0.19.0 to 0.20.0 ([#1421](https://github.com/databricks/cli/pull/1421)).
* Bump golang.org/x/term from 0.19.0 to 0.20.0 ([#1422](https://github.com/databricks/cli/pull/1422)).
* Bump github.com/databricks/databricks-sdk-go from 0.39.0 to 0.40.1 ([#1431](https://github.com/databricks/cli/pull/1431)).
* Bump github.com/fatih/color from 1.16.0 to 1.17.0 ([#1441](https://github.com/databricks/cli/pull/1441)).
* Bump github.com/hashicorp/terraform-json from 0.21.0 to 0.22.1 ([#1440](https://github.com/databricks/cli/pull/1440)).
* Bump github.com/hashicorp/terraform-exec from 0.20.0 to 0.21.0 ([#1442](https://github.com/databricks/cli/pull/1442)).
* Update Go SDK to v0.41.0 ([#1445](https://github.com/databricks/cli/pull/1445)).
## 0.219.0
Bundles:
* Don't fail while parsing outdated terraform state ([#1404](https://github.com/databricks/cli/pull/1404)).
* Annotate DLT pipelines when deployed using DABs ([#1410](https://github.com/databricks/cli/pull/1410)).
API Changes:
* Changed `databricks libraries cluster-status` command. New request type is compute.ClusterStatus.
* Changed `databricks libraries cluster-status` command to return .
* Added `databricks serving-endpoints get-open-api` command.
OpenAPI commit 21f9f1482f9d0d15228da59f2cd9f0863d2a6d55 (2024-04-23)
Dependency updates:
* Bump github.com/databricks/databricks-sdk-go from 0.38.0 to 0.39.0 ([#1405](https://github.com/databricks/cli/pull/1405)).
## 0.218.1
This is a bugfix release.
CLI:
* Pass `DATABRICKS_CONFIG_FILE` for `auth profiles` ([#1394](https://github.com/databricks/cli/pull/1394)).
Bundles:
* Show a better error message for using wheel tasks with older DBR versions ([#1373](https://github.com/databricks/cli/pull/1373)).
* Allow variable references in non-string fields in the JSON schema ([#1398](https://github.com/databricks/cli/pull/1398)).
* Fix variable overrides in targets for non-string variables ([#1397](https://github.com/databricks/cli/pull/1397)).
* Fix bundle schema for variables ([#1396](https://github.com/databricks/cli/pull/1396)).
* Fix bundle documentation URL ([#1399](https://github.com/databricks/cli/pull/1399)).
Internal:
* Removed autogenerated docs for the CLI commands ([#1392](https://github.com/databricks/cli/pull/1392)).
* Remove `JSON.parse` call from homebrew-tap action ([#1393](https://github.com/databricks/cli/pull/1393)).
* Ensure that Python dependencies are installed during upgrade ([#1390](https://github.com/databricks/cli/pull/1390)).
## 0.218.0
This release marks the general availability of Databricks Asset Bundles.


@@ -15,6 +15,18 @@ See https://github.com/databricks/cli/releases for releases and
[the docs pages](https://docs.databricks.com/dev-tools/cli/databricks-cli.html) for
installation instructions.
------
You can use the CLI via a Docker image by pulling the image from `ghcr.io`. You can find all available versions
at: https://github.com/databricks/cli/pkgs/container/cli.
```
docker pull ghcr.io/databricks/cli:latest
```
Example of how to run the CLI using the Docker image. More documentation is available at https://docs.databricks.com/dev-tools/bundles/airgapped-environment.html.
```
docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghcr.io/databricks/cli:latest current-user me
```
## Authentication
This CLI follows the Databricks Unified Authentication principles.


@@ -150,6 +150,10 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u
for i := range job.Environments {
env := &job.Environments[i]
if env.Spec == nil {
continue
}
for j := range env.Spec.Dependencies {
lib := env.Spec.Dependencies[j]
if isArtifactMatchLibrary(f, lib, b) {


@@ -22,6 +22,7 @@ import (
"github.com/databricks/cli/libs/log"
"github.com/databricks/cli/libs/tags"
"github.com/databricks/cli/libs/terraform"
"github.com/databricks/cli/libs/vfs"
"github.com/databricks/databricks-sdk-go"
sdkconfig "github.com/databricks/databricks-sdk-go/config"
"github.com/hashicorp/terraform-exec/tfexec"
@@ -208,7 +209,7 @@ func (b *Bundle) GitRepository() (*git.Repository, error) {
return nil, fmt.Errorf("unable to locate repository root: %w", err)
}
-return git.NewRepository(rootPath)
+return git.NewRepository(vfs.MustNew(rootPath))
}
// AuthEnv returns a map with environment variables and their values


@@ -56,7 +56,11 @@ func TestDefaultQueueingApplyEnableQueueing(t *testing.T) {
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
-"job": {},
+"job": {
+JobSettings: &jobs.JobSettings{
+Name: "job",
+},
+},
},
},
},
@@ -77,7 +81,11 @@ func TestDefaultQueueingApplyWithMultipleJobs(t *testing.T) {
Queue: &jobs.QueueSettings{Enabled: false},
},
},
-"job2": {},
+"job2": {
+JobSettings: &jobs.JobSettings{
+Name: "job",
+},
+},
"job3": {
JobSettings: &jobs.JobSettings{
Queue: &jobs.QueueSettings{Enabled: true},


@@ -8,6 +8,7 @@ import (
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/git"
"github.com/databricks/cli/libs/log"
"github.com/databricks/cli/libs/vfs"
)
type loadGitDetails struct{}
@@ -22,7 +23,7 @@ func (m *loadGitDetails) Name() string {
func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
// Load relevant git repository
-repo, err := git.NewRepository(b.RootPath)
+repo, err := git.NewRepository(vfs.MustNew(b.RootPath))
if err != nil {
return diag.FromErr(err)
}


@@ -97,6 +97,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
RegisteredModels: map[string]*resources.RegisteredModel{
"registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}},
},
QualityMonitors: map[string]*resources.QualityMonitor{
"qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}},
},
},
},
// Use AWS implementation for testing.
@@ -145,6 +148,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
// Registered model 1
assert.Equal(t, "dev_lennart_registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
// Quality Monitor 1
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
}
func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
@@ -200,6 +206,7 @@ func TestProcessTargetModeDefault(t *testing.T) {
assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
}
func TestProcessTargetModeProduction(t *testing.T) {
@@ -240,6 +247,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
}
func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {


@@ -86,6 +86,16 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
)
}
// Monitors do not support run_as in the API.
if len(b.Config.Resources.QualityMonitors) > 0 {
return errUnsupportedResourceTypeForRunAs{
resourceType: "quality_monitors",
resourceLocation: b.Config.GetLocation("resources.quality_monitors"),
currentUser: b.Config.Workspace.CurrentUser.UserName,
runAsUser: identity,
}
}
return nil
}


@@ -37,6 +37,7 @@ func allResourceTypes(t *testing.T) []string {
"model_serving_endpoints",
"models",
"pipelines",
"quality_monitors",
"registered_models",
},
resourceTypes,


@@ -53,8 +53,6 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Di
}
// We should have had a value to set for the variable at this point.
-// TODO: use cmdio to request values for unassigned variables if current
-// terminal is a tty. Tracked in https://github.com/databricks/cli/issues/379
return diag.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
}


@@ -213,3 +213,31 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos
return diag.FromErr(err)
}
func gatherFallbackPaths(v dyn.Value, typ string) (map[string]string, error) {
var fallback = make(map[string]string)
var pattern = dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey())
// Previous behavior was to use a resource's location as the base path to resolve
// relative paths in its definition. With the introduction of [dyn.Value] throughout,
// we can use the location of the [dyn.Value] of the relative path itself.
//
// This is more flexible, as resources may have overrides that are not
// located in the same directory as the resource configuration file.
//
// To maintain backwards compatibility, we allow relative paths to be resolved using
// the original approach as fallback if the [dyn.Value] location cannot be resolved.
_, err := dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
key := p[2].Key()
dir, err := v.Location().Directory()
if err != nil {
return dyn.InvalidValue, fmt.Errorf("unable to determine directory for %s: %w", p, err)
}
fallback[key] = dir
return v, nil
})
if err != nil {
return nil, err
}
return fallback, nil
}


@@ -55,21 +55,14 @@ func rewritePatterns(base dyn.Pattern) []jobRewritePattern {
}
func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) {
-var fallback = make(map[string]string)
-var ignore []string
-var err error
-for key, job := range b.Config.Resources.Jobs {
-dir, err := job.ConfigFileDirectory()
+fallback, err := gatherFallbackPaths(v, "jobs")
if err != nil {
-return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
+return dyn.InvalidValue, err
}
-// If we cannot resolve the relative path using the [dyn.Value] location itself,
-// use the job's location as fallback. This is necessary for backwards compatibility.
-fallback[key] = dir
-// Do not translate job task paths if using git source
+// Do not translate job task paths if using Git source
+var ignore []string
+for key, job := range b.Config.Resources.Jobs {
if job.GitSource != nil {
ignore = append(ignore, key)
}


@@ -8,18 +8,9 @@ import (
)
func (m *translatePaths) applyPipelineTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) {
-var fallback = make(map[string]string)
-var err error
-for key, pipeline := range b.Config.Resources.Pipelines {
-dir, err := pipeline.ConfigFileDirectory()
+fallback, err := gatherFallbackPaths(v, "pipelines")
if err != nil {
-return dyn.InvalidValue, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
+return dyn.InvalidValue, err
}
-// If we cannot resolve the relative path using the [dyn.Value] location itself,
-// use the pipeline's location as fallback. This is necessary for backwards compatibility.
-fallback[key] = dir
-}
// Base pattern to match all libraries in all pipelines.


@@ -1,9 +1,6 @@
package paths
import (
-"fmt"
-"path/filepath"
"github.com/databricks/cli/libs/dyn"
)
@@ -23,10 +20,3 @@ func (p *Paths) ConfigureConfigFilePath() {
}
p.ConfigFilePath = p.DynamicValue.Location().File
}
-func (p *Paths) ConfigFileDirectory() (string, error) {
-if p.ConfigFilePath == "" {
-return "", fmt.Errorf("config file path not configured")
-}
-return filepath.Dir(p.ConfigFilePath), nil
-}


@@ -18,6 +18,7 @@ type Resources struct {
Experiments map[string]*resources.MlflowExperiment `json:"experiments,omitempty"`
ModelServingEndpoints map[string]*resources.ModelServingEndpoint `json:"model_serving_endpoints,omitempty"`
RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"`
QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"`
}
type UniqueResourceIdTracker struct {
@@ -28,6 +29,7 @@ type UniqueResourceIdTracker struct {
type ConfigResource interface {
Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error)
TerraformResourceName() string
Validate() error
json.Marshaler
json.Unmarshaler
@@ -132,9 +134,66 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker,
tracker.Type[k] = "registered_model"
tracker.ConfigPath[k] = r.RegisteredModels[k].ConfigFilePath
}
for k := range r.QualityMonitors {
if _, ok := tracker.Type[k]; ok {
return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
k,
tracker.Type[k],
tracker.ConfigPath[k],
"quality_monitor",
r.QualityMonitors[k].ConfigFilePath,
)
}
tracker.Type[k] = "quality_monitor"
tracker.ConfigPath[k] = r.QualityMonitors[k].ConfigFilePath
}
return tracker, nil
}
type resource struct {
resource ConfigResource
resource_type string
key string
}
func (r *Resources) allResources() []resource {
all := make([]resource, 0)
for k, e := range r.Jobs {
all = append(all, resource{resource_type: "job", resource: e, key: k})
}
for k, e := range r.Pipelines {
all = append(all, resource{resource_type: "pipeline", resource: e, key: k})
}
for k, e := range r.Models {
all = append(all, resource{resource_type: "model", resource: e, key: k})
}
for k, e := range r.Experiments {
all = append(all, resource{resource_type: "experiment", resource: e, key: k})
}
for k, e := range r.ModelServingEndpoints {
all = append(all, resource{resource_type: "serving endpoint", resource: e, key: k})
}
for k, e := range r.RegisteredModels {
all = append(all, resource{resource_type: "registered model", resource: e, key: k})
}
for k, e := range r.QualityMonitors {
all = append(all, resource{resource_type: "quality monitor", resource: e, key: k})
}
return all
}
func (r *Resources) VerifyAllResourcesDefined() error {
all := r.allResources()
for _, e := range all {
err := e.resource.Validate()
if err != nil {
return fmt.Errorf("%s %s is not defined", e.resource_type, e.key)
}
}
return nil
}
// ConfigureConfigFilePath sets the specified path for all resources contained in this instance.
// This property is used to correctly resolve paths relative to the path
// of the configuration file they were defined in.
@@ -157,6 +216,9 @@ func (r *Resources) ConfigureConfigFilePath() {
for _, e := range r.RegisteredModels {
e.ConfigureConfigFilePath()
}
for _, e := range r.QualityMonitors {
e.ConfigureConfigFilePath()
}
}
func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) {


@@ -2,6 +2,7 @@ package resources
import (
"context"
"fmt"
"strconv"
"github.com/databricks/cli/bundle/config/paths"
@@ -47,3 +48,11 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri
func (j *Job) TerraformResourceName() string {
return "databricks_job"
}
func (j *Job) Validate() error {
if j == nil || !j.DynamicValue.IsValid() || j.JobSettings == nil {
return fmt.Errorf("job is not defined")
}
return nil
}


@@ -1,7 +1,12 @@
package resources
import (
"context"
"fmt"
"github.com/databricks/cli/bundle/config/paths"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/ml"
)
@@ -23,3 +28,26 @@ func (s *MlflowExperiment) UnmarshalJSON(b []byte) error {
func (s MlflowExperiment) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
_, err := w.Experiments.GetExperiment(ctx, ml.GetExperimentRequest{
ExperimentId: id,
})
if err != nil {
log.Debugf(ctx, "experiment %s does not exist", id)
return false, err
}
return true, nil
}
func (s *MlflowExperiment) TerraformResourceName() string {
return "databricks_mlflow_experiment"
}
func (s *MlflowExperiment) Validate() error {
if s == nil || !s.DynamicValue.IsValid() {
return fmt.Errorf("experiment is not defined")
}
return nil
}


@@ -1,7 +1,12 @@
package resources
import (
"context"
"fmt"
"github.com/databricks/cli/bundle/config/paths"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/ml"
)
@@ -23,3 +28,26 @@ func (s *MlflowModel) UnmarshalJSON(b []byte) error {
func (s MlflowModel) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
_, err := w.ModelRegistry.GetModel(ctx, ml.GetModelRequest{
Name: id,
})
if err != nil {
log.Debugf(ctx, "model %s does not exist", id)
return false, err
}
return true, nil
}
func (s *MlflowModel) TerraformResourceName() string {
return "databricks_mlflow_model"
}
func (s *MlflowModel) Validate() error {
if s == nil || !s.DynamicValue.IsValid() {
return fmt.Errorf("model is not defined")
}
return nil
}


@@ -1,7 +1,12 @@
package resources
import (
"context"
"fmt"
"github.com/databricks/cli/bundle/config/paths"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/serving"
)
@@ -33,3 +38,26 @@ func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error {
func (s ModelServingEndpoint) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
_, err := w.ServingEndpoints.Get(ctx, serving.GetServingEndpointRequest{
Name: id,
})
if err != nil {
log.Debugf(ctx, "serving endpoint %s does not exist", id)
return false, err
}
return true, nil
}
func (s *ModelServingEndpoint) TerraformResourceName() string {
return "databricks_model_serving"
}
func (s *ModelServingEndpoint) Validate() error {
if s == nil || !s.DynamicValue.IsValid() {
return fmt.Errorf("serving endpoint is not defined")
}
return nil
}


@@ -2,6 +2,7 @@ package resources
import (
"context"
"fmt"
"github.com/databricks/cli/bundle/config/paths"
"github.com/databricks/cli/libs/log"
@@ -42,3 +43,11 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
func (p *Pipeline) TerraformResourceName() string {
return "databricks_pipeline"
}
func (p *Pipeline) Validate() error {
if p == nil || !p.DynamicValue.IsValid() {
return fmt.Errorf("pipeline is not defined")
}
return nil
}


@@ -0,0 +1,60 @@
package resources
import (
"context"
"fmt"
"github.com/databricks/cli/bundle/config/paths"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/catalog"
)
type QualityMonitor struct {
// Represents the Input Arguments for Terraform and will get
// converted to a HCL representation for CRUD
*catalog.CreateMonitor
// This represents the id which is the full name of the monitor
// (catalog_name.schema_name.table_name) that can be used
// as a reference in other resources. This value is returned by terraform.
ID string `json:"id,omitempty" bundle:"readonly"`
// Path to config file where the resource is defined. All bundle resources
// include this for interpolation purposes.
paths.Paths
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
}
func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
return marshal.Unmarshal(b, s)
}
func (s QualityMonitor) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
_, err := w.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{
TableName: id,
})
if err != nil {
log.Debugf(ctx, "quality monitor %s does not exist", id)
return false, err
}
return true, nil
}
func (s *QualityMonitor) TerraformResourceName() string {
return "databricks_quality_monitor"
}
func (s *QualityMonitor) Validate() error {
if s == nil || !s.DynamicValue.IsValid() {
return fmt.Errorf("quality monitor is not defined")
}
return nil
}


@@ -1,7 +1,12 @@
package resources
import (
"context"
"fmt"
"github.com/databricks/cli/bundle/config/paths"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/catalog"
)
@@ -34,3 +39,26 @@ func (s *RegisteredModel) UnmarshalJSON(b []byte) error {
func (s RegisteredModel) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
_, err := w.RegisteredModels.Get(ctx, catalog.GetRegisteredModelRequest{
FullName: id,
})
if err != nil {
log.Debugf(ctx, "registered model %s does not exist", id)
return false, err
}
return true, nil
}
func (s *RegisteredModel) TerraformResourceName() string {
return "databricks_registered_model"
}
func (s *RegisteredModel) Validate() error {
if s == nil || !s.DynamicValue.IsValid() {
return fmt.Errorf("registered model is not defined")
}
return nil
}


@@ -1,6 +1,8 @@
package config
import (
"encoding/json"
"reflect"
"testing"
"github.com/databricks/cli/bundle/config/paths"
@@ -125,3 +127,57 @@ func TestVerifySafeMergeForRegisteredModels(t *testing.T) {
err := r.VerifySafeMerge(&other)
assert.ErrorContains(t, err, "multiple resources named bar (registered_model at bar.yml, registered_model at bar2.yml)")
}
// This test ensures that all resources have a custom marshaller and unmarshaller.
// This is required because DABs resources map to Databricks APIs, and they do so
// by embedding the corresponding Go SDK structs.
//
// Go SDK structs often implement custom marshalling and unmarshalling methods (based on the API specifics).
// If the Go SDK struct implements custom marshalling and unmarshalling and we do not
// for the resources at the top level, marshalling and unmarshalling operations will panic.
// Thus we will be overly cautious and ensure that all resources need a custom marshaller and unmarshaller.
//
// Why do we not assert this using an interface to assert MarshalJSON and UnmarshalJSON
// are implemented at the top level?
// If a method is implemented for an embedded struct, the top level struct will
// also have that method and satisfy the interface. This is why we cannot assert
// that the methods are implemented at the top level using an interface.
//
// Why don't we use reflection to assert that the methods are implemented at the
// top level?
// Same problem as above, the golang reflection package does not seem to provide
// a way to directly assert that MarshalJSON and UnmarshalJSON are implemented
// at the top level.
func TestCustomMarshallerIsImplemented(t *testing.T) {
r := Resources{}
rt := reflect.TypeOf(r)
for i := 0; i < rt.NumField(); i++ {
field := rt.Field(i)
// Fields in Resources are expected be of the form map[string]*resourceStruct
assert.Equal(t, field.Type.Kind(), reflect.Map, "Resource %s is not a map", field.Name)
kt := field.Type.Key()
assert.Equal(t, kt.Kind(), reflect.String, "Resource %s is not a map with string keys", field.Name)
vt := field.Type.Elem()
assert.Equal(t, vt.Kind(), reflect.Ptr, "Resource %s is not a map with pointer values", field.Name)
// Marshalling a resourceStruct will panic if resourceStruct does not have a custom marshaller
// This is because resourceStruct embeds a Go SDK struct that implements
// a custom marshaller.
// Eg: resource.Job implements MarshalJSON
v := reflect.Zero(vt.Elem()).Interface()
assert.NotPanics(t, func() {
json.Marshal(v)
}, "Resource %s does not have a custom marshaller", field.Name)
// Unmarshalling a *resourceStruct will panic if the resource does not have a custom unmarshaller
// This is because resourceStruct embeds a Go SDK struct that implements
// a custom unmarshaller.
// Eg: *resource.Job implements UnmarshalJSON
v = reflect.New(vt.Elem()).Interface()
assert.NotPanics(t, func() {
json.Unmarshal([]byte("{}"), v)
}, "Resource %s does not have a custom unmarshaller", field.Name)
}
}
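The comment above describes a general Go pitfall: when a struct embeds (by pointer) a type that defines a value-receiver MarshalJSON, that method is promoted to the outer struct, and marshalling a zero outer value dereferences the nil embedded pointer. Below is a minimal, self-contained sketch of that behavior; it is not code from this repository, and all type names are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// SDKStruct stands in for an embedded Go SDK type with a custom
// value-receiver marshaller (hypothetical name).
type SDKStruct struct {
	Name string
}

func (s SDKStruct) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]string{"name": s.Name})
}

// Resource embeds the SDK type by pointer but defines no MarshalJSON of its
// own, so SDKStruct's method is promoted to Resource.
type Resource struct {
	*SDKStruct
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			// With a nil *SDKStruct, the promoted value-receiver method
			// dereferences a nil pointer, and encoding/json re-panics.
			fmt.Println("panic:", r)
		}
	}()
	_, _ = json.Marshal(Resource{})
	fmt.Println("marshalled without panic")
}
```

Defining a top-level marshaller on the outer type (as the resources in this commit do via marshal.Marshal) avoids calling the promoted method on a nil embedded value, which is what the test above asserts for every resource type.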


@@ -138,6 +138,14 @@ func (r *Root) updateWithDynamicValue(nv dyn.Value) error {
// Assign the normalized configuration tree.
r.value = nv
// At the moment the check has to be done as part of updateWithDynamicValue
// because otherwise ConfigureConfigFilePath will fail with a panic.
// In the future, we should move this check to a separate mutator in initialise phase.
err = r.Resources.VerifyAllResourcesDefined()
if err != nil {
return err
}
// Assign config file paths after converting to typed configuration.
r.ConfigureConfigFilePath()
return nil
@@ -408,15 +416,19 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
// For each variable, normalize its contents if it is a single string.
return dyn.Map(target, "variables", dyn.Foreach(func(_ dyn.Path, variable dyn.Value) (dyn.Value, error) {
-if variable.Kind() != dyn.KindString {
-return variable, nil
-}
+switch variable.Kind() {
+case dyn.KindString, dyn.KindBool, dyn.KindFloat, dyn.KindInt:
// Rewrite the variable to a map with a single key called "default".
-// This conforms to the variable type.
+// This conforms to the variable type. Normalization back to the typed
+// configuration will convert this to a string if necessary.
return dyn.NewValue(map[string]dyn.Value{
"default": variable,
}, variable.Location()), nil
+default:
+return variable, nil
+}
}))
}))
}


@@ -8,6 +8,7 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/fileset"
"github.com/databricks/cli/libs/vfs"
"golang.org/x/sync/errgroup"
)
@@ -50,7 +51,7 @@ func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (di
index := i
p := pattern
errs.Go(func() error {
-fs, err := fileset.NewGlobSet(rb.RootPath(), []string{p})
+fs, err := fileset.NewGlobSet(vfs.MustNew(rb.RootPath()), []string{p})
if err != nil {
return err
}


@@ -6,6 +6,7 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/sync"
"github.com/databricks/cli/libs/vfs"
)
func GetSync(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.Sync, error) {
@@ -28,7 +29,7 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp
}
opts := &sync.SyncOptions{
-LocalPath: rb.RootPath(),
+LocalPath: vfs.MustNew(rb.RootPath()),
RemotePath: rb.Config().Workspace.FilePath,
Include: includes,
Exclude: rb.Config().Sync.Exclude,


@@ -2,7 +2,6 @@ package metadata
import (
"context"
-"path"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
@@ -27,7 +26,7 @@ func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnosti
job.JobSettings.Deployment = &jobs.JobDeployment{
Kind: jobs.JobDeploymentKindBundle,
-MetadataFilePath: path.Join(b.Config.Workspace.StatePath, MetadataFileName),
+MetadataFilePath: metadataFilePath(b),
}
job.JobSettings.EditMode = jobs.JobEditModeUiLocked
job.JobSettings.Format = jobs.FormatMultiTask


@@ -0,0 +1,34 @@
package metadata
import (
"context"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/service/pipelines"
)
type annotatePipelines struct{}
func AnnotatePipelines() bundle.Mutator {
return &annotatePipelines{}
}
func (m *annotatePipelines) Name() string {
return "metadata.AnnotatePipelines"
}
func (m *annotatePipelines) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
for _, pipeline := range b.Config.Resources.Pipelines {
if pipeline.PipelineSpec == nil {
continue
}
pipeline.PipelineSpec.Deployment = &pipelines.PipelineDeployment{
Kind: pipelines.DeploymentKindBundle,
MetadataFilePath: metadataFilePath(b),
}
}
return nil
}


@@ -0,0 +1,72 @@
package metadata
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAnnotatePipelinesMutator(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
StatePath: "/a/b/c",
},
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"my-pipeline-1": {
PipelineSpec: &pipelines.PipelineSpec{
Name: "My Pipeline One",
},
},
"my-pipeline-2": {
PipelineSpec: &pipelines.PipelineSpec{
Name: "My Pipeline Two",
},
},
},
},
},
}
diags := bundle.Apply(context.Background(), b, AnnotatePipelines())
require.NoError(t, diags.Error())
assert.Equal(t,
&pipelines.PipelineDeployment{
Kind: pipelines.DeploymentKindBundle,
MetadataFilePath: "/a/b/c/metadata.json",
},
b.Config.Resources.Pipelines["my-pipeline-1"].PipelineSpec.Deployment)
assert.Equal(t,
&pipelines.PipelineDeployment{
Kind: pipelines.DeploymentKindBundle,
MetadataFilePath: "/a/b/c/metadata.json",
},
b.Config.Resources.Pipelines["my-pipeline-2"].PipelineSpec.Deployment)
}
func TestAnnotatePipelinesMutatorPipelineWithoutASpec(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
StatePath: "/a/b/c",
},
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"my-pipeline-1": {},
},
},
},
}
diags := bundle.Apply(context.Background(), b, AnnotatePipelines())
require.NoError(t, diags.Error())
}


@@ -4,13 +4,18 @@ import (
"bytes"
"context"
"encoding/json"
"path"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/filer"
)
-const MetadataFileName = "metadata.json"
+const metadataFileName = "metadata.json"
func metadataFilePath(b *bundle.Bundle) string {
return path.Join(b.Config.Workspace.StatePath, metadataFileName)
}
type upload struct{}
@@ -33,5 +38,5 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
return diag.FromErr(err)
}
-return diag.FromErr(f.Write(ctx, MetadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists))
+return diag.FromErr(f.Write(ctx, metadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists))
}


@@ -12,6 +12,7 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/fileset"
"github.com/databricks/cli/libs/vfs"
)
const DeploymentStateFileName = "deployment.json"
@@ -112,12 +113,18 @@ func FromSlice(files []fileset.File) (Filelist, error) {
func (f Filelist) ToSlice(basePath string) []fileset.File {
var files []fileset.File
root := vfs.MustNew(basePath)
for _, file := range f {
-absPath := filepath.Join(basePath, file.LocalPath)
+entry := newEntry(filepath.Join(basePath, file.LocalPath))
// Snapshots created with versions <= v0.220.0 use platform-specific
// paths (i.e. with backslashes). Files returned by [libs/fileset] always
// contain forward slashes after this version. Normalize before using.
relative := filepath.ToSlash(file.LocalPath)
if file.IsNotebook {
-files = append(files, fileset.NewNotebookFile(newEntry(absPath), absPath, file.LocalPath))
+files = append(files, fileset.NewNotebookFile(root, entry, relative))
} else {
-files = append(files, fileset.NewSourceFile(newEntry(absPath), absPath, file.LocalPath))
+files = append(files, fileset.NewSourceFile(root, entry, relative))
}
}
return files
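A small standalone sketch (not from the commit) of the normalization the comment above describes: `filepath.ToSlash` converts separator characters of the current platform, so backslash-separated paths stored by older snapshots on Windows become slash-separated there, while already-normalized paths pass through unchanged.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	stored := `bundle\src\notebook.py` // hypothetical value read from an old snapshot
	fmt.Println(filepath.ToSlash(stored))
	// On Windows this prints "bundle/src/notebook.py"; on Unix, where the
	// separator is already "/", ToSlash returns the string unchanged.
}
```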


@@ -3,17 +3,17 @@ package deploy
import (
"bytes"
"encoding/json"
-"path/filepath"
"testing"
"github.com/databricks/cli/internal/testutil"
"github.com/databricks/cli/libs/fileset"
"github.com/databricks/cli/libs/vfs"
"github.com/stretchr/testify/require"
)
func TestFromSlice(t *testing.T) {
tmpDir := t.TempDir()
-fileset := fileset.New(tmpDir)
+fileset := fileset.New(vfs.MustNew(tmpDir))
testutil.Touch(t, tmpDir, "test1.py")
testutil.Touch(t, tmpDir, "test2.py")
testutil.Touch(t, tmpDir, "test3.py")
@@ -32,7 +32,7 @@ func TestFromSlice(t *testing.T) {
func TestToSlice(t *testing.T) {
tmpDir := t.TempDir()
-fileset := fileset.New(tmpDir)
+fileset := fileset.New(vfs.MustNew(tmpDir))
testutil.Touch(t, tmpDir, "test1.py")
testutil.Touch(t, tmpDir, "test2.py")
testutil.Touch(t, tmpDir, "test3.py")
@@ -48,18 +48,11 @@ func TestToSlice(t *testing.T) {
require.Len(t, s, 3)
for _, file := range s {
-require.Contains(t, []string{"test1.py", "test2.py", "test3.py"}, file.Name())
-require.Contains(t, []string{
-filepath.Join(tmpDir, "test1.py"),
-filepath.Join(tmpDir, "test2.py"),
-filepath.Join(tmpDir, "test3.py"),
-}, file.Absolute)
-require.False(t, file.IsDir())
-require.NotZero(t, file.Type())
-info, err := file.Info()
-require.NoError(t, err)
-require.NotNil(t, info)
-require.Equal(t, file.Name(), info.Name())
+require.Contains(t, []string{"test1.py", "test2.py", "test3.py"}, file.Relative)
+// If the mtime is not zero we know we produced a valid fs.DirEntry.
+ts := file.Modified()
+require.NotZero(t, ts)
}
}


@@ -1,4 +1,4 @@
-package deploy
+package terraform
import (
"context"
@@ -10,7 +10,6 @@ import (
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/pipelines"
-"github.com/hashicorp/terraform-exec/tfexec"
tfjson "github.com/hashicorp/terraform-json"
"golang.org/x/sync/errgroup"
)
@@ -36,26 +35,16 @@ func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) dia
return nil
}
-tf := b.Terraform
-if tf == nil {
-return diag.Errorf("terraform not initialized")
-}
-err := tf.Init(ctx, tfexec.Upgrade(true))
-if err != nil {
-return diag.Errorf("terraform init: %v", err)
-}
-state, err := b.Terraform.Show(ctx)
-if err != nil {
+state, err := ParseResourcesState(ctx, b)
+if err != nil && state == nil {
return diag.FromErr(err)
}
-err = checkAnyResourceRunning(ctx, b.WorkspaceClient(), state)
+w := b.WorkspaceClient()
+err = checkAnyResourceRunning(ctx, w, state)
if err != nil {
-return diag.Errorf("deployment aborted, err: %v", err)
+return diag.FromErr(err)
}
return nil
}
@@ -63,25 +52,20 @@ func CheckRunningResource() *checkRunningResources {
return &checkRunningResources{}
}
-func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, state *tfjson.State) error {
-if state.Values == nil || state.Values.RootModule == nil {
+func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, state *resourcesState) error {
+if state == nil {
return nil
}
errs, errCtx := errgroup.WithContext(ctx)
-for _, resource := range state.Values.RootModule.Resources {
-// Limit to resources.
+for _, resource := range state.Resources {
if resource.Mode != tfjson.ManagedResourceMode {
continue
}
-value, ok := resource.AttributeValues["id"]
-if !ok {
-continue
-}
-id, ok := value.(string)
-if !ok {
+for _, instance := range resource.Instances {
+id := instance.Attributes.ID
+if id == "" {
continue
}
@@ -112,6 +96,7 @@ func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient,
})
}
}
+}
return errs.Wait()
}


@@ -1,4 +1,4 @@
-package deploy
+package terraform
import (
"context"
@@ -8,31 +8,26 @@ import (
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/pipelines"
-tfjson "github.com/hashicorp/terraform-json"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestIsAnyResourceRunningWithEmptyState(t *testing.T) {
mock := mocks.NewMockWorkspaceClient(t)
-state := &tfjson.State{}
-err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, state)
+err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, &resourcesState{})
require.NoError(t, err)
}
func TestIsAnyResourceRunningWithJob(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
-state := &tfjson.State{
-Values: &tfjson.StateValues{
-RootModule: &tfjson.StateModule{
-Resources: []*tfjson.StateResource{
+resources := &resourcesState{
+Resources: []stateResource{
{
Type: "databricks_job",
-AttributeValues: map[string]interface{}{
-"id": "123",
-},
-Mode: tfjson.ManagedResourceMode,
-},
+Mode: "managed",
+Name: "job1",
+Instances: []stateResourceInstance{
+{Attributes: stateInstanceAttributes{ID: "123"}},
+},
},
},
},
@@ -46,7 +41,7 @@ func TestIsAnyResourceRunningWithJob(t *testing.T) {
{RunId: 1234},
}, nil).Once()
-err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state)
+err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources)
require.ErrorContains(t, err, "job 123 is running")
jobsApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{
@@ -54,23 +49,20 @@ func TestIsAnyResourceRunningWithJob(t *testing.T) {
ActiveOnly: true,
}).Return([]jobs.BaseRun{}, nil).Once()
-err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state)
+err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources)
require.NoError(t, err)
}
func TestIsAnyResourceRunningWithPipeline(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
-state := &tfjson.State{
-Values: &tfjson.StateValues{
-RootModule: &tfjson.StateModule{
-Resources: []*tfjson.StateResource{
+resources := &resourcesState{
+Resources: []stateResource{
{
Type: "databricks_pipeline",
-AttributeValues: map[string]interface{}{
-"id": "123",
-},
-Mode: tfjson.ManagedResourceMode,
-},
+Mode: "managed",
+Name: "pipeline1",
+Instances: []stateResourceInstance{
+{Attributes: stateInstanceAttributes{ID: "123"}},
+},
},
},
},
@@ -84,7 +76,7 @@ func TestIsAnyResourceRunningWithPipeline(t *testing.T) {
State: pipelines.PipelineStateRunning,
}, nil).Once()
-err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state)
+err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources)
require.ErrorContains(t, err, "pipeline 123 is running")
pipelineApi.EXPECT().Get(mock.Anything, pipelines.GetPipelineRequest{
@@ -93,23 +85,20 @@ func TestIsAnyResourceRunningWithPipeline(t *testing.T) {
PipelineId: "123",
State: pipelines.PipelineStateIdle,
}, nil).Once()
-err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state)
+err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources)
require.NoError(t, err)
}
func TestIsAnyResourceRunningWithAPIFailure(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
-state := &tfjson.State{
-Values: &tfjson.StateValues{
-RootModule: &tfjson.StateModule{
-Resources: []*tfjson.StateResource{
+resources := &resourcesState{
+Resources: []stateResource{
{
Type: "databricks_pipeline",
-AttributeValues: map[string]interface{}{
-"id": "123",
-},
-Mode: tfjson.ManagedResourceMode,
-},
+Mode: "managed",
+Name: "pipeline1",
+Instances: []stateResourceInstance{
+{Attributes: stateInstanceAttributes{ID: "123"}},
+},
},
},
},
@@ -120,6 +109,6 @@ func TestIsAnyResourceRunningWithAPIFailure(t *testing.T) {
PipelineId: "123",
}).Return(nil, errors.New("API failure")).Once()
-err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state)
+err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources)
require.NoError(t, err)
}

View File

@ -4,7 +4,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"reflect"
"github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/config/resources"
@ -19,15 +18,6 @@ func conv(from any, to any) {
json.Unmarshal(buf, &to) json.Unmarshal(buf, &to)
} }
func convRemoteToLocal(remote any, local any) resources.ModifiedStatus {
var modifiedStatus resources.ModifiedStatus
if reflect.ValueOf(local).Elem().IsNil() {
modifiedStatus = resources.ModifiedStatusDeleted
}
conv(remote, local)
return modifiedStatus
}
func convPermissions(acl []resources.Permission) *schema.ResourcePermissions { func convPermissions(acl []resources.Permission) *schema.ResourcePermissions {
if len(acl) == 0 { if len(acl) == 0 {
return nil return nil
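
The conv helper retained above does a plain JSON round-trip: it marshals the source value and unmarshals the bytes into the destination, matching fields by JSON tag and silently dropping everything else. A minimal standalone sketch of that pattern, with hypothetical bundleJob/terraformJob types used purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// conv mirrors the helper above: marshal the source, unmarshal into the
// destination. Errors are swallowed here too; fields are matched by JSON tag.
func conv(from any, to any) {
	buf, _ := json.Marshal(from)
	// Unmarshalling into &to still reaches the underlying struct because
	// encoding/json follows the non-nil pointer stored in the interface.
	_ = json.Unmarshal(buf, &to)
}

// Hypothetical source and target types for illustration only.
type bundleJob struct {
	Name string `json:"name"`
	ID   string `json:"id"`
}

type terraformJob struct {
	Name string `json:"name"`
}

func main() {
	var dst terraformJob
	conv(bundleJob{Name: "my_job", ID: "123"}, &dst)
	fmt.Println(dst.Name) // "my_job"; the unmatched "id" field is dropped
}
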
@ -232,6 +222,13 @@ func BundleToTerraform(config *config.Root) *schema.Root {
} }
} }
for k, src := range config.Resources.QualityMonitors {
noResources = false
var dst schema.ResourceQualityMonitor
conv(src, &dst)
tfroot.Resource.QualityMonitor[k] = &dst
}
// We explicitly set "resource" to nil to omit it from a JSON encoding. // We explicitly set "resource" to nil to omit it from a JSON encoding.
// This is required because the terraform CLI requires >= 1 resources defined // This is required because the terraform CLI requires >= 1 resources defined
// if the "resource" property is used in a .tf.json file. // if the "resource" property is used in a .tf.json file.
@ -248,7 +245,7 @@ func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema
tfroot.Provider = schema.NewProviders() tfroot.Provider = schema.NewProviders()
// Convert each resource in the bundle to the equivalent Terraform representation. // Convert each resource in the bundle to the equivalent Terraform representation.
resources, err := dyn.Get(root, "resources") dynResources, err := dyn.Get(root, "resources")
if err != nil { if err != nil {
// If the resources key is missing, return an empty root. // If the resources key is missing, return an empty root.
if dyn.IsNoSuchKeyError(err) { if dyn.IsNoSuchKeyError(err) {
@ -260,11 +257,20 @@ func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema
tfroot.Resource = schema.NewResources() tfroot.Resource = schema.NewResources()
numResources := 0 numResources := 0
_, err = dyn.Walk(resources, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { _, err = dyn.Walk(dynResources, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
if len(p) < 2 { if len(p) < 2 {
return v, nil return v, nil
} }
// Skip resources that have been deleted locally.
modifiedStatus, err := dyn.Get(v, "modified_status")
if err == nil {
modifiedStatusStr, ok := modifiedStatus.AsString()
if ok && modifiedStatusStr == resources.ModifiedStatusDeleted {
return v, dyn.ErrSkip
}
}
typ := p[0].Key() typ := p[0].Key()
key := p[1].Key() key := p[1].Key()
@ -275,7 +281,7 @@ func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema
} }
// Convert resource to Terraform representation. // Convert resource to Terraform representation.
err := c.Convert(ctx, key, v, tfroot.Resource) err = c.Convert(ctx, key, v, tfroot.Resource)
if err != nil { if err != nil {
return dyn.InvalidValue, err return dyn.InvalidValue, err
} }
@ -299,76 +305,83 @@ func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema
return tfroot, nil return tfroot, nil
} }
func TerraformToBundle(state *tfjson.State, config *config.Root) error { func TerraformToBundle(state *resourcesState, config *config.Root) error {
if state.Values != nil && state.Values.RootModule != nil { for _, resource := range state.Resources {
for _, resource := range state.Values.RootModule.Resources {
// Limit to resources.
if resource.Mode != tfjson.ManagedResourceMode { if resource.Mode != tfjson.ManagedResourceMode {
continue continue
} }
for _, instance := range resource.Instances {
switch resource.Type { switch resource.Type {
case "databricks_job": case "databricks_job":
var tmp schema.ResourceJob
conv(resource.AttributeValues, &tmp)
if config.Resources.Jobs == nil { if config.Resources.Jobs == nil {
config.Resources.Jobs = make(map[string]*resources.Job) config.Resources.Jobs = make(map[string]*resources.Job)
} }
cur := config.Resources.Jobs[resource.Name] cur := config.Resources.Jobs[resource.Name]
// TODO: make sure we can unmarshall tf state properly and don't swallow errors if cur == nil {
modifiedStatus := convRemoteToLocal(tmp, &cur) cur = &resources.Job{ModifiedStatus: resources.ModifiedStatusDeleted}
cur.ModifiedStatus = modifiedStatus }
cur.ID = instance.Attributes.ID
config.Resources.Jobs[resource.Name] = cur config.Resources.Jobs[resource.Name] = cur
case "databricks_pipeline": case "databricks_pipeline":
var tmp schema.ResourcePipeline
conv(resource.AttributeValues, &tmp)
if config.Resources.Pipelines == nil { if config.Resources.Pipelines == nil {
config.Resources.Pipelines = make(map[string]*resources.Pipeline) config.Resources.Pipelines = make(map[string]*resources.Pipeline)
} }
cur := config.Resources.Pipelines[resource.Name] cur := config.Resources.Pipelines[resource.Name]
modifiedStatus := convRemoteToLocal(tmp, &cur) if cur == nil {
cur.ModifiedStatus = modifiedStatus cur = &resources.Pipeline{ModifiedStatus: resources.ModifiedStatusDeleted}
}
cur.ID = instance.Attributes.ID
config.Resources.Pipelines[resource.Name] = cur config.Resources.Pipelines[resource.Name] = cur
case "databricks_mlflow_model": case "databricks_mlflow_model":
var tmp schema.ResourceMlflowModel
conv(resource.AttributeValues, &tmp)
if config.Resources.Models == nil { if config.Resources.Models == nil {
config.Resources.Models = make(map[string]*resources.MlflowModel) config.Resources.Models = make(map[string]*resources.MlflowModel)
} }
cur := config.Resources.Models[resource.Name] cur := config.Resources.Models[resource.Name]
modifiedStatus := convRemoteToLocal(tmp, &cur) if cur == nil {
cur.ModifiedStatus = modifiedStatus cur = &resources.MlflowModel{ModifiedStatus: resources.ModifiedStatusDeleted}
}
cur.ID = instance.Attributes.ID
config.Resources.Models[resource.Name] = cur config.Resources.Models[resource.Name] = cur
case "databricks_mlflow_experiment": case "databricks_mlflow_experiment":
var tmp schema.ResourceMlflowExperiment
conv(resource.AttributeValues, &tmp)
if config.Resources.Experiments == nil { if config.Resources.Experiments == nil {
config.Resources.Experiments = make(map[string]*resources.MlflowExperiment) config.Resources.Experiments = make(map[string]*resources.MlflowExperiment)
} }
cur := config.Resources.Experiments[resource.Name] cur := config.Resources.Experiments[resource.Name]
modifiedStatus := convRemoteToLocal(tmp, &cur) if cur == nil {
cur.ModifiedStatus = modifiedStatus cur = &resources.MlflowExperiment{ModifiedStatus: resources.ModifiedStatusDeleted}
}
cur.ID = instance.Attributes.ID
config.Resources.Experiments[resource.Name] = cur config.Resources.Experiments[resource.Name] = cur
case "databricks_model_serving": case "databricks_model_serving":
var tmp schema.ResourceModelServing
conv(resource.AttributeValues, &tmp)
if config.Resources.ModelServingEndpoints == nil { if config.Resources.ModelServingEndpoints == nil {
config.Resources.ModelServingEndpoints = make(map[string]*resources.ModelServingEndpoint) config.Resources.ModelServingEndpoints = make(map[string]*resources.ModelServingEndpoint)
} }
cur := config.Resources.ModelServingEndpoints[resource.Name] cur := config.Resources.ModelServingEndpoints[resource.Name]
modifiedStatus := convRemoteToLocal(tmp, &cur) if cur == nil {
cur.ModifiedStatus = modifiedStatus cur = &resources.ModelServingEndpoint{ModifiedStatus: resources.ModifiedStatusDeleted}
}
cur.ID = instance.Attributes.ID
config.Resources.ModelServingEndpoints[resource.Name] = cur config.Resources.ModelServingEndpoints[resource.Name] = cur
case "databricks_registered_model": case "databricks_registered_model":
var tmp schema.ResourceRegisteredModel
conv(resource.AttributeValues, &tmp)
if config.Resources.RegisteredModels == nil { if config.Resources.RegisteredModels == nil {
config.Resources.RegisteredModels = make(map[string]*resources.RegisteredModel) config.Resources.RegisteredModels = make(map[string]*resources.RegisteredModel)
} }
cur := config.Resources.RegisteredModels[resource.Name] cur := config.Resources.RegisteredModels[resource.Name]
modifiedStatus := convRemoteToLocal(tmp, &cur) if cur == nil {
cur.ModifiedStatus = modifiedStatus cur = &resources.RegisteredModel{ModifiedStatus: resources.ModifiedStatusDeleted}
}
cur.ID = instance.Attributes.ID
config.Resources.RegisteredModels[resource.Name] = cur config.Resources.RegisteredModels[resource.Name] = cur
case "databricks_quality_monitor":
if config.Resources.QualityMonitors == nil {
config.Resources.QualityMonitors = make(map[string]*resources.QualityMonitor)
}
cur := config.Resources.QualityMonitors[resource.Name]
if cur == nil {
cur = &resources.QualityMonitor{ModifiedStatus: resources.ModifiedStatusDeleted}
}
cur.ID = instance.Attributes.ID
config.Resources.QualityMonitors[resource.Name] = cur
case "databricks_permissions": case "databricks_permissions":
case "databricks_grants": case "databricks_grants":
// Ignore; no need to pull these back into the configuration. // Ignore; no need to pull these back into the configuration.
@ -408,6 +421,11 @@ func TerraformToBundle(state *tfjson.State, config *config.Root) error {
src.ModifiedStatus = resources.ModifiedStatusCreated src.ModifiedStatus = resources.ModifiedStatusCreated
} }
} }
for _, src := range config.Resources.QualityMonitors {
if src.ModifiedStatus == "" && src.ID == "" {
src.ModifiedStatus = resources.ModifiedStatusCreated
}
}
return nil return nil
} }

View File

@ -17,7 +17,6 @@ import (
"github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/ml"
"github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/databricks/databricks-sdk-go/service/serving" "github.com/databricks/databricks-sdk-go/service/serving"
tfjson "github.com/hashicorp/terraform-json"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -548,51 +547,95 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) {
bundleToTerraformEquivalenceTest(t, &config) bundleToTerraformEquivalenceTest(t, &config)
} }
func TestBundleToTerraformDeletedResources(t *testing.T) {
var job1 = resources.Job{
JobSettings: &jobs.JobSettings{},
}
var job2 = resources.Job{
ModifiedStatus: resources.ModifiedStatusDeleted,
JobSettings: &jobs.JobSettings{},
}
var config = config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"my_job1": &job1,
"my_job2": &job2,
},
},
}
vin, err := convert.FromTyped(config, dyn.NilValue)
require.NoError(t, err)
out, err := BundleToTerraformWithDynValue(context.Background(), vin)
require.NoError(t, err)
_, ok := out.Resource.Job["my_job1"]
assert.True(t, ok)
_, ok = out.Resource.Job["my_job2"]
assert.False(t, ok)
}
func TestTerraformToBundleEmptyLocalResources(t *testing.T) { func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
var config = config.Root{ var config = config.Root{
Resources: config.Resources{}, Resources: config.Resources{},
} }
var tfState = tfjson.State{ var tfState = resourcesState{
Values: &tfjson.StateValues{ Resources: []stateResource{
RootModule: &tfjson.StateModule{
Resources: []*tfjson.StateResource{
{ {
Type: "databricks_job", Type: "databricks_job",
Mode: "managed", Mode: "managed",
Name: "test_job", Name: "test_job",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_pipeline", Type: "databricks_pipeline",
Mode: "managed", Mode: "managed",
Name: "test_pipeline", Name: "test_pipeline",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_mlflow_model", Type: "databricks_mlflow_model",
Mode: "managed", Mode: "managed",
Name: "test_mlflow_model", Name: "test_mlflow_model",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_mlflow_experiment", Type: "databricks_mlflow_experiment",
Mode: "managed", Mode: "managed",
Name: "test_mlflow_experiment", Name: "test_mlflow_experiment",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_model_serving", Type: "databricks_model_serving",
Mode: "managed", Mode: "managed",
Name: "test_model_serving", Name: "test_model_serving",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_registered_model", Type: "databricks_registered_model",
Mode: "managed", Mode: "managed",
Name: "test_registered_model", Name: "test_registered_model",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
}, },
}, },
{
Type: "databricks_quality_monitor",
Mode: "managed",
Name: "test_monitor",
Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
}, },
} }
@ -617,6 +660,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
assert.Equal(t, "1", config.Resources.RegisteredModels["test_registered_model"].ID) assert.Equal(t, "1", config.Resources.RegisteredModels["test_registered_model"].ID)
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.RegisteredModels["test_registered_model"].ModifiedStatus) assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.RegisteredModels["test_registered_model"].ModifiedStatus)
assert.Equal(t, "1", config.Resources.QualityMonitors["test_monitor"].ID)
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.QualityMonitors["test_monitor"].ModifiedStatus)
AssertFullResourceCoverage(t, &config) AssertFullResourceCoverage(t, &config)
} }
@ -665,10 +711,17 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
}, },
}, },
}, },
QualityMonitors: map[string]*resources.QualityMonitor{
"test_monitor": {
CreateMonitor: &catalog.CreateMonitor{
TableName: "test_monitor",
},
},
},
}, },
} }
var tfState = tfjson.State{ var tfState = resourcesState{
Values: nil, Resources: nil,
} }
err := TerraformToBundle(&tfState, &config) err := TerraformToBundle(&tfState, &config)
assert.NoError(t, err) assert.NoError(t, err)
@ -691,6 +744,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
assert.Equal(t, "", config.Resources.RegisteredModels["test_registered_model"].ID) assert.Equal(t, "", config.Resources.RegisteredModels["test_registered_model"].ID)
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.RegisteredModels["test_registered_model"].ModifiedStatus) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.RegisteredModels["test_registered_model"].ModifiedStatus)
assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor"].ID)
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.QualityMonitors["test_monitor"].ModifiedStatus)
AssertFullResourceCoverage(t, &config) AssertFullResourceCoverage(t, &config)
} }
@ -769,85 +825,133 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
}, },
}, },
}, },
QualityMonitors: map[string]*resources.QualityMonitor{
"test_monitor": {
CreateMonitor: &catalog.CreateMonitor{
TableName: "test_monitor",
},
},
"test_monitor_new": {
CreateMonitor: &catalog.CreateMonitor{
TableName: "test_monitor_new",
},
},
},
}, },
} }
var tfState = tfjson.State{ var tfState = resourcesState{
Values: &tfjson.StateValues{ Resources: []stateResource{
RootModule: &tfjson.StateModule{
Resources: []*tfjson.StateResource{
{ {
Type: "databricks_job", Type: "databricks_job",
Mode: "managed", Mode: "managed",
Name: "test_job", Name: "test_job",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_job", Type: "databricks_job",
Mode: "managed", Mode: "managed",
Name: "test_job_old", Name: "test_job_old",
AttributeValues: map[string]interface{}{"id": "2"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "2"}},
},
}, },
{ {
Type: "databricks_pipeline", Type: "databricks_pipeline",
Mode: "managed", Mode: "managed",
Name: "test_pipeline", Name: "test_pipeline",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_pipeline", Type: "databricks_pipeline",
Mode: "managed", Mode: "managed",
Name: "test_pipeline_old", Name: "test_pipeline_old",
AttributeValues: map[string]interface{}{"id": "2"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "2"}},
},
}, },
{ {
Type: "databricks_mlflow_model", Type: "databricks_mlflow_model",
Mode: "managed", Mode: "managed",
Name: "test_mlflow_model", Name: "test_mlflow_model",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_mlflow_model", Type: "databricks_mlflow_model",
Mode: "managed", Mode: "managed",
Name: "test_mlflow_model_old", Name: "test_mlflow_model_old",
AttributeValues: map[string]interface{}{"id": "2"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "2"}},
},
}, },
{ {
Type: "databricks_mlflow_experiment", Type: "databricks_mlflow_experiment",
Mode: "managed", Mode: "managed",
Name: "test_mlflow_experiment", Name: "test_mlflow_experiment",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_mlflow_experiment", Type: "databricks_mlflow_experiment",
Mode: "managed", Mode: "managed",
Name: "test_mlflow_experiment_old", Name: "test_mlflow_experiment_old",
AttributeValues: map[string]interface{}{"id": "2"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "2"}},
},
}, },
{ {
Type: "databricks_model_serving", Type: "databricks_model_serving",
Mode: "managed", Mode: "managed",
Name: "test_model_serving", Name: "test_model_serving",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_model_serving", Type: "databricks_model_serving",
Mode: "managed", Mode: "managed",
Name: "test_model_serving_old", Name: "test_model_serving_old",
AttributeValues: map[string]interface{}{"id": "2"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "2"}},
},
}, },
{ {
Type: "databricks_registered_model", Type: "databricks_registered_model",
Mode: "managed", Mode: "managed",
Name: "test_registered_model", Name: "test_registered_model",
AttributeValues: map[string]interface{}{"id": "1"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "1"}},
},
}, },
{ {
Type: "databricks_registered_model", Type: "databricks_registered_model",
Mode: "managed", Mode: "managed",
Name: "test_registered_model_old", Name: "test_registered_model_old",
AttributeValues: map[string]interface{}{"id": "2"}, Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "2"}},
}, },
}, },
{
Type: "databricks_quality_monitor",
Mode: "managed",
Name: "test_monitor",
Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "test_monitor"}},
},
},
{
Type: "databricks_quality_monitor",
Mode: "managed",
Name: "test_monitor_old",
Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "test_monitor_old"}},
},
}, },
}, },
} }
@ -896,6 +1000,12 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
assert.Equal(t, "", config.Resources.ModelServingEndpoints["test_model_serving_new"].ID) assert.Equal(t, "", config.Resources.ModelServingEndpoints["test_model_serving_new"].ID)
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.ModelServingEndpoints["test_model_serving_new"].ModifiedStatus) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.ModelServingEndpoints["test_model_serving_new"].ModifiedStatus)
assert.Equal(t, "test_monitor", config.Resources.QualityMonitors["test_monitor"].ID)
assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor"].ModifiedStatus)
assert.Equal(t, "test_monitor_old", config.Resources.QualityMonitors["test_monitor_old"].ID)
assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.QualityMonitors["test_monitor_old"].ModifiedStatus)
assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor_new"].ID)
assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.QualityMonitors["test_monitor_new"].ModifiedStatus)
AssertFullResourceCoverage(t, &config) AssertFullResourceCoverage(t, &config)
} }

View File

@ -54,6 +54,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D
path = dyn.NewPath(dyn.Key("databricks_model_serving")).Append(path[2:]...) path = dyn.NewPath(dyn.Key("databricks_model_serving")).Append(path[2:]...)
case dyn.Key("registered_models"): case dyn.Key("registered_models"):
path = dyn.NewPath(dyn.Key("databricks_registered_model")).Append(path[2:]...) path = dyn.NewPath(dyn.Key("databricks_registered_model")).Append(path[2:]...)
case dyn.Key("quality_monitors"):
path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...)
default: default:
// Trigger "key not found" for unknown resource types. // Trigger "key not found" for unknown resource types.
return dyn.GetByPath(root, path) return dyn.GetByPath(root, path)
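
The new case above extends the interpolation mutator so that bundle references under resources.quality_monitors resolve to the databricks_quality_monitor Terraform address. A simplified, string-based sketch of that rewrite (the actual mutator walks dyn.Path values rather than strings; the mapping entries below are taken from the cases visible in this hunk):

package main

import (
	"fmt"
	"strings"
)

// rewriteReference illustrates the path rewrite: a bundle reference such as
// "resources.quality_monitors.my_monitor.id" maps to the Terraform address
// "databricks_quality_monitor.my_monitor.id". Unknown prefixes are left as-is,
// mirroring the default fallthrough above.
func rewriteReference(ref string) string {
	mapping := map[string]string{
		"model_serving_endpoints": "databricks_model_serving",
		"registered_models":       "databricks_registered_model",
		"quality_monitors":        "databricks_quality_monitor",
	}
	parts := strings.Split(ref, ".")
	if len(parts) < 3 || parts[0] != "resources" {
		return ref
	}
	tfType, ok := mapping[parts[1]]
	if !ok {
		return ref
	}
	return strings.Join(append([]string{tfType}, parts[2:]...), ".")
}

func main() {
	fmt.Println(rewriteReference("resources.quality_monitors.my_monitor.id"))
	// databricks_quality_monitor.my_monitor.id
}
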

View File

@ -8,7 +8,6 @@ import (
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/diag"
"github.com/hashicorp/terraform-exec/tfexec" "github.com/hashicorp/terraform-exec/tfexec"
tfjson "github.com/hashicorp/terraform-json"
) )
type loadMode int type loadMode int
@ -34,7 +33,7 @@ func (l *load) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
return diag.Errorf("terraform init: %v", err) return diag.Errorf("terraform init: %v", err)
} }
state, err := b.Terraform.Show(ctx) state, err := ParseResourcesState(ctx, b)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -53,16 +52,13 @@ func (l *load) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
return nil return nil
} }
func (l *load) validateState(state *tfjson.State) error { func (l *load) validateState(state *resourcesState) error {
if state.Values == nil { if state.Version != SupportedStateVersion {
if slices.Contains(l.modes, ErrorOnEmptyState) { return fmt.Errorf("unsupported deployment state version: %d. Try re-deploying the bundle", state.Version)
return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?")
}
return nil
} }
if state.Values.RootModule == nil { if len(state.Resources) == 0 && slices.Contains(l.modes, ErrorOnEmptyState) {
return fmt.Errorf("malformed terraform state: RootModule not set") return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?")
} }
return nil return nil

View File

@ -0,0 +1,37 @@
package tfdyn
import (
"context"
"github.com/databricks/cli/bundle/internal/tf/schema"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/cli/libs/log"
)
func convertQualityMonitorResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
// Normalize the output value to the target schema.
vout, diags := convert.Normalize(schema.ResourceQualityMonitor{}, vin)
for _, diag := range diags {
log.Debugf(ctx, "monitor normalization diagnostic: %s", diag.Summary)
}
return vout, nil
}
type qualityMonitorConverter struct{}
func (qualityMonitorConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
vout, err := convertQualityMonitorResource(ctx, vin)
if err != nil {
return err
}
// Add the converted resource to the output.
out.QualityMonitor[key] = vout.AsAny()
return nil
}
func init() {
registerConverter("quality_monitors", qualityMonitorConverter{})
}

View File

@ -0,0 +1,46 @@
package tfdyn
import (
"context"
"testing"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/tf/schema"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConvertQualityMonitor(t *testing.T) {
var src = resources.QualityMonitor{
CreateMonitor: &catalog.CreateMonitor{
TableName: "test_table_name",
AssetsDir: "assets_dir",
OutputSchemaName: "output_schema_name",
InferenceLog: &catalog.MonitorInferenceLog{
ModelIdCol: "model_id",
PredictionCol: "test_prediction_col",
ProblemType: "PROBLEM_TYPE_CLASSIFICATION",
},
},
}
vin, err := convert.FromTyped(src, dyn.NilValue)
require.NoError(t, err)
ctx := context.Background()
out := schema.NewResources()
err = qualityMonitorConverter{}.Convert(ctx, "my_monitor", vin, out)
require.NoError(t, err)
assert.Equal(t, map[string]any{
"assets_dir": "assets_dir",
"output_schema_name": "output_schema_name",
"table_name": "test_table_name",
"inference_log": map[string]any{
"model_id_col": "model_id",
"prediction_col": "test_prediction_col",
"problem_type": "PROBLEM_TYPE_CLASSIFICATION",
},
}, out.QualityMonitor["my_monitor"])
}

View File

@ -1,14 +1,46 @@
package terraform package terraform
import ( import (
"context"
"encoding/json" "encoding/json"
"errors"
"io" "io"
"os"
"path/filepath"
"github.com/databricks/cli/bundle"
tfjson "github.com/hashicorp/terraform-json"
) )
type state struct { // Partial representation of the Terraform state file format.
// We are only interested in the global version and serial numbers,
// plus resource types, names, modes, and ids.
type resourcesState struct {
Version int `json:"version"`
Resources []stateResource `json:"resources"`
}
const SupportedStateVersion = 4
type serialState struct {
Serial int `json:"serial"` Serial int `json:"serial"`
} }
type stateResource struct {
Type string `json:"type"`
Name string `json:"name"`
Mode tfjson.ResourceMode `json:"mode"`
Instances []stateResourceInstance `json:"instances"`
}
type stateResourceInstance struct {
Attributes stateInstanceAttributes `json:"attributes"`
}
type stateInstanceAttributes struct {
ID string `json:"id"`
}
func IsLocalStateStale(local io.Reader, remote io.Reader) bool { func IsLocalStateStale(local io.Reader, remote io.Reader) bool {
localState, err := loadState(local) localState, err := loadState(local)
if err != nil { if err != nil {
@ -23,12 +55,12 @@ func IsLocalStateStale(local io.Reader, remote io.Reader) bool {
return localState.Serial < remoteState.Serial return localState.Serial < remoteState.Serial
} }
func loadState(input io.Reader) (*state, error) { func loadState(input io.Reader) (*serialState, error) {
content, err := io.ReadAll(input) content, err := io.ReadAll(input)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var s state var s serialState
err = json.Unmarshal(content, &s) err = json.Unmarshal(content, &s)
if err != nil { if err != nil {
return nil, err return nil, err
@ -36,3 +68,20 @@ func loadState(input io.Reader) (*state, error) {
return &s, nil return &s, nil
} }
func ParseResourcesState(ctx context.Context, b *bundle.Bundle) (*resourcesState, error) {
cacheDir, err := Dir(ctx, b)
if err != nil {
return nil, err
}
rawState, err := os.ReadFile(filepath.Join(cacheDir, TerraformStateFileName))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return &resourcesState{Version: SupportedStateVersion}, nil
}
return nil, err
}
var state resourcesState
err = json.Unmarshal(rawState, &state)
return &state, err
}
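
Because the structs above model only a slice of the state file, decoding relies on encoding/json ignoring unknown fields. A self-contained sketch of that partial decode (struct definitions repeated so the example runs on its own, with Mode simplified to a plain string instead of tfjson.ResourceMode):

package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of the partial state representation above: unknown fields in the
// Terraform state document are simply ignored by encoding/json.
type resourcesState struct {
	Version   int             `json:"version"`
	Resources []stateResource `json:"resources"`
}

type stateResource struct {
	Type      string                  `json:"type"`
	Name      string                  `json:"name"`
	Mode      string                  `json:"mode"`
	Instances []stateResourceInstance `json:"instances"`
}

type stateResourceInstance struct {
	Attributes stateInstanceAttributes `json:"attributes"`
}

type stateInstanceAttributes struct {
	ID string `json:"id"`
}

func main() {
	// Trimmed-down state document; real files carry many more attributes,
	// all of which are dropped by the partial structs.
	data := []byte(`{
		"version": 4,
		"serial": 7,
		"resources": [
			{
				"mode": "managed",
				"type": "databricks_job",
				"name": "my_job",
				"instances": [{"attributes": {"id": "123"}}]
			}
		]
	}`)

	var state resourcesState
	if err := json.Unmarshal(data, &state); err != nil {
		panic(err)
	}
	fmt.Println(state.Version, state.Resources[0].Type, state.Resources[0].Instances[0].Attributes.ID)
	// 4 databricks_job 123
}
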

View File

@ -1,11 +1,16 @@
package terraform package terraform
import ( import (
"context"
"fmt" "fmt"
"os"
"path/filepath"
"strings" "strings"
"testing" "testing"
"testing/iotest" "testing/iotest"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -38,3 +43,97 @@ func TestLocalStateMarkNonStaleWhenRemoteFailsToLoad(t *testing.T) {
remote := iotest.ErrReader(fmt.Errorf("Random error")) remote := iotest.ErrReader(fmt.Errorf("Random error"))
assert.False(t, IsLocalStateStale(local, remote)) assert.False(t, IsLocalStateStale(local, remote))
} }
func TestParseResourcesStateWithNoFile(t *testing.T) {
b := &bundle.Bundle{
RootPath: t.TempDir(),
Config: config.Root{
Bundle: config.Bundle{
Target: "whatever",
Terraform: &config.Terraform{
ExecPath: "terraform",
},
},
},
}
state, err := ParseResourcesState(context.Background(), b)
assert.NoError(t, err)
assert.Equal(t, &resourcesState{Version: SupportedStateVersion}, state)
}
func TestParseResourcesStateWithExistingStateFile(t *testing.T) {
ctx := context.Background()
b := &bundle.Bundle{
RootPath: t.TempDir(),
Config: config.Root{
Bundle: config.Bundle{
Target: "whatever",
Terraform: &config.Terraform{
ExecPath: "terraform",
},
},
},
}
cacheDir, err := Dir(ctx, b)
assert.NoError(t, err)
data := []byte(`{
"version": 4,
"unknown_field": "hello",
"resources": [
{
"mode": "managed",
"type": "databricks_pipeline",
"name": "test_pipeline",
"provider": "provider[\"registry.terraform.io/databricks/databricks\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allow_duplicate_names": false,
"catalog": null,
"channel": "CURRENT",
"cluster": [],
"random_field": "random_value",
"configuration": {
"bundle.sourcePath": "/Workspace//Users/user/.bundle/test/dev/files/src"
},
"continuous": false,
"development": true,
"edition": "ADVANCED",
"filters": [],
"id": "123",
"library": [],
"name": "test_pipeline",
"notification": [],
"photon": false,
"serverless": false,
"storage": "dbfs:/123456",
"target": "test_dev",
"timeouts": null,
"url": "https://test.com"
},
"sensitive_attributes": []
}
]
}
]
}`)
err = os.WriteFile(filepath.Join(cacheDir, TerraformStateFileName), data, os.ModePerm)
assert.NoError(t, err)
state, err := ParseResourcesState(ctx, b)
assert.NoError(t, err)
expected := &resourcesState{
Version: 4,
Resources: []stateResource{
{
Mode: "managed",
Type: "databricks_pipeline",
Name: "test_pipeline",
Instances: []stateResourceInstance{
{Attributes: stateInstanceAttributes{ID: "123"}},
},
},
},
}
assert.Equal(t, expected, state)
}

View File

@ -1,3 +1,3 @@
package schema package schema
const ProviderVersion = "1.40.0" const ProviderVersion = "1.46.0"

View File

@ -0,0 +1,46 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type DataSourceCatalogCatalogInfoEffectivePredictiveOptimizationFlag struct {
InheritedFromName string `json:"inherited_from_name,omitempty"`
InheritedFromType string `json:"inherited_from_type,omitempty"`
Value string `json:"value"`
}
type DataSourceCatalogCatalogInfoProvisioningInfo struct {
State string `json:"state,omitempty"`
}
type DataSourceCatalogCatalogInfo struct {
BrowseOnly bool `json:"browse_only,omitempty"`
CatalogType string `json:"catalog_type,omitempty"`
Comment string `json:"comment,omitempty"`
ConnectionName string `json:"connection_name,omitempty"`
CreatedAt int `json:"created_at,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"`
FullName string `json:"full_name,omitempty"`
IsolationMode string `json:"isolation_mode,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name,omitempty"`
Options map[string]string `json:"options,omitempty"`
Owner string `json:"owner,omitempty"`
Properties map[string]string `json:"properties,omitempty"`
ProviderName string `json:"provider_name,omitempty"`
SecurableKind string `json:"securable_kind,omitempty"`
SecurableType string `json:"securable_type,omitempty"`
ShareName string `json:"share_name,omitempty"`
StorageLocation string `json:"storage_location,omitempty"`
StorageRoot string `json:"storage_root,omitempty"`
UpdatedAt int `json:"updated_at,omitempty"`
UpdatedBy string `json:"updated_by,omitempty"`
EffectivePredictiveOptimizationFlag *DataSourceCatalogCatalogInfoEffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"`
ProvisioningInfo *DataSourceCatalogCatalogInfoProvisioningInfo `json:"provisioning_info,omitempty"`
}
type DataSourceCatalog struct {
Id string `json:"id,omitempty"`
Name string `json:"name"`
CatalogInfo *DataSourceCatalogCatalogInfo `json:"catalog_info,omitempty"`
}

View File

@ -55,9 +55,9 @@ type DataSourceJobJobSettingsSettingsGitSource struct {
} }
type DataSourceJobJobSettingsSettingsHealthRules struct { type DataSourceJobJobSettingsSettingsHealthRules struct {
Metric string `json:"metric,omitempty"` Metric string `json:"metric"`
Op string `json:"op,omitempty"` Op string `json:"op"`
Value int `json:"value,omitempty"` Value int `json:"value"`
} }
type DataSourceJobJobSettingsSettingsHealth struct { type DataSourceJobJobSettingsSettingsHealth struct {
@ -222,7 +222,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewCluster struct {
} }
type DataSourceJobJobSettingsSettingsJobCluster struct { type DataSourceJobJobSettingsSettingsJobCluster struct {
JobClusterKey string `json:"job_cluster_key,omitempty"` JobClusterKey string `json:"job_cluster_key"`
NewCluster *DataSourceJobJobSettingsSettingsJobClusterNewCluster `json:"new_cluster,omitempty"` NewCluster *DataSourceJobJobSettingsSettingsJobClusterNewCluster `json:"new_cluster,omitempty"`
} }
@ -245,6 +245,7 @@ type DataSourceJobJobSettingsSettingsLibraryPypi struct {
type DataSourceJobJobSettingsSettingsLibrary struct { type DataSourceJobJobSettingsSettingsLibrary struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *DataSourceJobJobSettingsSettingsLibraryCran `json:"cran,omitempty"` Cran *DataSourceJobJobSettingsSettingsLibraryCran `json:"cran,omitempty"`
Maven *DataSourceJobJobSettingsSettingsLibraryMaven `json:"maven,omitempty"` Maven *DataSourceJobJobSettingsSettingsLibraryMaven `json:"maven,omitempty"`
@ -532,9 +533,9 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications struc
} }
type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealthRules struct { type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealthRules struct {
Metric string `json:"metric,omitempty"` Metric string `json:"metric"`
Op string `json:"op,omitempty"` Op string `json:"op"`
Value int `json:"value,omitempty"` Value int `json:"value"`
} }
type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealth struct { type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealth struct {
@ -560,6 +561,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryPypi struct {
type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibrary struct { type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibrary struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` Cran *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"`
Maven *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` Maven *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"`
@ -803,7 +805,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQuery struct {
type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTask struct { type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTask struct {
Parameters map[string]string `json:"parameters,omitempty"` Parameters map[string]string `json:"parameters,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"` WarehouseId string `json:"warehouse_id"`
Alert *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"` Alert *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"`
Dashboard *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"` Dashboard *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"`
File *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"` File *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"`
@ -842,7 +844,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTask struct {
MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
RunIf string `json:"run_if,omitempty"` RunIf string `json:"run_if,omitempty"`
TaskKey string `json:"task_key,omitempty"` TaskKey string `json:"task_key"`
TimeoutSeconds int `json:"timeout_seconds,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"`
ConditionTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` ConditionTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"`
DbtTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"`
@ -870,9 +872,9 @@ type DataSourceJobJobSettingsSettingsTaskForEachTask struct {
} }
type DataSourceJobJobSettingsSettingsTaskHealthRules struct { type DataSourceJobJobSettingsSettingsTaskHealthRules struct {
Metric string `json:"metric,omitempty"` Metric string `json:"metric"`
Op string `json:"op,omitempty"` Op string `json:"op"`
Value int `json:"value,omitempty"` Value int `json:"value"`
} }
type DataSourceJobJobSettingsSettingsTaskHealth struct { type DataSourceJobJobSettingsSettingsTaskHealth struct {
@ -898,6 +900,7 @@ type DataSourceJobJobSettingsSettingsTaskLibraryPypi struct {
type DataSourceJobJobSettingsSettingsTaskLibrary struct { type DataSourceJobJobSettingsSettingsTaskLibrary struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *DataSourceJobJobSettingsSettingsTaskLibraryCran `json:"cran,omitempty"` Cran *DataSourceJobJobSettingsSettingsTaskLibraryCran `json:"cran,omitempty"`
Maven *DataSourceJobJobSettingsSettingsTaskLibraryMaven `json:"maven,omitempty"` Maven *DataSourceJobJobSettingsSettingsTaskLibraryMaven `json:"maven,omitempty"`
@ -1141,7 +1144,7 @@ type DataSourceJobJobSettingsSettingsTaskSqlTaskQuery struct {
type DataSourceJobJobSettingsSettingsTaskSqlTask struct { type DataSourceJobJobSettingsSettingsTaskSqlTask struct {
Parameters map[string]string `json:"parameters,omitempty"` Parameters map[string]string `json:"parameters,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"` WarehouseId string `json:"warehouse_id"`
Alert *DataSourceJobJobSettingsSettingsTaskSqlTaskAlert `json:"alert,omitempty"` Alert *DataSourceJobJobSettingsSettingsTaskSqlTaskAlert `json:"alert,omitempty"`
Dashboard *DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard `json:"dashboard,omitempty"` Dashboard *DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard `json:"dashboard,omitempty"`
File *DataSourceJobJobSettingsSettingsTaskSqlTaskFile `json:"file,omitempty"` File *DataSourceJobJobSettingsSettingsTaskSqlTaskFile `json:"file,omitempty"`
@ -1180,7 +1183,7 @@ type DataSourceJobJobSettingsSettingsTask struct {
MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
RunIf string `json:"run_if,omitempty"` RunIf string `json:"run_if,omitempty"`
TaskKey string `json:"task_key,omitempty"` TaskKey string `json:"task_key"`
TimeoutSeconds int `json:"timeout_seconds,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"`
ConditionTask *DataSourceJobJobSettingsSettingsTaskConditionTask `json:"condition_task,omitempty"` ConditionTask *DataSourceJobJobSettingsSettingsTaskConditionTask `json:"condition_task,omitempty"`
DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"`

View File

@ -0,0 +1,19 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type DataSourceMlflowExperimentTags struct {
Key string `json:"key,omitempty"`
Value string `json:"value,omitempty"`
}
type DataSourceMlflowExperiment struct {
ArtifactLocation string `json:"artifact_location,omitempty"`
CreationTime int `json:"creation_time,omitempty"`
ExperimentId string `json:"experiment_id,omitempty"`
Id string `json:"id,omitempty"`
LastUpdateTime int `json:"last_update_time,omitempty"`
LifecycleStage string `json:"lifecycle_stage,omitempty"`
Name string `json:"name,omitempty"`
Tags []DataSourceMlflowExperimentTags `json:"tags,omitempty"`
}

View File

@ -0,0 +1,127 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type DataSourceTableTableInfoColumnsMask struct {
FunctionName string `json:"function_name,omitempty"`
UsingColumnNames []string `json:"using_column_names,omitempty"`
}
type DataSourceTableTableInfoColumns struct {
Comment string `json:"comment,omitempty"`
Name string `json:"name,omitempty"`
Nullable bool `json:"nullable,omitempty"`
PartitionIndex int `json:"partition_index,omitempty"`
Position int `json:"position,omitempty"`
TypeIntervalType string `json:"type_interval_type,omitempty"`
TypeJson string `json:"type_json,omitempty"`
TypeName string `json:"type_name,omitempty"`
TypePrecision int `json:"type_precision,omitempty"`
TypeScale int `json:"type_scale,omitempty"`
TypeText string `json:"type_text,omitempty"`
Mask *DataSourceTableTableInfoColumnsMask `json:"mask,omitempty"`
}
type DataSourceTableTableInfoDeltaRuntimePropertiesKvpairs struct {
DeltaRuntimeProperties map[string]string `json:"delta_runtime_properties"`
}
type DataSourceTableTableInfoEffectivePredictiveOptimizationFlag struct {
InheritedFromName string `json:"inherited_from_name,omitempty"`
InheritedFromType string `json:"inherited_from_type,omitempty"`
Value string `json:"value"`
}
type DataSourceTableTableInfoEncryptionDetailsSseEncryptionDetails struct {
Algorithm string `json:"algorithm,omitempty"`
AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"`
}
type DataSourceTableTableInfoEncryptionDetails struct {
SseEncryptionDetails *DataSourceTableTableInfoEncryptionDetailsSseEncryptionDetails `json:"sse_encryption_details,omitempty"`
}
type DataSourceTableTableInfoRowFilter struct {
FunctionName string `json:"function_name"`
InputColumnNames []string `json:"input_column_names"`
}
type DataSourceTableTableInfoTableConstraintsForeignKeyConstraint struct {
ChildColumns []string `json:"child_columns"`
Name string `json:"name"`
ParentColumns []string `json:"parent_columns"`
ParentTable string `json:"parent_table"`
}
type DataSourceTableTableInfoTableConstraintsNamedTableConstraint struct {
Name string `json:"name"`
}
type DataSourceTableTableInfoTableConstraintsPrimaryKeyConstraint struct {
ChildColumns []string `json:"child_columns"`
Name string `json:"name"`
}
type DataSourceTableTableInfoTableConstraints struct {
ForeignKeyConstraint *DataSourceTableTableInfoTableConstraintsForeignKeyConstraint `json:"foreign_key_constraint,omitempty"`
NamedTableConstraint *DataSourceTableTableInfoTableConstraintsNamedTableConstraint `json:"named_table_constraint,omitempty"`
PrimaryKeyConstraint *DataSourceTableTableInfoTableConstraintsPrimaryKeyConstraint `json:"primary_key_constraint,omitempty"`
}
type DataSourceTableTableInfoViewDependenciesDependenciesFunction struct {
FunctionFullName string `json:"function_full_name"`
}
type DataSourceTableTableInfoViewDependenciesDependenciesTable struct {
TableFullName string `json:"table_full_name"`
}
type DataSourceTableTableInfoViewDependenciesDependencies struct {
Function *DataSourceTableTableInfoViewDependenciesDependenciesFunction `json:"function,omitempty"`
Table *DataSourceTableTableInfoViewDependenciesDependenciesTable `json:"table,omitempty"`
}
type DataSourceTableTableInfoViewDependencies struct {
Dependencies []DataSourceTableTableInfoViewDependenciesDependencies `json:"dependencies,omitempty"`
}
type DataSourceTableTableInfo struct {
AccessPoint string `json:"access_point,omitempty"`
BrowseOnly bool `json:"browse_only,omitempty"`
CatalogName string `json:"catalog_name,omitempty"`
Comment string `json:"comment,omitempty"`
CreatedAt int `json:"created_at,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
DataAccessConfigurationId string `json:"data_access_configuration_id,omitempty"`
DataSourceFormat string `json:"data_source_format,omitempty"`
DeletedAt int `json:"deleted_at,omitempty"`
EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"`
FullName string `json:"full_name,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name,omitempty"`
Owner string `json:"owner,omitempty"`
PipelineId string `json:"pipeline_id,omitempty"`
Properties map[string]string `json:"properties,omitempty"`
SchemaName string `json:"schema_name,omitempty"`
SqlPath string `json:"sql_path,omitempty"`
StorageCredentialName string `json:"storage_credential_name,omitempty"`
StorageLocation string `json:"storage_location,omitempty"`
TableId string `json:"table_id,omitempty"`
TableType string `json:"table_type,omitempty"`
UpdatedAt int `json:"updated_at,omitempty"`
UpdatedBy string `json:"updated_by,omitempty"`
ViewDefinition string `json:"view_definition,omitempty"`
Columns []DataSourceTableTableInfoColumns `json:"columns,omitempty"`
DeltaRuntimePropertiesKvpairs *DataSourceTableTableInfoDeltaRuntimePropertiesKvpairs `json:"delta_runtime_properties_kvpairs,omitempty"`
EffectivePredictiveOptimizationFlag *DataSourceTableTableInfoEffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"`
EncryptionDetails *DataSourceTableTableInfoEncryptionDetails `json:"encryption_details,omitempty"`
RowFilter *DataSourceTableTableInfoRowFilter `json:"row_filter,omitempty"`
TableConstraints []DataSourceTableTableInfoTableConstraints `json:"table_constraints,omitempty"`
ViewDependencies *DataSourceTableTableInfoViewDependencies `json:"view_dependencies,omitempty"`
}
type DataSourceTable struct {
Id string `json:"id,omitempty"`
Name string `json:"name"`
TableInfo *DataSourceTableTableInfo `json:"table_info,omitempty"`
}

View File

@ -7,6 +7,7 @@ type DataSources struct {
AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"`
AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"`
AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"` AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"`
Catalog map[string]any `json:"databricks_catalog,omitempty"`
Catalogs map[string]any `json:"databricks_catalogs,omitempty"` Catalogs map[string]any `json:"databricks_catalogs,omitempty"`
Cluster map[string]any `json:"databricks_cluster,omitempty"` Cluster map[string]any `json:"databricks_cluster,omitempty"`
ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"`
@ -26,6 +27,7 @@ type DataSources struct {
Jobs map[string]any `json:"databricks_jobs,omitempty"` Jobs map[string]any `json:"databricks_jobs,omitempty"`
Metastore map[string]any `json:"databricks_metastore,omitempty"` Metastore map[string]any `json:"databricks_metastore,omitempty"`
Metastores map[string]any `json:"databricks_metastores,omitempty"` Metastores map[string]any `json:"databricks_metastores,omitempty"`
MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"`
MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"`
MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"`
MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"`
@ -43,6 +45,7 @@ type DataSources struct {
SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"` SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"`
StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"`
StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"` StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"`
Table map[string]any `json:"databricks_table,omitempty"`
Tables map[string]any `json:"databricks_tables,omitempty"` Tables map[string]any `json:"databricks_tables,omitempty"`
User map[string]any `json:"databricks_user,omitempty"` User map[string]any `json:"databricks_user,omitempty"`
Views map[string]any `json:"databricks_views,omitempty"` Views map[string]any `json:"databricks_views,omitempty"`
@ -56,6 +59,7 @@ func NewDataSources() *DataSources {
AwsBucketPolicy: make(map[string]any), AwsBucketPolicy: make(map[string]any),
AwsCrossaccountPolicy: make(map[string]any), AwsCrossaccountPolicy: make(map[string]any),
AwsUnityCatalogPolicy: make(map[string]any), AwsUnityCatalogPolicy: make(map[string]any),
Catalog: make(map[string]any),
Catalogs: make(map[string]any), Catalogs: make(map[string]any),
Cluster: make(map[string]any), Cluster: make(map[string]any),
ClusterPolicy: make(map[string]any), ClusterPolicy: make(map[string]any),
@ -75,6 +79,7 @@ func NewDataSources() *DataSources {
Jobs: make(map[string]any), Jobs: make(map[string]any),
Metastore: make(map[string]any), Metastore: make(map[string]any),
Metastores: make(map[string]any), Metastores: make(map[string]any),
MlflowExperiment: make(map[string]any),
MlflowModel: make(map[string]any), MlflowModel: make(map[string]any),
MwsCredentials: make(map[string]any), MwsCredentials: make(map[string]any),
MwsWorkspaces: make(map[string]any), MwsWorkspaces: make(map[string]any),
@ -92,6 +97,7 @@ func NewDataSources() *DataSources {
SqlWarehouses: make(map[string]any), SqlWarehouses: make(map[string]any),
StorageCredential: make(map[string]any), StorageCredential: make(map[string]any),
StorageCredentials: make(map[string]any), StorageCredentials: make(map[string]any),
Table: make(map[string]any),
Tables: make(map[string]any), Tables: make(map[string]any),
User: make(map[string]any), User: make(map[string]any),
Views: make(map[string]any), Views: make(map[string]any),

View File

@ -0,0 +1,39 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails struct {
ForcedForComplianceMode bool `json:"forced_for_compliance_mode,omitempty"`
UnavailableForDisabledEntitlement bool `json:"unavailable_for_disabled_entitlement,omitempty"`
UnavailableForNonEnterpriseTier bool `json:"unavailable_for_non_enterprise_tier,omitempty"`
}
type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime struct {
Hours int `json:"hours,omitempty"`
Minutes int `json:"minutes,omitempty"`
}
type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule struct {
DayOfWeek string `json:"day_of_week,omitempty"`
Frequency string `json:"frequency,omitempty"`
WindowStartTime *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime `json:"window_start_time,omitempty"`
}
type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow struct {
WeekDayBasedSchedule *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule `json:"week_day_based_schedule,omitempty"`
}
type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace struct {
CanToggle bool `json:"can_toggle,omitempty"`
Enabled bool `json:"enabled,omitempty"`
RestartEvenIfNoUpdatesAvailable bool `json:"restart_even_if_no_updates_available,omitempty"`
EnablementDetails *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails `json:"enablement_details,omitempty"`
MaintenanceWindow *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow `json:"maintenance_window,omitempty"`
}
type ResourceAutomaticClusterUpdateWorkspaceSetting struct {
Etag string `json:"etag,omitempty"`
Id string `json:"id,omitempty"`
SettingName string `json:"setting_name,omitempty"`
AutomaticClusterUpdateWorkspace *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace `json:"automatic_cluster_update_workspace,omitempty"`
}

View File

@ -32,10 +32,6 @@ type ResourceClusterAzureAttributes struct {
LogAnalyticsInfo *ResourceClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` LogAnalyticsInfo *ResourceClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"`
} }
type ResourceClusterCloneFrom struct {
SourceClusterId string `json:"source_cluster_id"`
}
type ResourceClusterClusterLogConfDbfs struct { type ResourceClusterClusterLogConfDbfs struct {
Destination string `json:"destination"` Destination string `json:"destination"`
} }
@ -148,6 +144,7 @@ type ResourceClusterLibraryPypi struct {
type ResourceClusterLibrary struct { type ResourceClusterLibrary struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *ResourceClusterLibraryCran `json:"cran,omitempty"` Cran *ResourceClusterLibraryCran `json:"cran,omitempty"`
Maven *ResourceClusterLibraryMaven `json:"maven,omitempty"` Maven *ResourceClusterLibraryMaven `json:"maven,omitempty"`
@ -168,7 +165,6 @@ type ResourceCluster struct {
AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
ClusterId string `json:"cluster_id,omitempty"` ClusterId string `json:"cluster_id,omitempty"`
ClusterName string `json:"cluster_name,omitempty"` ClusterName string `json:"cluster_name,omitempty"`
ClusterSource string `json:"cluster_source,omitempty"`
CustomTags map[string]string `json:"custom_tags,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"`
DataSecurityMode string `json:"data_security_mode,omitempty"` DataSecurityMode string `json:"data_security_mode,omitempty"`
DefaultTags map[string]string `json:"default_tags,omitempty"` DefaultTags map[string]string `json:"default_tags,omitempty"`
@ -194,7 +190,6 @@ type ResourceCluster struct {
Autoscale *ResourceClusterAutoscale `json:"autoscale,omitempty"` Autoscale *ResourceClusterAutoscale `json:"autoscale,omitempty"`
AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"` AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"`
AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"` AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"`
CloneFrom *ResourceClusterCloneFrom `json:"clone_from,omitempty"`
ClusterLogConf *ResourceClusterClusterLogConf `json:"cluster_log_conf,omitempty"` ClusterLogConf *ResourceClusterClusterLogConf `json:"cluster_log_conf,omitempty"`
ClusterMountInfo []ResourceClusterClusterMountInfo `json:"cluster_mount_info,omitempty"` ClusterMountInfo []ResourceClusterClusterMountInfo `json:"cluster_mount_info,omitempty"`
DockerImage *ResourceClusterDockerImage `json:"docker_image,omitempty"` DockerImage *ResourceClusterDockerImage `json:"docker_image,omitempty"`

View File

@ -21,6 +21,7 @@ type ResourceClusterPolicyLibrariesPypi struct {
type ResourceClusterPolicyLibraries struct { type ResourceClusterPolicyLibraries struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *ResourceClusterPolicyLibrariesCran `json:"cran,omitempty"` Cran *ResourceClusterPolicyLibrariesCran `json:"cran,omitempty"`
Maven *ResourceClusterPolicyLibrariesMaven `json:"maven,omitempty"` Maven *ResourceClusterPolicyLibrariesMaven `json:"maven,omitempty"`

View File

@ -0,0 +1,15 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace struct {
ComplianceStandards []string `json:"compliance_standards,omitempty"`
IsEnabled bool `json:"is_enabled,omitempty"`
}
type ResourceComplianceSecurityProfileWorkspaceSetting struct {
Etag string `json:"etag,omitempty"`
Id string `json:"id,omitempty"`
SettingName string `json:"setting_name,omitempty"`
ComplianceSecurityProfileWorkspace *ResourceComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace `json:"compliance_security_profile_workspace,omitempty"`
}
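For illustration only (not part of the diff): a minimal sketch of how a generated settings struct like the one above serializes to the snake_case keys Terraform expects. The types are re-declared locally, mirroring the generated ones, so the snippet compiles on its own.
package main
import (
	"encoding/json"
	"fmt"
)
// Local mirrors of the generated ResourceComplianceSecurityProfileWorkspaceSetting
// types, re-declared here only so this sketch is self-contained.
type complianceSecurityProfileWorkspace struct {
	ComplianceStandards []string `json:"compliance_standards,omitempty"`
	IsEnabled           bool     `json:"is_enabled,omitempty"`
}
type complianceSecurityProfileWorkspaceSetting struct {
	Etag                               string                              `json:"etag,omitempty"`
	Id                                 string                              `json:"id,omitempty"`
	SettingName                        string                              `json:"setting_name,omitempty"`
	ComplianceSecurityProfileWorkspace *complianceSecurityProfileWorkspace `json:"compliance_security_profile_workspace,omitempty"`
}
func main() {
	setting := complianceSecurityProfileWorkspaceSetting{
		ComplianceSecurityProfileWorkspace: &complianceSecurityProfileWorkspace{
			ComplianceStandards: []string{"HIPAA"},
			IsEnabled:           true,
		},
	}
	out, err := json.MarshalIndent(setting, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints only the populated snake_case fields; omitempty drops the rest.
	fmt.Println(string(out))
}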

View File

@ -0,0 +1,14 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceEnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace struct {
IsEnabled bool `json:"is_enabled,omitempty"`
}
type ResourceEnhancedSecurityMonitoringWorkspaceSetting struct {
Etag string `json:"etag,omitempty"`
Id string `json:"id,omitempty"`
SettingName string `json:"setting_name,omitempty"`
EnhancedSecurityMonitoringWorkspace *ResourceEnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace `json:"enhanced_security_monitoring_workspace,omitempty"`
}

View File

@ -39,6 +39,10 @@ type ResourceJobEnvironment struct {
Spec *ResourceJobEnvironmentSpec `json:"spec,omitempty"` Spec *ResourceJobEnvironmentSpec `json:"spec,omitempty"`
} }
type ResourceJobGitSourceGitSnapshot struct {
UsedCommit string `json:"used_commit,omitempty"`
}
type ResourceJobGitSourceJobSource struct { type ResourceJobGitSourceJobSource struct {
DirtyState string `json:"dirty_state,omitempty"` DirtyState string `json:"dirty_state,omitempty"`
ImportFromGitBranch string `json:"import_from_git_branch"` ImportFromGitBranch string `json:"import_from_git_branch"`
@ -51,13 +55,14 @@ type ResourceJobGitSource struct {
Provider string `json:"provider,omitempty"` Provider string `json:"provider,omitempty"`
Tag string `json:"tag,omitempty"` Tag string `json:"tag,omitempty"`
Url string `json:"url"` Url string `json:"url"`
GitSnapshot *ResourceJobGitSourceGitSnapshot `json:"git_snapshot,omitempty"`
JobSource *ResourceJobGitSourceJobSource `json:"job_source,omitempty"` JobSource *ResourceJobGitSourceJobSource `json:"job_source,omitempty"`
} }
type ResourceJobHealthRules struct { type ResourceJobHealthRules struct {
Metric string `json:"metric,omitempty"` Metric string `json:"metric"`
Op string `json:"op,omitempty"` Op string `json:"op"`
Value int `json:"value,omitempty"` Value int `json:"value"`
} }
type ResourceJobHealth struct { type ResourceJobHealth struct {
@ -72,7 +77,9 @@ type ResourceJobJobClusterNewClusterAutoscale struct {
type ResourceJobJobClusterNewClusterAwsAttributes struct { type ResourceJobJobClusterNewClusterAwsAttributes struct {
Availability string `json:"availability,omitempty"` Availability string `json:"availability,omitempty"`
EbsVolumeCount int `json:"ebs_volume_count,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"`
EbsVolumeIops int `json:"ebs_volume_iops,omitempty"`
EbsVolumeSize int `json:"ebs_volume_size,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"`
EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"`
EbsVolumeType string `json:"ebs_volume_type,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"`
FirstOnDemand int `json:"first_on_demand,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"`
InstanceProfileArn string `json:"instance_profile_arn,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
@ -80,10 +87,16 @@ type ResourceJobJobClusterNewClusterAwsAttributes struct {
ZoneId string `json:"zone_id,omitempty"` ZoneId string `json:"zone_id,omitempty"`
} }
type ResourceJobJobClusterNewClusterAzureAttributesLogAnalyticsInfo struct {
LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"`
LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"`
}
type ResourceJobJobClusterNewClusterAzureAttributes struct { type ResourceJobJobClusterNewClusterAzureAttributes struct {
Availability string `json:"availability,omitempty"` Availability string `json:"availability,omitempty"`
FirstOnDemand int `json:"first_on_demand,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"`
SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"`
LogAnalyticsInfo *ResourceJobJobClusterNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"`
} }
type ResourceJobJobClusterNewClusterClusterLogConfDbfs struct { type ResourceJobJobClusterNewClusterClusterLogConfDbfs struct {
@ -179,6 +192,32 @@ type ResourceJobJobClusterNewClusterInitScripts struct {
Workspace *ResourceJobJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` Workspace *ResourceJobJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
} }
type ResourceJobJobClusterNewClusterLibraryCran struct {
Package string `json:"package"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobJobClusterNewClusterLibraryMaven struct {
Coordinates string `json:"coordinates"`
Exclusions []string `json:"exclusions,omitempty"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobJobClusterNewClusterLibraryPypi struct {
Package string `json:"package"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobJobClusterNewClusterLibrary struct {
Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"`
Cran *ResourceJobJobClusterNewClusterLibraryCran `json:"cran,omitempty"`
Maven *ResourceJobJobClusterNewClusterLibraryMaven `json:"maven,omitempty"`
Pypi *ResourceJobJobClusterNewClusterLibraryPypi `json:"pypi,omitempty"`
}
type ResourceJobJobClusterNewClusterWorkloadTypeClients struct { type ResourceJobJobClusterNewClusterWorkloadTypeClients struct {
Jobs bool `json:"jobs,omitempty"` Jobs bool `json:"jobs,omitempty"`
Notebooks bool `json:"notebooks,omitempty"` Notebooks bool `json:"notebooks,omitempty"`
@ -190,7 +229,6 @@ type ResourceJobJobClusterNewClusterWorkloadType struct {
type ResourceJobJobClusterNewCluster struct { type ResourceJobJobClusterNewCluster struct {
ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
ClusterId string `json:"cluster_id,omitempty"` ClusterId string `json:"cluster_id,omitempty"`
ClusterName string `json:"cluster_name,omitempty"` ClusterName string `json:"cluster_name,omitempty"`
CustomTags map[string]string `json:"custom_tags,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"`
@ -218,11 +256,12 @@ type ResourceJobJobClusterNewCluster struct {
DockerImage *ResourceJobJobClusterNewClusterDockerImage `json:"docker_image,omitempty"` DockerImage *ResourceJobJobClusterNewClusterDockerImage `json:"docker_image,omitempty"`
GcpAttributes *ResourceJobJobClusterNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` GcpAttributes *ResourceJobJobClusterNewClusterGcpAttributes `json:"gcp_attributes,omitempty"`
InitScripts []ResourceJobJobClusterNewClusterInitScripts `json:"init_scripts,omitempty"` InitScripts []ResourceJobJobClusterNewClusterInitScripts `json:"init_scripts,omitempty"`
Library []ResourceJobJobClusterNewClusterLibrary `json:"library,omitempty"`
WorkloadType *ResourceJobJobClusterNewClusterWorkloadType `json:"workload_type,omitempty"` WorkloadType *ResourceJobJobClusterNewClusterWorkloadType `json:"workload_type,omitempty"`
} }
type ResourceJobJobCluster struct { type ResourceJobJobCluster struct {
JobClusterKey string `json:"job_cluster_key,omitempty"` JobClusterKey string `json:"job_cluster_key"`
NewCluster *ResourceJobJobClusterNewCluster `json:"new_cluster,omitempty"` NewCluster *ResourceJobJobClusterNewCluster `json:"new_cluster,omitempty"`
} }
@ -245,6 +284,7 @@ type ResourceJobLibraryPypi struct {
type ResourceJobLibrary struct { type ResourceJobLibrary struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *ResourceJobLibraryCran `json:"cran,omitempty"` Cran *ResourceJobLibraryCran `json:"cran,omitempty"`
Maven *ResourceJobLibraryMaven `json:"maven,omitempty"` Maven *ResourceJobLibraryMaven `json:"maven,omitempty"`
@ -259,7 +299,9 @@ type ResourceJobNewClusterAutoscale struct {
type ResourceJobNewClusterAwsAttributes struct { type ResourceJobNewClusterAwsAttributes struct {
Availability string `json:"availability,omitempty"` Availability string `json:"availability,omitempty"`
EbsVolumeCount int `json:"ebs_volume_count,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"`
EbsVolumeIops int `json:"ebs_volume_iops,omitempty"`
EbsVolumeSize int `json:"ebs_volume_size,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"`
EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"`
EbsVolumeType string `json:"ebs_volume_type,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"`
FirstOnDemand int `json:"first_on_demand,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"`
InstanceProfileArn string `json:"instance_profile_arn,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
@ -267,10 +309,16 @@ type ResourceJobNewClusterAwsAttributes struct {
ZoneId string `json:"zone_id,omitempty"` ZoneId string `json:"zone_id,omitempty"`
} }
type ResourceJobNewClusterAzureAttributesLogAnalyticsInfo struct {
LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"`
LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"`
}
type ResourceJobNewClusterAzureAttributes struct { type ResourceJobNewClusterAzureAttributes struct {
Availability string `json:"availability,omitempty"` Availability string `json:"availability,omitempty"`
FirstOnDemand int `json:"first_on_demand,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"`
SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"`
LogAnalyticsInfo *ResourceJobNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"`
} }
type ResourceJobNewClusterClusterLogConfDbfs struct { type ResourceJobNewClusterClusterLogConfDbfs struct {
@ -366,6 +414,32 @@ type ResourceJobNewClusterInitScripts struct {
Workspace *ResourceJobNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` Workspace *ResourceJobNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
} }
type ResourceJobNewClusterLibraryCran struct {
Package string `json:"package"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobNewClusterLibraryMaven struct {
Coordinates string `json:"coordinates"`
Exclusions []string `json:"exclusions,omitempty"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobNewClusterLibraryPypi struct {
Package string `json:"package"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobNewClusterLibrary struct {
Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"`
Cran *ResourceJobNewClusterLibraryCran `json:"cran,omitempty"`
Maven *ResourceJobNewClusterLibraryMaven `json:"maven,omitempty"`
Pypi *ResourceJobNewClusterLibraryPypi `json:"pypi,omitempty"`
}
type ResourceJobNewClusterWorkloadTypeClients struct { type ResourceJobNewClusterWorkloadTypeClients struct {
Jobs bool `json:"jobs,omitempty"` Jobs bool `json:"jobs,omitempty"`
Notebooks bool `json:"notebooks,omitempty"` Notebooks bool `json:"notebooks,omitempty"`
@ -377,7 +451,6 @@ type ResourceJobNewClusterWorkloadType struct {
type ResourceJobNewCluster struct { type ResourceJobNewCluster struct {
ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
ClusterId string `json:"cluster_id,omitempty"` ClusterId string `json:"cluster_id,omitempty"`
ClusterName string `json:"cluster_name,omitempty"` ClusterName string `json:"cluster_name,omitempty"`
CustomTags map[string]string `json:"custom_tags,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"`
@ -405,6 +478,7 @@ type ResourceJobNewCluster struct {
DockerImage *ResourceJobNewClusterDockerImage `json:"docker_image,omitempty"` DockerImage *ResourceJobNewClusterDockerImage `json:"docker_image,omitempty"`
GcpAttributes *ResourceJobNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` GcpAttributes *ResourceJobNewClusterGcpAttributes `json:"gcp_attributes,omitempty"`
InitScripts []ResourceJobNewClusterInitScripts `json:"init_scripts,omitempty"` InitScripts []ResourceJobNewClusterInitScripts `json:"init_scripts,omitempty"`
Library []ResourceJobNewClusterLibrary `json:"library,omitempty"`
WorkloadType *ResourceJobNewClusterWorkloadType `json:"workload_type,omitempty"` WorkloadType *ResourceJobNewClusterWorkloadType `json:"workload_type,omitempty"`
} }
@ -532,9 +606,9 @@ type ResourceJobTaskForEachTaskTaskEmailNotifications struct {
} }
type ResourceJobTaskForEachTaskTaskHealthRules struct { type ResourceJobTaskForEachTaskTaskHealthRules struct {
Metric string `json:"metric,omitempty"` Metric string `json:"metric"`
Op string `json:"op,omitempty"` Op string `json:"op"`
Value int `json:"value,omitempty"` Value int `json:"value"`
} }
type ResourceJobTaskForEachTaskTaskHealth struct { type ResourceJobTaskForEachTaskTaskHealth struct {
@ -560,6 +634,7 @@ type ResourceJobTaskForEachTaskTaskLibraryPypi struct {
type ResourceJobTaskForEachTaskTaskLibrary struct { type ResourceJobTaskForEachTaskTaskLibrary struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *ResourceJobTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` Cran *ResourceJobTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"`
Maven *ResourceJobTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` Maven *ResourceJobTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"`
@ -574,7 +649,9 @@ type ResourceJobTaskForEachTaskTaskNewClusterAutoscale struct {
type ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes struct { type ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes struct {
Availability string `json:"availability,omitempty"` Availability string `json:"availability,omitempty"`
EbsVolumeCount int `json:"ebs_volume_count,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"`
EbsVolumeIops int `json:"ebs_volume_iops,omitempty"`
EbsVolumeSize int `json:"ebs_volume_size,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"`
EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"`
EbsVolumeType string `json:"ebs_volume_type,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"`
FirstOnDemand int `json:"first_on_demand,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"`
InstanceProfileArn string `json:"instance_profile_arn,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
@ -582,10 +659,16 @@ type ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes struct {
ZoneId string `json:"zone_id,omitempty"` ZoneId string `json:"zone_id,omitempty"`
} }
type ResourceJobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfo struct {
LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"`
LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"`
}
type ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes struct { type ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes struct {
Availability string `json:"availability,omitempty"` Availability string `json:"availability,omitempty"`
FirstOnDemand int `json:"first_on_demand,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"`
SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"`
LogAnalyticsInfo *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"`
} }
type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfDbfs struct { type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfDbfs struct {
@ -681,6 +764,32 @@ type ResourceJobTaskForEachTaskTaskNewClusterInitScripts struct {
Workspace *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` Workspace *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
} }
type ResourceJobTaskForEachTaskTaskNewClusterLibraryCran struct {
Package string `json:"package"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobTaskForEachTaskTaskNewClusterLibraryMaven struct {
Coordinates string `json:"coordinates"`
Exclusions []string `json:"exclusions,omitempty"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobTaskForEachTaskTaskNewClusterLibraryPypi struct {
Package string `json:"package"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobTaskForEachTaskTaskNewClusterLibrary struct {
Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"`
Cran *ResourceJobTaskForEachTaskTaskNewClusterLibraryCran `json:"cran,omitempty"`
Maven *ResourceJobTaskForEachTaskTaskNewClusterLibraryMaven `json:"maven,omitempty"`
Pypi *ResourceJobTaskForEachTaskTaskNewClusterLibraryPypi `json:"pypi,omitempty"`
}
type ResourceJobTaskForEachTaskTaskNewClusterWorkloadTypeClients struct { type ResourceJobTaskForEachTaskTaskNewClusterWorkloadTypeClients struct {
Jobs bool `json:"jobs,omitempty"` Jobs bool `json:"jobs,omitempty"`
Notebooks bool `json:"notebooks,omitempty"` Notebooks bool `json:"notebooks,omitempty"`
@ -692,7 +801,6 @@ type ResourceJobTaskForEachTaskTaskNewClusterWorkloadType struct {
type ResourceJobTaskForEachTaskTaskNewCluster struct { type ResourceJobTaskForEachTaskTaskNewCluster struct {
ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
ClusterId string `json:"cluster_id,omitempty"` ClusterId string `json:"cluster_id,omitempty"`
ClusterName string `json:"cluster_name,omitempty"` ClusterName string `json:"cluster_name,omitempty"`
CustomTags map[string]string `json:"custom_tags,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"`
@ -704,7 +812,7 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct {
IdempotencyToken string `json:"idempotency_token,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"`
InstancePoolId string `json:"instance_pool_id,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"`
NodeTypeId string `json:"node_type_id,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"`
NumWorkers int `json:"num_workers"` NumWorkers int `json:"num_workers,omitempty"`
PolicyId string `json:"policy_id,omitempty"` PolicyId string `json:"policy_id,omitempty"`
RuntimeEngine string `json:"runtime_engine,omitempty"` RuntimeEngine string `json:"runtime_engine,omitempty"`
SingleUserName string `json:"single_user_name,omitempty"` SingleUserName string `json:"single_user_name,omitempty"`
@ -720,6 +828,7 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct {
DockerImage *ResourceJobTaskForEachTaskTaskNewClusterDockerImage `json:"docker_image,omitempty"` DockerImage *ResourceJobTaskForEachTaskTaskNewClusterDockerImage `json:"docker_image,omitempty"`
GcpAttributes *ResourceJobTaskForEachTaskTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` GcpAttributes *ResourceJobTaskForEachTaskTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"`
InitScripts []ResourceJobTaskForEachTaskTaskNewClusterInitScripts `json:"init_scripts,omitempty"` InitScripts []ResourceJobTaskForEachTaskTaskNewClusterInitScripts `json:"init_scripts,omitempty"`
Library []ResourceJobTaskForEachTaskTaskNewClusterLibrary `json:"library,omitempty"`
WorkloadType *ResourceJobTaskForEachTaskTaskNewClusterWorkloadType `json:"workload_type,omitempty"` WorkloadType *ResourceJobTaskForEachTaskTaskNewClusterWorkloadType `json:"workload_type,omitempty"`
} }
@ -748,9 +857,21 @@ type ResourceJobTaskForEachTaskTaskPythonWheelTask struct {
Parameters []string `json:"parameters,omitempty"` Parameters []string `json:"parameters,omitempty"`
} }
type ResourceJobTaskForEachTaskTaskRunJobTaskPipelineParams struct {
FullRefresh bool `json:"full_refresh,omitempty"`
}
type ResourceJobTaskForEachTaskTaskRunJobTask struct { type ResourceJobTaskForEachTaskTaskRunJobTask struct {
DbtCommands []string `json:"dbt_commands,omitempty"`
JarParams []string `json:"jar_params,omitempty"`
JobId int `json:"job_id"` JobId int `json:"job_id"`
JobParameters map[string]string `json:"job_parameters,omitempty"` JobParameters map[string]string `json:"job_parameters,omitempty"`
NotebookParams map[string]string `json:"notebook_params,omitempty"`
PythonNamedParams map[string]string `json:"python_named_params,omitempty"`
PythonParams []string `json:"python_params,omitempty"`
SparkSubmitParams []string `json:"spark_submit_params,omitempty"`
SqlParams map[string]string `json:"sql_params,omitempty"`
PipelineParams *ResourceJobTaskForEachTaskTaskRunJobTaskPipelineParams `json:"pipeline_params,omitempty"`
} }
type ResourceJobTaskForEachTaskTaskSparkJarTask struct { type ResourceJobTaskForEachTaskTaskSparkJarTask struct {
@ -803,7 +924,7 @@ type ResourceJobTaskForEachTaskTaskSqlTaskQuery struct {
type ResourceJobTaskForEachTaskTaskSqlTask struct { type ResourceJobTaskForEachTaskTaskSqlTask struct {
Parameters map[string]string `json:"parameters,omitempty"` Parameters map[string]string `json:"parameters,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"` WarehouseId string `json:"warehouse_id"`
Alert *ResourceJobTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"` Alert *ResourceJobTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"`
Dashboard *ResourceJobTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"` Dashboard *ResourceJobTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"`
File *ResourceJobTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"` File *ResourceJobTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"`
@ -835,6 +956,7 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct {
type ResourceJobTaskForEachTaskTask struct { type ResourceJobTaskForEachTaskTask struct {
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"`
EnvironmentKey string `json:"environment_key,omitempty"` EnvironmentKey string `json:"environment_key,omitempty"`
ExistingClusterId string `json:"existing_cluster_id,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"`
JobClusterKey string `json:"job_cluster_key,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"`
@ -842,7 +964,7 @@ type ResourceJobTaskForEachTaskTask struct {
MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
RunIf string `json:"run_if,omitempty"` RunIf string `json:"run_if,omitempty"`
TaskKey string `json:"task_key,omitempty"` TaskKey string `json:"task_key"`
TimeoutSeconds int `json:"timeout_seconds,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"`
ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"`
DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"`
@ -870,9 +992,9 @@ type ResourceJobTaskForEachTask struct {
} }
type ResourceJobTaskHealthRules struct { type ResourceJobTaskHealthRules struct {
Metric string `json:"metric,omitempty"` Metric string `json:"metric"`
Op string `json:"op,omitempty"` Op string `json:"op"`
Value int `json:"value,omitempty"` Value int `json:"value"`
} }
type ResourceJobTaskHealth struct { type ResourceJobTaskHealth struct {
@ -898,6 +1020,7 @@ type ResourceJobTaskLibraryPypi struct {
type ResourceJobTaskLibrary struct { type ResourceJobTaskLibrary struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *ResourceJobTaskLibraryCran `json:"cran,omitempty"` Cran *ResourceJobTaskLibraryCran `json:"cran,omitempty"`
Maven *ResourceJobTaskLibraryMaven `json:"maven,omitempty"` Maven *ResourceJobTaskLibraryMaven `json:"maven,omitempty"`
@ -912,7 +1035,9 @@ type ResourceJobTaskNewClusterAutoscale struct {
type ResourceJobTaskNewClusterAwsAttributes struct { type ResourceJobTaskNewClusterAwsAttributes struct {
Availability string `json:"availability,omitempty"` Availability string `json:"availability,omitempty"`
EbsVolumeCount int `json:"ebs_volume_count,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"`
EbsVolumeIops int `json:"ebs_volume_iops,omitempty"`
EbsVolumeSize int `json:"ebs_volume_size,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"`
EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"`
EbsVolumeType string `json:"ebs_volume_type,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"`
FirstOnDemand int `json:"first_on_demand,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"`
InstanceProfileArn string `json:"instance_profile_arn,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
@ -920,10 +1045,16 @@ type ResourceJobTaskNewClusterAwsAttributes struct {
ZoneId string `json:"zone_id,omitempty"` ZoneId string `json:"zone_id,omitempty"`
} }
type ResourceJobTaskNewClusterAzureAttributesLogAnalyticsInfo struct {
LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"`
LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"`
}
type ResourceJobTaskNewClusterAzureAttributes struct { type ResourceJobTaskNewClusterAzureAttributes struct {
Availability string `json:"availability,omitempty"` Availability string `json:"availability,omitempty"`
FirstOnDemand int `json:"first_on_demand,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"`
SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"`
LogAnalyticsInfo *ResourceJobTaskNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"`
} }
type ResourceJobTaskNewClusterClusterLogConfDbfs struct { type ResourceJobTaskNewClusterClusterLogConfDbfs struct {
@ -1019,6 +1150,32 @@ type ResourceJobTaskNewClusterInitScripts struct {
Workspace *ResourceJobTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` Workspace *ResourceJobTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
} }
type ResourceJobTaskNewClusterLibraryCran struct {
Package string `json:"package"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobTaskNewClusterLibraryMaven struct {
Coordinates string `json:"coordinates"`
Exclusions []string `json:"exclusions,omitempty"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobTaskNewClusterLibraryPypi struct {
Package string `json:"package"`
Repo string `json:"repo,omitempty"`
}
type ResourceJobTaskNewClusterLibrary struct {
Egg string `json:"egg,omitempty"`
Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"`
Cran *ResourceJobTaskNewClusterLibraryCran `json:"cran,omitempty"`
Maven *ResourceJobTaskNewClusterLibraryMaven `json:"maven,omitempty"`
Pypi *ResourceJobTaskNewClusterLibraryPypi `json:"pypi,omitempty"`
}
type ResourceJobTaskNewClusterWorkloadTypeClients struct { type ResourceJobTaskNewClusterWorkloadTypeClients struct {
Jobs bool `json:"jobs,omitempty"` Jobs bool `json:"jobs,omitempty"`
Notebooks bool `json:"notebooks,omitempty"` Notebooks bool `json:"notebooks,omitempty"`
@ -1030,7 +1187,6 @@ type ResourceJobTaskNewClusterWorkloadType struct {
type ResourceJobTaskNewCluster struct { type ResourceJobTaskNewCluster struct {
ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
ClusterId string `json:"cluster_id,omitempty"` ClusterId string `json:"cluster_id,omitempty"`
ClusterName string `json:"cluster_name,omitempty"` ClusterName string `json:"cluster_name,omitempty"`
CustomTags map[string]string `json:"custom_tags,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"`
@ -1058,6 +1214,7 @@ type ResourceJobTaskNewCluster struct {
DockerImage *ResourceJobTaskNewClusterDockerImage `json:"docker_image,omitempty"` DockerImage *ResourceJobTaskNewClusterDockerImage `json:"docker_image,omitempty"`
GcpAttributes *ResourceJobTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` GcpAttributes *ResourceJobTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"`
InitScripts []ResourceJobTaskNewClusterInitScripts `json:"init_scripts,omitempty"` InitScripts []ResourceJobTaskNewClusterInitScripts `json:"init_scripts,omitempty"`
Library []ResourceJobTaskNewClusterLibrary `json:"library,omitempty"`
WorkloadType *ResourceJobTaskNewClusterWorkloadType `json:"workload_type,omitempty"` WorkloadType *ResourceJobTaskNewClusterWorkloadType `json:"workload_type,omitempty"`
} }
@ -1086,9 +1243,21 @@ type ResourceJobTaskPythonWheelTask struct {
Parameters []string `json:"parameters,omitempty"` Parameters []string `json:"parameters,omitempty"`
} }
type ResourceJobTaskRunJobTaskPipelineParams struct {
FullRefresh bool `json:"full_refresh,omitempty"`
}
type ResourceJobTaskRunJobTask struct { type ResourceJobTaskRunJobTask struct {
DbtCommands []string `json:"dbt_commands,omitempty"`
JarParams []string `json:"jar_params,omitempty"`
JobId int `json:"job_id"` JobId int `json:"job_id"`
JobParameters map[string]string `json:"job_parameters,omitempty"` JobParameters map[string]string `json:"job_parameters,omitempty"`
NotebookParams map[string]string `json:"notebook_params,omitempty"`
PythonNamedParams map[string]string `json:"python_named_params,omitempty"`
PythonParams []string `json:"python_params,omitempty"`
SparkSubmitParams []string `json:"spark_submit_params,omitempty"`
SqlParams map[string]string `json:"sql_params,omitempty"`
PipelineParams *ResourceJobTaskRunJobTaskPipelineParams `json:"pipeline_params,omitempty"`
} }
type ResourceJobTaskSparkJarTask struct { type ResourceJobTaskSparkJarTask struct {
@ -1141,7 +1310,7 @@ type ResourceJobTaskSqlTaskQuery struct {
type ResourceJobTaskSqlTask struct { type ResourceJobTaskSqlTask struct {
Parameters map[string]string `json:"parameters,omitempty"` Parameters map[string]string `json:"parameters,omitempty"`
WarehouseId string `json:"warehouse_id,omitempty"` WarehouseId string `json:"warehouse_id"`
Alert *ResourceJobTaskSqlTaskAlert `json:"alert,omitempty"` Alert *ResourceJobTaskSqlTaskAlert `json:"alert,omitempty"`
Dashboard *ResourceJobTaskSqlTaskDashboard `json:"dashboard,omitempty"` Dashboard *ResourceJobTaskSqlTaskDashboard `json:"dashboard,omitempty"`
File *ResourceJobTaskSqlTaskFile `json:"file,omitempty"` File *ResourceJobTaskSqlTaskFile `json:"file,omitempty"`
@ -1173,6 +1342,7 @@ type ResourceJobTaskWebhookNotifications struct {
type ResourceJobTask struct { type ResourceJobTask struct {
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"`
EnvironmentKey string `json:"environment_key,omitempty"` EnvironmentKey string `json:"environment_key,omitempty"`
ExistingClusterId string `json:"existing_cluster_id,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"`
JobClusterKey string `json:"job_cluster_key,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"`
@ -1180,7 +1350,7 @@ type ResourceJobTask struct {
MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
RunIf string `json:"run_if,omitempty"` RunIf string `json:"run_if,omitempty"`
TaskKey string `json:"task_key,omitempty"` TaskKey string `json:"task_key"`
TimeoutSeconds int `json:"timeout_seconds,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"`
ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"`
DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"`
@ -1208,6 +1378,13 @@ type ResourceJobTriggerFileArrival struct {
WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"`
} }
type ResourceJobTriggerTable struct {
Condition string `json:"condition,omitempty"`
MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"`
TableNames []string `json:"table_names,omitempty"`
WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"`
}
type ResourceJobTriggerTableUpdate struct { type ResourceJobTriggerTableUpdate struct {
Condition string `json:"condition,omitempty"` Condition string `json:"condition,omitempty"`
MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"`
@ -1218,6 +1395,7 @@ type ResourceJobTriggerTableUpdate struct {
type ResourceJobTrigger struct { type ResourceJobTrigger struct {
PauseStatus string `json:"pause_status,omitempty"` PauseStatus string `json:"pause_status,omitempty"`
FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"` FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"`
Table *ResourceJobTriggerTable `json:"table,omitempty"`
TableUpdate *ResourceJobTriggerTableUpdate `json:"table_update,omitempty"` TableUpdate *ResourceJobTriggerTableUpdate `json:"table_update,omitempty"`
} }

View File

@ -23,6 +23,7 @@ type ResourceLibrary struct {
Egg string `json:"egg,omitempty"` Egg string `json:"egg,omitempty"`
Id string `json:"id,omitempty"` Id string `json:"id,omitempty"`
Jar string `json:"jar,omitempty"` Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"` Whl string `json:"whl,omitempty"`
Cran *ResourceLibraryCran `json:"cran,omitempty"` Cran *ResourceLibraryCran `json:"cran,omitempty"`
Maven *ResourceLibraryMaven `json:"maven,omitempty"` Maven *ResourceLibraryMaven `json:"maven,omitempty"`

View File

@ -34,8 +34,11 @@ type ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServing
} }
type ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig struct { type ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig struct {
MicrosoftEntraClientId string `json:"microsoft_entra_client_id,omitempty"`
MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"`
MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"`
OpenaiApiBase string `json:"openai_api_base,omitempty"` OpenaiApiBase string `json:"openai_api_base,omitempty"`
OpenaiApiKey string `json:"openai_api_key"` OpenaiApiKey string `json:"openai_api_key,omitempty"`
OpenaiApiType string `json:"openai_api_type,omitempty"` OpenaiApiType string `json:"openai_api_type,omitempty"`
OpenaiApiVersion string `json:"openai_api_version,omitempty"` OpenaiApiVersion string `json:"openai_api_version,omitempty"`
OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"` OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"`
@ -114,6 +117,7 @@ type ResourceModelServingTags struct {
type ResourceModelServing struct { type ResourceModelServing struct {
Id string `json:"id,omitempty"` Id string `json:"id,omitempty"`
Name string `json:"name"` Name string `json:"name"`
RouteOptimized bool `json:"route_optimized,omitempty"`
ServingEndpointId string `json:"serving_endpoint_id,omitempty"` ServingEndpointId string `json:"serving_endpoint_id,omitempty"`
Config *ResourceModelServingConfig `json:"config,omitempty"` Config *ResourceModelServingConfig `json:"config,omitempty"`
RateLimits []ResourceModelServingRateLimits `json:"rate_limits,omitempty"` RateLimits []ResourceModelServingRateLimits `json:"rate_limits,omitempty"`

View File

@ -0,0 +1,9 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceMwsNccBinding struct {
Id string `json:"id,omitempty"`
NetworkConnectivityConfigId string `json:"network_connectivity_config_id"`
WorkspaceId int `json:"workspace_id"`
}

View File

@ -0,0 +1,17 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceMwsNccPrivateEndpointRule struct {
ConnectionState string `json:"connection_state,omitempty"`
CreationTime int `json:"creation_time,omitempty"`
Deactivated bool `json:"deactivated,omitempty"`
DeactivatedAt int `json:"deactivated_at,omitempty"`
EndpointName string `json:"endpoint_name,omitempty"`
GroupId string `json:"group_id"`
Id string `json:"id,omitempty"`
NetworkConnectivityConfigId string `json:"network_connectivity_config_id"`
ResourceId string `json:"resource_id"`
RuleId string `json:"rule_id,omitempty"`
UpdatedTime int `json:"updated_time,omitempty"`
}

View File

@ -0,0 +1,51 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule struct {
CidrBlocks []string `json:"cidr_blocks,omitempty"`
}
type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule struct {
Subnets []string `json:"subnets,omitempty"`
TargetRegion string `json:"target_region,omitempty"`
TargetServices []string `json:"target_services,omitempty"`
}
type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRules struct {
AwsStableIpRule *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule `json:"aws_stable_ip_rule,omitempty"`
AzureServiceEndpointRule *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule `json:"azure_service_endpoint_rule,omitempty"`
}
type ResourceMwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRules struct {
ConnectionState string `json:"connection_state,omitempty"`
CreationTime int `json:"creation_time,omitempty"`
Deactivated bool `json:"deactivated,omitempty"`
DeactivatedAt int `json:"deactivated_at,omitempty"`
EndpointName string `json:"endpoint_name,omitempty"`
GroupId string `json:"group_id,omitempty"`
NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"`
ResourceId string `json:"resource_id,omitempty"`
RuleId string `json:"rule_id,omitempty"`
UpdatedTime int `json:"updated_time,omitempty"`
}
type ResourceMwsNetworkConnectivityConfigEgressConfigTargetRules struct {
AzurePrivateEndpointRules []ResourceMwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRules `json:"azure_private_endpoint_rules,omitempty"`
}
type ResourceMwsNetworkConnectivityConfigEgressConfig struct {
DefaultRules *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRules `json:"default_rules,omitempty"`
TargetRules *ResourceMwsNetworkConnectivityConfigEgressConfigTargetRules `json:"target_rules,omitempty"`
}
type ResourceMwsNetworkConnectivityConfig struct {
AccountId string `json:"account_id,omitempty"`
CreationTime int `json:"creation_time,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name"`
NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"`
Region string `json:"region"`
UpdatedTime int `json:"updated_time,omitempty"`
EgressConfig *ResourceMwsNetworkConnectivityConfigEgressConfig `json:"egress_config,omitempty"`
}

View File

@ -0,0 +1,76 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceQualityMonitorCustomMetrics struct {
Definition string `json:"definition"`
InputColumns []string `json:"input_columns"`
Name string `json:"name"`
OutputDataType string `json:"output_data_type"`
Type string `json:"type"`
}
type ResourceQualityMonitorDataClassificationConfig struct {
Enabled bool `json:"enabled,omitempty"`
}
type ResourceQualityMonitorInferenceLog struct {
Granularities []string `json:"granularities"`
LabelCol string `json:"label_col,omitempty"`
ModelIdCol string `json:"model_id_col"`
PredictionCol string `json:"prediction_col"`
PredictionProbaCol string `json:"prediction_proba_col,omitempty"`
ProblemType string `json:"problem_type"`
TimestampCol string `json:"timestamp_col"`
}
type ResourceQualityMonitorNotificationsOnFailure struct {
EmailAddresses []string `json:"email_addresses,omitempty"`
}
type ResourceQualityMonitorNotificationsOnNewClassificationTagDetected struct {
EmailAddresses []string `json:"email_addresses,omitempty"`
}
type ResourceQualityMonitorNotifications struct {
OnFailure *ResourceQualityMonitorNotificationsOnFailure `json:"on_failure,omitempty"`
OnNewClassificationTagDetected *ResourceQualityMonitorNotificationsOnNewClassificationTagDetected `json:"on_new_classification_tag_detected,omitempty"`
}
type ResourceQualityMonitorSchedule struct {
PauseStatus string `json:"pause_status,omitempty"`
QuartzCronExpression string `json:"quartz_cron_expression"`
TimezoneId string `json:"timezone_id"`
}
type ResourceQualityMonitorSnapshot struct {
}
type ResourceQualityMonitorTimeSeries struct {
Granularities []string `json:"granularities"`
TimestampCol string `json:"timestamp_col"`
}
type ResourceQualityMonitor struct {
AssetsDir string `json:"assets_dir"`
BaselineTableName string `json:"baseline_table_name,omitempty"`
DashboardId string `json:"dashboard_id,omitempty"`
DriftMetricsTableName string `json:"drift_metrics_table_name,omitempty"`
Id string `json:"id,omitempty"`
LatestMonitorFailureMsg string `json:"latest_monitor_failure_msg,omitempty"`
MonitorVersion string `json:"monitor_version,omitempty"`
OutputSchemaName string `json:"output_schema_name"`
ProfileMetricsTableName string `json:"profile_metrics_table_name,omitempty"`
SkipBuiltinDashboard bool `json:"skip_builtin_dashboard,omitempty"`
SlicingExprs []string `json:"slicing_exprs,omitempty"`
Status string `json:"status,omitempty"`
TableName string `json:"table_name"`
WarehouseId string `json:"warehouse_id,omitempty"`
CustomMetrics []ResourceQualityMonitorCustomMetrics `json:"custom_metrics,omitempty"`
DataClassificationConfig *ResourceQualityMonitorDataClassificationConfig `json:"data_classification_config,omitempty"`
InferenceLog *ResourceQualityMonitorInferenceLog `json:"inference_log,omitempty"`
Notifications *ResourceQualityMonitorNotifications `json:"notifications,omitempty"`
Schedule *ResourceQualityMonitorSchedule `json:"schedule,omitempty"`
Snapshot *ResourceQualityMonitorSnapshot `json:"snapshot,omitempty"`
TimeSeries *ResourceQualityMonitorTimeSeries `json:"time_series,omitempty"`
}

View File

@ -18,6 +18,7 @@ type ResourceSqlTable struct {
Id string `json:"id,omitempty"` Id string `json:"id,omitempty"`
Name string `json:"name"` Name string `json:"name"`
Options map[string]string `json:"options,omitempty"` Options map[string]string `json:"options,omitempty"`
Owner string `json:"owner,omitempty"`
Partitions []string `json:"partitions,omitempty"` Partitions []string `json:"partitions,omitempty"`
Properties map[string]string `json:"properties,omitempty"` Properties map[string]string `json:"properties,omitempty"`
SchemaName string `json:"schema_name"` SchemaName string `json:"schema_name"`

View File

@ -13,6 +13,7 @@ type ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingVectorColumns struct {
} }
type ResourceVectorSearchIndexDeltaSyncIndexSpec struct { type ResourceVectorSearchIndexDeltaSyncIndexSpec struct {
EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"`
PipelineId string `json:"pipeline_id,omitempty"` PipelineId string `json:"pipeline_id,omitempty"`
PipelineType string `json:"pipeline_type,omitempty"` PipelineType string `json:"pipeline_type,omitempty"`
SourceTable string `json:"source_table,omitempty"` SourceTable string `json:"source_table,omitempty"`

View File

@ -5,6 +5,7 @@ package schema
type Resources struct { type Resources struct {
AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"`
ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"`
AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"`
AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"`
AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"` AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"`
AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"` AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"`
@ -13,10 +14,12 @@ type Resources struct {
CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"` CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"`
Cluster map[string]any `json:"databricks_cluster,omitempty"` Cluster map[string]any `json:"databricks_cluster,omitempty"`
ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"`
ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"`
Connection map[string]any `json:"databricks_connection,omitempty"` Connection map[string]any `json:"databricks_connection,omitempty"`
DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"`
DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"` DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"`
Directory map[string]any `json:"databricks_directory,omitempty"` Directory map[string]any `json:"databricks_directory,omitempty"`
EnhancedSecurityMonitoringWorkspaceSetting map[string]any `json:"databricks_enhanced_security_monitoring_workspace_setting,omitempty"`
Entitlements map[string]any `json:"databricks_entitlements,omitempty"` Entitlements map[string]any `json:"databricks_entitlements,omitempty"`
ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` ExternalLocation map[string]any `json:"databricks_external_location,omitempty"`
File map[string]any `json:"databricks_file,omitempty"` File map[string]any `json:"databricks_file,omitempty"`
@ -45,6 +48,9 @@ type Resources struct {
MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"`
MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"` MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"`
MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"` MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"`
MwsNccBinding map[string]any `json:"databricks_mws_ncc_binding,omitempty"`
MwsNccPrivateEndpointRule map[string]any `json:"databricks_mws_ncc_private_endpoint_rule,omitempty"`
MwsNetworkConnectivityConfig map[string]any `json:"databricks_mws_network_connectivity_config,omitempty"`
MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"` MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"`
MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"` MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"`
MwsPrivateAccessSettings map[string]any `json:"databricks_mws_private_access_settings,omitempty"` MwsPrivateAccessSettings map[string]any `json:"databricks_mws_private_access_settings,omitempty"`
@ -58,6 +64,7 @@ type Resources struct {
Permissions map[string]any `json:"databricks_permissions,omitempty"` Permissions map[string]any `json:"databricks_permissions,omitempty"`
Pipeline map[string]any `json:"databricks_pipeline,omitempty"` Pipeline map[string]any `json:"databricks_pipeline,omitempty"`
Provider map[string]any `json:"databricks_provider,omitempty"` Provider map[string]any `json:"databricks_provider,omitempty"`
QualityMonitor map[string]any `json:"databricks_quality_monitor,omitempty"`
Recipient map[string]any `json:"databricks_recipient,omitempty"` Recipient map[string]any `json:"databricks_recipient,omitempty"`
RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"`
Repo map[string]any `json:"databricks_repo,omitempty"` Repo map[string]any `json:"databricks_repo,omitempty"`
@ -97,6 +104,7 @@ func NewResources() *Resources {
return &Resources{ return &Resources{
AccessControlRuleSet: make(map[string]any), AccessControlRuleSet: make(map[string]any),
ArtifactAllowlist: make(map[string]any), ArtifactAllowlist: make(map[string]any),
AutomaticClusterUpdateWorkspaceSetting: make(map[string]any),
AwsS3Mount: make(map[string]any), AwsS3Mount: make(map[string]any),
AzureAdlsGen1Mount: make(map[string]any), AzureAdlsGen1Mount: make(map[string]any),
AzureAdlsGen2Mount: make(map[string]any), AzureAdlsGen2Mount: make(map[string]any),
@ -105,10 +113,12 @@ func NewResources() *Resources {
CatalogWorkspaceBinding: make(map[string]any), CatalogWorkspaceBinding: make(map[string]any),
Cluster: make(map[string]any), Cluster: make(map[string]any),
ClusterPolicy: make(map[string]any), ClusterPolicy: make(map[string]any),
ComplianceSecurityProfileWorkspaceSetting: make(map[string]any),
Connection: make(map[string]any), Connection: make(map[string]any),
DbfsFile: make(map[string]any), DbfsFile: make(map[string]any),
DefaultNamespaceSetting: make(map[string]any), DefaultNamespaceSetting: make(map[string]any),
Directory: make(map[string]any), Directory: make(map[string]any),
EnhancedSecurityMonitoringWorkspaceSetting: make(map[string]any),
Entitlements: make(map[string]any), Entitlements: make(map[string]any),
ExternalLocation: make(map[string]any), ExternalLocation: make(map[string]any),
File: make(map[string]any), File: make(map[string]any),
@ -137,6 +147,9 @@ func NewResources() *Resources {
MwsCredentials: make(map[string]any), MwsCredentials: make(map[string]any),
MwsCustomerManagedKeys: make(map[string]any), MwsCustomerManagedKeys: make(map[string]any),
MwsLogDelivery: make(map[string]any), MwsLogDelivery: make(map[string]any),
MwsNccBinding: make(map[string]any),
MwsNccPrivateEndpointRule: make(map[string]any),
MwsNetworkConnectivityConfig: make(map[string]any),
MwsNetworks: make(map[string]any), MwsNetworks: make(map[string]any),
MwsPermissionAssignment: make(map[string]any), MwsPermissionAssignment: make(map[string]any),
MwsPrivateAccessSettings: make(map[string]any), MwsPrivateAccessSettings: make(map[string]any),
@ -150,6 +163,7 @@ func NewResources() *Resources {
Permissions: make(map[string]any), Permissions: make(map[string]any),
Pipeline: make(map[string]any), Pipeline: make(map[string]any),
Provider: make(map[string]any), Provider: make(map[string]any),
QualityMonitor: make(map[string]any),
Recipient: make(map[string]any), Recipient: make(map[string]any),
RegisteredModel: make(map[string]any), RegisteredModel: make(map[string]any),
Repo: make(map[string]any), Repo: make(map[string]any),

View File

@ -21,7 +21,7 @@ type Root struct {
const ProviderHost = "registry.terraform.io" const ProviderHost = "registry.terraform.io"
const ProviderSource = "databricks/databricks" const ProviderSource = "databricks/databricks"
const ProviderVersion = "1.40.0" const ProviderVersion = "1.46.0"
func NewRoot() *Root { func NewRoot() *Root {
return &Root{ return &Root{

@ -30,6 +30,10 @@ func FindAllEnvironments(b *bundle.Bundle) map[string]([]jobs.JobEnvironment) {
func isEnvsWithLocalLibraries(envs []jobs.JobEnvironment) bool { func isEnvsWithLocalLibraries(envs []jobs.JobEnvironment) bool {
for _, e := range envs { for _, e := range envs {
if e.Spec == nil {
continue
}
for _, l := range e.Spec.Dependencies { for _, l := range e.Spec.Dependencies {
if IsEnvironmentDependencyLocal(l) { if IsEnvironmentDependencyLocal(l) {
return true return true

@ -62,6 +62,10 @@ func validateTaskLibraries(libs []compute.Library, b *bundle.Bundle) error {
func validateEnvironments(envs []jobs.JobEnvironment, b *bundle.Bundle) error { func validateEnvironments(envs []jobs.JobEnvironment, b *bundle.Bundle) error {
for _, env := range envs { for _, env := range envs {
if env.Spec == nil {
continue
}
for _, dep := range env.Spec.Dependencies { for _, dep := range env.Spec.Dependencies {
matches, err := filepath.Glob(filepath.Join(b.RootPath, dep)) matches, err := filepath.Glob(filepath.Join(b.RootPath, dep))
if err != nil { if err != nil {
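Aside: both guards added in these two hunks are the same defensive step — skip an environment whose Spec is nil before touching Spec.Dependencies. A self-contained sketch of the pattern, using simplified stand-ins for the SDK's jobs.JobEnvironment types:

package main

import "fmt"

// Spec and Environment are simplified stand-ins for the SDK types used above.
type Spec struct {
	Dependencies []string
}

type Environment struct {
	Spec *Spec
}

// hasLocalDependency skips environments without a spec instead of dereferencing a nil pointer.
func hasLocalDependency(envs []Environment) bool {
	for _, e := range envs {
		if e.Spec == nil {
			continue
		}
		for _, dep := range e.Spec.Dependencies {
			if dep == "./dist/lib.whl" { // placeholder for the real "is this a local library?" check
				return true
			}
		}
	}
	return false
}

func main() {
	envs := []Environment{
		{Spec: nil},
		{Spec: &Spec{Dependencies: []string{"./dist/lib.whl"}}},
	}
	fmt.Println(hasLocalDependency(envs)) // true
}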

@ -2,6 +2,7 @@ package bundle
import ( import (
"context" "context"
"sync"
"testing" "testing"
"github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config"
@ -10,9 +11,14 @@ import (
) )
type addToContainer struct { type addToContainer struct {
t *testing.T
container *[]int container *[]int
value int value int
err bool err bool
// mu is a mutex that protects container. It is used to ensure that the
// container slice is only modified by one goroutine at a time.
mu *sync.Mutex
} }
func (m *addToContainer) Apply(ctx context.Context, b ReadOnlyBundle) diag.Diagnostics { func (m *addToContainer) Apply(ctx context.Context, b ReadOnlyBundle) diag.Diagnostics {
@ -20,9 +26,10 @@ func (m *addToContainer) Apply(ctx context.Context, b ReadOnlyBundle) diag.Diagn
return diag.Errorf("error") return diag.Errorf("error")
} }
c := *m.container m.mu.Lock()
c = append(c, m.value) *m.container = append(*m.container, m.value)
*m.container = c m.mu.Unlock()
return nil return nil
} }
@ -36,9 +43,10 @@ func TestParallelMutatorWork(t *testing.T) {
} }
container := []int{} container := []int{}
m1 := &addToContainer{container: &container, value: 1} var mu sync.Mutex
m2 := &addToContainer{container: &container, value: 2} m1 := &addToContainer{t: t, container: &container, value: 1, mu: &mu}
m3 := &addToContainer{container: &container, value: 3} m2 := &addToContainer{t: t, container: &container, value: 2, mu: &mu}
m3 := &addToContainer{t: t, container: &container, value: 3, mu: &mu}
m := Parallel(m1, m2, m3) m := Parallel(m1, m2, m3)
@ -57,9 +65,10 @@ func TestParallelMutatorWorkWithErrors(t *testing.T) {
} }
container := []int{} container := []int{}
m1 := &addToContainer{container: &container, value: 1} var mu sync.Mutex
m2 := &addToContainer{container: &container, err: true, value: 2} m1 := &addToContainer{container: &container, value: 1, mu: &mu}
m3 := &addToContainer{container: &container, value: 3} m2 := &addToContainer{container: &container, err: true, value: 2, mu: &mu}
m3 := &addToContainer{container: &container, value: 3, mu: &mu}
m := Parallel(m1, m2, m3) m := Parallel(m1, m2, m3)
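Aside: the edit above fixes a data race in the test — three mutators append to one shared slice from separate goroutines, so the append is now serialized with a shared sync.Mutex. The same fix in a standalone form (names here are illustrative, not the bundle package's API):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu        sync.Mutex
		container []int
		wg        sync.WaitGroup
	)
	for _, v := range []int{1, 2, 3} {
		wg.Add(1)
		go func(value int) {
			defer wg.Done()
			// Lock around the append so concurrent goroutines never update the slice header at the same time.
			mu.Lock()
			container = append(container, value)
			mu.Unlock()
		}(v)
	}
	wg.Wait()
	fmt.Println(len(container)) // always 3, in some order
}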

@ -8,6 +8,7 @@ import (
"github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -45,9 +46,15 @@ func testFixture(userName string) *bundle.Bundle {
Resources: config.Resources{ Resources: config.Resources{
Jobs: map[string]*resources.Job{ Jobs: map[string]*resources.Job{
"job1": { "job1": {
JobSettings: &jobs.JobSettings{
Name: "job1",
},
Permissions: p, Permissions: p,
}, },
"job2": { "job2": {
JobSettings: &jobs.JobSettings{
Name: "job2",
},
Permissions: p, Permissions: p,
}, },
}, },

@ -7,6 +7,7 @@ import (
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -23,8 +24,16 @@ func TestApplyBundlePermissions(t *testing.T) {
}, },
Resources: config.Resources{ Resources: config.Resources{
Jobs: map[string]*resources.Job{ Jobs: map[string]*resources.Job{
"job_1": {}, "job_1": {
"job_2": {}, JobSettings: &jobs.JobSettings{
Name: "job_1",
},
},
"job_2": {
JobSettings: &jobs.JobSettings{
Name: "job_2",
},
},
}, },
Pipelines: map[string]*resources.Pipeline{ Pipelines: map[string]*resources.Pipeline{
"pipeline_1": {}, "pipeline_1": {},
@ -109,11 +118,17 @@ func TestWarningOnOverlapPermission(t *testing.T) {
Resources: config.Resources{ Resources: config.Resources{
Jobs: map[string]*resources.Job{ Jobs: map[string]*resources.Job{
"job_1": { "job_1": {
JobSettings: &jobs.JobSettings{
Name: "job_1",
},
Permissions: []resources.Permission{ Permissions: []resources.Permission{
{Level: CAN_VIEW, UserName: "TestUser"}, {Level: CAN_VIEW, UserName: "TestUser"},
}, },
}, },
"job_2": { "job_2": {
JobSettings: &jobs.JobSettings{
Name: "job_2",
},
Permissions: []resources.Permission{ Permissions: []resources.Permission{
{Level: CAN_VIEW, UserName: "TestUser2"}, {Level: CAN_VIEW, UserName: "TestUser2"},
}, },

@ -30,8 +30,8 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) {
}, },
Resources: config.Resources{ Resources: config.Resources{
Jobs: map[string]*resources.Job{ Jobs: map[string]*resources.Job{
"job_1": {JobSettings: &jobs.JobSettings{}}, "job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}},
"job_2": {JobSettings: &jobs.JobSettings{}}, "job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}},
}, },
Pipelines: map[string]*resources.Pipeline{ Pipelines: map[string]*resources.Pipeline{
"pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}}, "pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}},

@ -36,7 +36,7 @@ func Deploy() bundle.Mutator {
permissions.ApplyWorkspaceRootPermissions(), permissions.ApplyWorkspaceRootPermissions(),
terraform.Interpolate(), terraform.Interpolate(),
terraform.Write(), terraform.Write(),
deploy.CheckRunningResource(), terraform.CheckRunningResource(),
bundle.Defer( bundle.Defer(
terraform.Apply(), terraform.Apply(),
bundle.Seq( bundle.Seq(

@ -46,6 +46,7 @@ func Initialize() bundle.Mutator {
permissions.ApplyBundlePermissions(), permissions.ApplyBundlePermissions(),
permissions.FilterCurrentUser(), permissions.FilterCurrentUser(),
metadata.AnnotateJobs(), metadata.AnnotateJobs(),
metadata.AnnotatePipelines(),
terraform.Initialize(), terraform.Initialize(),
scripts.Execute(config.ScriptPostInit), scripts.Execute(config.ScriptPostInit),
}, },

@ -70,7 +70,7 @@ func UpdateBundleDescriptions(openapiSpecPath string) (*Docs, error) {
} }
openapiReader := &OpenapiReader{ openapiReader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
// Generate descriptions for the "resources" field // Generate descriptions for the "resources" field
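Aside: the reader's cache becomes an unexported, value-typed map here. A small sketch of that style of memoized lookup; loadSchema is a hypothetical stand-in for whatever actually resolves a reference:

package main

import "fmt"

// Schema is a placeholder for jsonschema.Schema.
type Schema struct {
	Description string
}

type openapiReader struct {
	// memo caches schemas by reference path; unexported so only the reader mutates it.
	memo map[string]Schema
}

func (r *openapiReader) schemaFor(ref string) Schema {
	if s, ok := r.memo[ref]; ok {
		return s
	}
	s := loadSchema(ref)
	r.memo[ref] = s
	return s
}

// loadSchema is a hypothetical stand-in for the expensive resolution step.
func loadSchema(ref string) Schema {
	return Schema{Description: "resolved " + ref}
}

func main() {
	r := &openapiReader{memo: make(map[string]Schema)}
	fmt.Println(r.schemaFor("#/components/schemas/Job").Description)
	fmt.Println(r.schemaFor("#/components/schemas/Job").Description) // served from memo on the second call
}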

@ -46,6 +46,17 @@
"properties": { "properties": {
"fail_on_active_runs": { "fail_on_active_runs": {
"description": "" "description": ""
},
"lock": {
"description": "",
"properties": {
"enabled": {
"description": ""
},
"force": {
"description": ""
}
}
} }
} }
}, },
@ -76,6 +87,9 @@
"additionalproperties": { "additionalproperties": {
"description": "" "description": ""
} }
},
"use_legacy_run_as": {
"description": ""
} }
} }
}, },
@ -242,7 +256,7 @@
"description": "", "description": "",
"properties": { "properties": {
"client": { "client": {
"description": "*\nUser-friendly name for the client version: “client”: “1”\nThe version is a string, consisting of the major client version" "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version."
}, },
"dependencies": { "dependencies": {
"description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]",
@ -334,7 +348,7 @@
"description": "If new_cluster, a description of a cluster that is created for each task.", "description": "If new_cluster, a description of a cluster that is created for each task.",
"properties": { "properties": {
"apply_policy_default_values": { "apply_policy_default_values": {
"description": "" "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied."
}, },
"autoscale": { "autoscale": {
"description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@ -410,14 +424,6 @@
} }
} }
}, },
"clone_from": {
"description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.",
"properties": {
"source_cluster_id": {
"description": "The cluster that is being cloned."
}
}
},
"cluster_log_conf": { "cluster_log_conf": {
"description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
"properties": { "properties": {
@ -460,9 +466,6 @@
"cluster_name": { "cluster_name": {
"description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n"
}, },
"cluster_source": {
"description": ""
},
"custom_tags": { "custom_tags": {
"description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
"additionalproperties": { "additionalproperties": {
@ -742,7 +745,7 @@
"description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
"properties": { "properties": {
"pause_status": { "pause_status": {
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." "description": "Indicate whether this schedule is paused or not."
}, },
"quartz_cron_expression": { "quartz_cron_expression": {
"description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required."
@ -799,7 +802,7 @@
"description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
}, },
"source": { "source": {
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace.\n* `GIT`: Project is located in cloud Git provider."
}, },
"warehouse_id": { "warehouse_id": {
"description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
@ -909,10 +912,10 @@
} }
}, },
"egg": { "egg": {
"description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
}, },
"jar": { "jar": {
"description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
}, },
"maven": { "maven": {
"description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`",
@ -942,8 +945,11 @@
} }
} }
}, },
"requirements": {
"description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`"
},
"whl": { "whl": {
"description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
} }
} }
} }
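Aside: the new requirements key sits next to egg, jar and whl in a task's libraries list. A hedged illustration of that JSON shape built as a plain Go map — field names and example paths are taken from the descriptions above, nothing else is implied about the CLI's own code:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// One library entry per map; only one of the keys is normally set per entry.
	libraries := []map[string]any{
		{"requirements": "/Workspace/path/to/requirements.txt"},
		{"whl": "/Volumes/path/to/library.whl"},
	}
	out, _ := json.MarshalIndent(libraries, "", "  ")
	fmt.Println(string(out))
}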
@ -955,10 +961,10 @@
"description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
}, },
"new_cluster": { "new_cluster": {
"description": "If new_cluster, a description of a cluster that is created for each task.", "description": "If new_cluster, a description of a new cluster that is created for each run.",
"properties": { "properties": {
"apply_policy_default_values": { "apply_policy_default_values": {
"description": "" "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied."
}, },
"autoscale": { "autoscale": {
"description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@ -1034,14 +1040,6 @@
} }
} }
}, },
"clone_from": {
"description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.",
"properties": {
"source_cluster_id": {
"description": "The cluster that is being cloned."
}
}
},
"cluster_log_conf": { "cluster_log_conf": {
"description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
"properties": { "properties": {
@ -1084,9 +1082,6 @@
"cluster_name": { "cluster_name": {
"description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n"
}, },
"cluster_source": {
"description": ""
},
"custom_tags": { "custom_tags": {
"description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
"additionalproperties": { "additionalproperties": {
@ -1303,6 +1298,9 @@
}, },
"source": { "source": {
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider."
},
"warehouse_id": {
"description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail."
} }
} }
}, },
@ -1399,7 +1397,7 @@
} }
}, },
"python_named_params": { "python_named_params": {
"description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.", "description": "",
"additionalproperties": { "additionalproperties": {
"description": "" "description": ""
} }
@ -1454,7 +1452,7 @@
"description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
}, },
"source": { "source": {
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
} }
} }
}, },
@ -1526,13 +1524,13 @@
} }
}, },
"file": { "file": {
"description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", "description": "If file, indicates that this job runs a SQL file in a remote Git repository.",
"properties": { "properties": {
"path": { "path": {
"description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
}, },
"source": { "source": {
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider."
} }
} }
}, },
@ -1634,10 +1632,10 @@
} }
}, },
"pause_status": { "pause_status": {
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." "description": "Whether this trigger is paused or not."
}, },
"table": { "table": {
"description": "", "description": "Old table trigger settings name. Deprecated in favor of `table_update`.",
"properties": { "properties": {
"condition": { "condition": {
"description": "The table(s) condition based on which to trigger a job run." "description": "The table(s) condition based on which to trigger a job run."
@ -1679,7 +1677,7 @@
} }
}, },
"webhook_notifications": { "webhook_notifications": {
"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
"properties": { "properties": {
"on_duration_warning_threshold_exceeded": { "on_duration_warning_threshold_exceeded": {
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@ -1833,6 +1831,15 @@
"openai_config": { "openai_config": {
"description": "OpenAI Config. Only required if the provider is 'openai'.", "description": "OpenAI Config. Only required if the provider is 'openai'.",
"properties": { "properties": {
"microsoft_entra_client_id": {
"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n"
},
"microsoft_entra_client_secret": {
"description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n"
},
"microsoft_entra_tenant_id": {
"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n"
},
"openai_api_base": { "openai_api_base": {
"description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n"
}, },
@ -1989,6 +1996,9 @@
} }
} }
}, },
"route_optimized": {
"description": "Enable route optimization for the serving endpoint."
},
"tags": { "tags": {
"description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.",
"items": { "items": {
@ -2415,6 +2425,17 @@
"continuous": { "continuous": {
"description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`."
}, },
"deployment": {
"description": "Deployment type of this pipeline.",
"properties": {
"kind": {
"description": "The deployment method that manages the pipeline."
},
"metadata_file_path": {
"description": "The path to the file containing metadata about the deployment."
}
}
},
"development": { "development": {
"description": "Whether the pipeline is in Development mode. Defaults to false." "description": "Whether the pipeline is in Development mode. Defaults to false."
}, },
@ -2438,9 +2459,136 @@
} }
} }
}, },
"gateway_definition": {
"description": "The definition of a gateway pipeline to support CDC.",
"properties": {
"connection_id": {
"description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source."
},
"gateway_storage_catalog": {
"description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location."
},
"gateway_storage_name": {
"description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n"
},
"gateway_storage_schema": {
"description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location."
}
}
},
"id": { "id": {
"description": "Unique identifier for this pipeline." "description": "Unique identifier for this pipeline."
}, },
"ingestion_definition": {
"description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.",
"properties": {
"connection_name": {
"description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name."
},
"ingestion_gateway_id": {
"description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name."
},
"objects": {
"description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.",
"items": {
"description": "",
"properties": {
"schema": {
"description": "Select tables from a specific source schema.",
"properties": {
"destination_catalog": {
"description": "Required. Destination catalog to store tables."
},
"destination_schema": {
"description": "Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists."
},
"source_catalog": {
"description": "The source catalog name. Might be optional depending on the type of source."
},
"source_schema": {
"description": "Required. Schema name in the source database."
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
"items": {
"description": ""
}
},
"salesforce_include_formula_fields": {
"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
},
"scd_type": {
"description": "The SCD type to use to ingest the table."
}
}
}
}
},
"table": {
"description": "Select tables from a specific source table.",
"properties": {
"destination_catalog": {
"description": "Required. Destination catalog to store table."
},
"destination_schema": {
"description": "Required. Destination schema to store table."
},
"destination_table": {
"description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used."
},
"source_catalog": {
"description": "Source catalog name. Might be optional depending on the type of source."
},
"source_schema": {
"description": "Schema name in the source database. Might be optional depending on the type of source."
},
"source_table": {
"description": "Required. Table name in the source database."
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
"items": {
"description": ""
}
},
"salesforce_include_formula_fields": {
"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
},
"scd_type": {
"description": "The SCD type to use to ingest the table."
}
}
}
}
}
}
}
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
"items": {
"description": ""
}
},
"salesforce_include_formula_fields": {
"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
},
"scd_type": {
"description": "The SCD type to use to ingest the table."
}
}
}
}
},
"libraries": { "libraries": {
"description": "Libraries or code needed by this deployment.", "description": "Libraries or code needed by this deployment.",
"items": { "items": {
@ -2682,6 +2830,17 @@
"properties": { "properties": {
"fail_on_active_runs": { "fail_on_active_runs": {
"description": "" "description": ""
},
"lock": {
"description": "",
"properties": {
"enabled": {
"description": ""
},
"force": {
"description": ""
}
}
} }
} }
}, },
@ -2878,7 +3037,7 @@
"description": "", "description": "",
"properties": { "properties": {
"client": { "client": {
"description": "*\nUser-friendly name for the client version: “client”: “1”\nThe version is a string, consisting of the major client version" "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version."
}, },
"dependencies": { "dependencies": {
"description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]",
@ -2970,7 +3129,7 @@
"description": "If new_cluster, a description of a cluster that is created for each task.", "description": "If new_cluster, a description of a cluster that is created for each task.",
"properties": { "properties": {
"apply_policy_default_values": { "apply_policy_default_values": {
"description": "" "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied."
}, },
"autoscale": { "autoscale": {
"description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@ -3046,14 +3205,6 @@
} }
} }
}, },
"clone_from": {
"description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.",
"properties": {
"source_cluster_id": {
"description": "The cluster that is being cloned."
}
}
},
"cluster_log_conf": { "cluster_log_conf": {
"description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
"properties": { "properties": {
@ -3096,9 +3247,6 @@
"cluster_name": { "cluster_name": {
"description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n"
}, },
"cluster_source": {
"description": ""
},
"custom_tags": { "custom_tags": {
"description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
"additionalproperties": { "additionalproperties": {
@ -3378,7 +3526,7 @@
"description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
"properties": { "properties": {
"pause_status": { "pause_status": {
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." "description": "Indicate whether this schedule is paused or not."
}, },
"quartz_cron_expression": { "quartz_cron_expression": {
"description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required."
@ -3435,7 +3583,7 @@
"description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used."
}, },
"source": { "source": {
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace.\n* `GIT`: Project is located in cloud Git provider."
}, },
"warehouse_id": { "warehouse_id": {
"description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument."
@ -3545,10 +3693,10 @@
} }
}, },
"egg": { "egg": {
"description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
}, },
"jar": { "jar": {
"description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
}, },
"maven": { "maven": {
"description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`",
@ -3578,8 +3726,11 @@
} }
} }
}, },
"requirements": {
"description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`"
},
"whl": { "whl": {
"description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI."
} }
} }
} }
@ -3591,10 +3742,10 @@
"description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
}, },
"new_cluster": { "new_cluster": {
"description": "If new_cluster, a description of a cluster that is created for each task.", "description": "If new_cluster, a description of a new cluster that is created for each run.",
"properties": { "properties": {
"apply_policy_default_values": { "apply_policy_default_values": {
"description": "" "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied."
}, },
"autoscale": { "autoscale": {
"description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@ -3670,14 +3821,6 @@
} }
} }
}, },
"clone_from": {
"description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.",
"properties": {
"source_cluster_id": {
"description": "The cluster that is being cloned."
}
}
},
"cluster_log_conf": { "cluster_log_conf": {
"description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
"properties": { "properties": {
@ -3720,9 +3863,6 @@
"cluster_name": { "cluster_name": {
"description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n"
}, },
"cluster_source": {
"description": ""
},
"custom_tags": { "custom_tags": {
"description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
"additionalproperties": { "additionalproperties": {
@ -3939,6 +4079,9 @@
}, },
"source": { "source": {
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider."
},
"warehouse_id": {
"description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail."
} }
} }
}, },
@ -4035,7 +4178,7 @@
} }
}, },
"python_named_params": { "python_named_params": {
"description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.", "description": "",
"additionalproperties": { "additionalproperties": {
"description": "" "description": ""
} }
@ -4090,7 +4233,7 @@
"description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
}, },
"source": { "source": {
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository."
} }
} }
}, },
@ -4162,13 +4305,13 @@
} }
}, },
"file": { "file": {
"description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", "description": "If file, indicates that this job runs a SQL file in a remote Git repository.",
"properties": { "properties": {
"path": { "path": {
"description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths."
}, },
"source": { "source": {
"description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider."
} }
} }
}, },
@ -4270,10 +4413,10 @@
} }
}, },
"pause_status": { "pause_status": {
"description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." "description": "Whether this trigger is paused or not."
}, },
"table": { "table": {
"description": "", "description": "Old table trigger settings name. Deprecated in favor of `table_update`.",
"properties": { "properties": {
"condition": { "condition": {
"description": "The table(s) condition based on which to trigger a job run." "description": "The table(s) condition based on which to trigger a job run."
@ -4315,7 +4458,7 @@
} }
}, },
"webhook_notifications": { "webhook_notifications": {
"description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "description": "A collection of system notification IDs to notify when runs of this job begin or complete.",
"properties": { "properties": {
"on_duration_warning_threshold_exceeded": { "on_duration_warning_threshold_exceeded": {
"description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.",
@ -4469,6 +4612,15 @@
"openai_config": { "openai_config": {
"description": "OpenAI Config. Only required if the provider is 'openai'.", "description": "OpenAI Config. Only required if the provider is 'openai'.",
"properties": { "properties": {
"microsoft_entra_client_id": {
"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n"
},
"microsoft_entra_client_secret": {
"description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n"
},
"microsoft_entra_tenant_id": {
"description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n"
},
"openai_api_base": { "openai_api_base": {
"description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n"
}, },
@ -4625,6 +4777,9 @@
} }
} }
}, },
"route_optimized": {
"description": "Enable route optimization for the serving endpoint."
},
"tags": { "tags": {
"description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.",
"items": { "items": {
@ -5051,6 +5206,17 @@
"continuous": { "continuous": {
"description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`."
}, },
"deployment": {
"description": "Deployment type of this pipeline.",
"properties": {
"kind": {
"description": "The deployment method that manages the pipeline."
},
"metadata_file_path": {
"description": "The path to the file containing metadata about the deployment."
}
}
},
"development": { "development": {
"description": "Whether the pipeline is in Development mode. Defaults to false." "description": "Whether the pipeline is in Development mode. Defaults to false."
}, },
@ -5074,9 +5240,136 @@
} }
} }
}, },
"gateway_definition": {
"description": "The definition of a gateway pipeline to support CDC.",
"properties": {
"connection_id": {
"description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source."
},
"gateway_storage_catalog": {
"description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location."
},
"gateway_storage_name": {
"description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n"
},
"gateway_storage_schema": {
"description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location."
}
}
},
"id": { "id": {
"description": "Unique identifier for this pipeline." "description": "Unique identifier for this pipeline."
}, },
"ingestion_definition": {
"description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.",
"properties": {
"connection_name": {
"description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name."
},
"ingestion_gateway_id": {
"description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name."
},
"objects": {
"description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.",
"items": {
"description": "",
"properties": {
"schema": {
"description": "Select tables from a specific source schema.",
"properties": {
"destination_catalog": {
"description": "Required. Destination catalog to store tables."
},
"destination_schema": {
"description": "Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists."
},
"source_catalog": {
"description": "The source catalog name. Might be optional depending on the type of source."
},
"source_schema": {
"description": "Required. Schema name in the source database."
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
"items": {
"description": ""
}
},
"salesforce_include_formula_fields": {
"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
},
"scd_type": {
"description": "The SCD type to use to ingest the table."
}
}
}
}
},
"table": {
"description": "Select tables from a specific source table.",
"properties": {
"destination_catalog": {
"description": "Required. Destination catalog to store table."
},
"destination_schema": {
"description": "Required. Destination schema to store table."
},
"destination_table": {
"description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used."
},
"source_catalog": {
"description": "Source catalog name. Might be optional depending on the type of source."
},
"source_schema": {
"description": "Schema name in the source database. Might be optional depending on the type of source."
},
"source_table": {
"description": "Required. Table name in the source database."
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
"items": {
"description": ""
}
},
"salesforce_include_formula_fields": {
"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
},
"scd_type": {
"description": "The SCD type to use to ingest the table."
}
}
}
}
}
}
}
},
"table_configuration": {
"description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.",
"properties": {
"primary_keys": {
"description": "The primary key of the table used to apply changes.",
"items": {
"description": ""
}
},
"salesforce_include_formula_fields": {
"description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector"
},
"scd_type": {
"description": "The SCD type to use to ingest the table."
}
}
}
}
},
"libraries": { "libraries": {
"description": "Libraries or code needed by this deployment.", "description": "Libraries or code needed by this deployment.",
"items": { "items": {

View File

@ -10,17 +10,21 @@ import (
) )
type OpenapiReader struct { type OpenapiReader struct {
// OpenAPI spec to read schemas from.
OpenapiSpec *openapi.Specification OpenapiSpec *openapi.Specification
Memo map[string]*jsonschema.Schema
// In-memory cache of schemas read from the OpenAPI spec.
memo map[string]jsonschema.Schema
} }
const SchemaPathPrefix = "#/components/schemas/" const SchemaPathPrefix = "#/components/schemas/"
func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema, error) { // Read a schema directly from the OpenAPI spec.
func (reader *OpenapiReader) readOpenapiSchema(path string) (jsonschema.Schema, error) {
schemaKey := strings.TrimPrefix(path, SchemaPathPrefix) schemaKey := strings.TrimPrefix(path, SchemaPathPrefix)
// return early if we already have a computed schema // return early if we already have a computed schema
memoSchema, ok := reader.Memo[schemaKey] memoSchema, ok := reader.memo[schemaKey]
if ok { if ok {
return memoSchema, nil return memoSchema, nil
} }
@ -28,18 +32,18 @@ func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema,
// check path is present in openapi spec // check path is present in openapi spec
openapiSchema, ok := reader.OpenapiSpec.Components.Schemas[schemaKey] openapiSchema, ok := reader.OpenapiSpec.Components.Schemas[schemaKey]
if !ok { if !ok {
return nil, fmt.Errorf("schema with path %s not found in openapi spec", path) return jsonschema.Schema{}, fmt.Errorf("schema with path %s not found in openapi spec", path)
} }
// convert openapi schema to the native schema struct // convert openapi schema to the native schema struct
bytes, err := json.Marshal(*openapiSchema) bytes, err := json.Marshal(*openapiSchema)
if err != nil { if err != nil {
return nil, err return jsonschema.Schema{}, err
} }
jsonSchema := &jsonschema.Schema{} jsonSchema := jsonschema.Schema{}
err = json.Unmarshal(bytes, jsonSchema) err = json.Unmarshal(bytes, &jsonSchema)
if err != nil { if err != nil {
return nil, err return jsonschema.Schema{}, err
} }
// A hack to convert a map[string]interface{} to *Schema // A hack to convert a map[string]interface{} to *Schema
@ -49,23 +53,28 @@ func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema,
if ok { if ok {
b, err := json.Marshal(jsonSchema.AdditionalProperties) b, err := json.Marshal(jsonSchema.AdditionalProperties)
if err != nil { if err != nil {
return nil, err return jsonschema.Schema{}, err
} }
additionalProperties := &jsonschema.Schema{} additionalProperties := &jsonschema.Schema{}
err = json.Unmarshal(b, additionalProperties) err = json.Unmarshal(b, additionalProperties)
if err != nil { if err != nil {
return nil, err return jsonschema.Schema{}, err
} }
jsonSchema.AdditionalProperties = additionalProperties jsonSchema.AdditionalProperties = additionalProperties
} }
// store read schema into memo // store read schema into memo
reader.Memo[schemaKey] = jsonSchema reader.memo[schemaKey] = jsonSchema
return jsonSchema, nil return jsonSchema, nil
} }
// safe againt loops in refs // Resolve all nested "$ref" references in the schema. This function unrolls a single
// level of "$ref" in the schema and calls into traverseSchema to resolve nested references.
// Thus this function and traverseSchema are mutually recursive.
//
// This function is safe against reference loops. If a reference loop is detected, an error
// is returned.
func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) { func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) {
if root.Reference == nil { if root.Reference == nil {
return reader.traverseSchema(root, tracker) return reader.traverseSchema(root, tracker)
@ -91,12 +100,12 @@ func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *t
// in the memo // in the memo
root.Reference = nil root.Reference = nil
// unroll one level of reference // unroll one level of reference.
selfRef, err := reader.readOpenapiSchema(ref) selfRef, err := reader.readOpenapiSchema(ref)
if err != nil { if err != nil {
return nil, err return nil, err
} }
root = selfRef root = &selfRef
root.Description = description root.Description = description
// traverse again to find new references // traverse again to find new references
@ -108,6 +117,8 @@ func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *t
return root, err return root, err
} }
// Traverse the nested properties of the schema to resolve "$ref" references. This function
// and safeResolveRefs are mutually recursive.
func (reader *OpenapiReader) traverseSchema(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) { func (reader *OpenapiReader) traverseSchema(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) {
// case primitive (or invalid) // case primitive (or invalid)
if root.Type != jsonschema.ObjectType && root.Type != jsonschema.ArrayType { if root.Type != jsonschema.ObjectType && root.Type != jsonschema.ArrayType {
@ -154,11 +165,11 @@ func (reader *OpenapiReader) readResolvedSchema(path string) (*jsonschema.Schema
} }
tracker := newTracker() tracker := newTracker()
tracker.push(path, path) tracker.push(path, path)
root, err = reader.safeResolveRefs(root, tracker) resolvedRoot, err := reader.safeResolveRefs(&root, tracker)
if err != nil { if err != nil {
return nil, tracker.errWithTrace(err.Error(), "") return nil, tracker.errWithTrace(err.Error(), "")
} }
return root, nil return resolvedRoot, nil
} }
func (reader *OpenapiReader) jobsDocs() (*Docs, error) { func (reader *OpenapiReader) jobsDocs() (*Docs, error) {

View File

@ -48,7 +48,7 @@ func TestReadSchemaForObject(t *testing.T) {
spec := &openapi.Specification{} spec := &openapi.Specification{}
reader := &OpenapiReader{ reader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
err := json.Unmarshal([]byte(specString), spec) err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err) require.NoError(t, err)
@ -106,7 +106,7 @@ func TestReadSchemaForArray(t *testing.T) {
spec := &openapi.Specification{} spec := &openapi.Specification{}
reader := &OpenapiReader{ reader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
err := json.Unmarshal([]byte(specString), spec) err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err) require.NoError(t, err)
@ -152,7 +152,7 @@ func TestReadSchemaForMap(t *testing.T) {
spec := &openapi.Specification{} spec := &openapi.Specification{}
reader := &OpenapiReader{ reader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
err := json.Unmarshal([]byte(specString), spec) err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err) require.NoError(t, err)
@ -201,7 +201,7 @@ func TestRootReferenceIsResolved(t *testing.T) {
spec := &openapi.Specification{} spec := &openapi.Specification{}
reader := &OpenapiReader{ reader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
err := json.Unmarshal([]byte(specString), spec) err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err) require.NoError(t, err)
@ -251,7 +251,7 @@ func TestSelfReferenceLoopErrors(t *testing.T) {
spec := &openapi.Specification{} spec := &openapi.Specification{}
reader := &OpenapiReader{ reader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
err := json.Unmarshal([]byte(specString), spec) err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err) require.NoError(t, err)
@ -285,7 +285,7 @@ func TestCrossReferenceLoopErrors(t *testing.T) {
spec := &openapi.Specification{} spec := &openapi.Specification{}
reader := &OpenapiReader{ reader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
err := json.Unmarshal([]byte(specString), spec) err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err) require.NoError(t, err)
@ -330,7 +330,7 @@ func TestReferenceResolutionForMapInObject(t *testing.T) {
spec := &openapi.Specification{} spec := &openapi.Specification{}
reader := &OpenapiReader{ reader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
err := json.Unmarshal([]byte(specString), spec) err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err) require.NoError(t, err)
@ -400,7 +400,7 @@ func TestReferenceResolutionForArrayInObject(t *testing.T) {
spec := &openapi.Specification{} spec := &openapi.Specification{}
reader := &OpenapiReader{ reader := &OpenapiReader{
OpenapiSpec: spec, OpenapiSpec: spec,
Memo: make(map[string]*jsonschema.Schema), memo: make(map[string]jsonschema.Schema),
} }
err := json.Unmarshal([]byte(specString), spec) err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err) require.NoError(t, err)
@ -434,3 +434,61 @@ func TestReferenceResolutionForArrayInObject(t *testing.T) {
t.Log("[DEBUG] expected: ", expected) t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson)) assert.Equal(t, expected, string(fruitsSchemaJson))
} }
func TestReferenceResolutionDoesNotOverwriteDescriptions(t *testing.T) {
specString := `{
"components": {
"schemas": {
"foo": {
"type": "number"
},
"fruits": {
"type": "object",
"properties": {
"guava": {
"type": "object",
"description": "Guava is a fruit",
"$ref": "#/components/schemas/foo"
},
"mango": {
"type": "object",
"description": "What is a mango?",
"$ref": "#/components/schemas/foo"
}
}
}
}
}
}`
spec := &openapi.Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits")
require.NoError(t, err)
fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ")
require.NoError(t, err)
expected := `{
"type": "object",
"properties": {
"guava": {
"type": "number",
"description": "Guava is a fruit"
},
"mango": {
"type": "number",
"description": "What is a mango?"
}
}
}`
t.Log("[DEBUG] actual: ", string(fruitsSchemaJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson))
}
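
The two changes above — storing schemas by value in an unexported memo and carrying the referring site's description through reference resolution — can be illustrated with a small standalone sketch. The Schema and Reader types, the read/resolve helpers, and the visited-set cycle check below are simplified stand-ins (assumptions), not the CLI's actual jsonschema/openapi types or its tracker; the sketch only mirrors the behavior exercised by TestReferenceResolutionDoesNotOverwriteDescriptions.

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for jsonschema.Schema and the OpenAPI component map.
type Schema struct {
	Type        string
	Description string
	Ref         string // analogous to "$ref"
	Properties  map[string]Schema
}

type Reader struct {
	components map[string]Schema // raw schemas keyed by component name
	memo       map[string]Schema // cache of schemas already read (by value)
}

// read returns a raw component schema, consulting the memo first.
func (r *Reader) read(name string) (Schema, error) {
	if s, ok := r.memo[name]; ok {
		return s, nil
	}
	s, ok := r.components[name]
	if !ok {
		return Schema{}, fmt.Errorf("schema %s not found", name)
	}
	r.memo[name] = s
	return s, nil
}

// resolve unrolls one level of reference at a time, keeps the referring
// description (as the test above asserts), and fails on reference cycles.
func (r *Reader) resolve(s Schema, seen map[string]bool) (Schema, error) {
	for s.Ref != "" {
		if seen[s.Ref] {
			return Schema{}, errors.New("reference loop detected at " + s.Ref)
		}
		seen[s.Ref] = true
		desc := s.Description
		target, err := r.read(s.Ref)
		if err != nil {
			return Schema{}, err
		}
		if desc != "" {
			target.Description = desc
		}
		s = target
	}
	if len(s.Properties) > 0 {
		props := make(map[string]Schema, len(s.Properties))
		for name, child := range s.Properties {
			// Each property gets its own copy of the path so siblings that
			// reference the same component are not mistaken for a cycle.
			branch := make(map[string]bool, len(seen))
			for k := range seen {
				branch[k] = true
			}
			resolved, err := r.resolve(child, branch)
			if err != nil {
				return Schema{}, err
			}
			props[name] = resolved
		}
		s.Properties = props
	}
	return s, nil
}

func main() {
	r := &Reader{
		components: map[string]Schema{"foo": {Type: "number"}},
		memo:       map[string]Schema{},
	}
	root := Schema{
		Type: "object",
		Properties: map[string]Schema{
			"guava": {Description: "Guava is a fruit", Ref: "foo"},
			"mango": {Description: "What is a mango?", Ref: "foo"},
		},
	}
	out, err := r.resolve(root, map[string]bool{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Properties["guava"].Type, "-", out.Properties["guava"].Description)
	fmt.Println(out.Properties["mango"].Type, "-", out.Properties["mango"].Description)
	// Prints: number - Guava is a fruit / number - What is a mango?
}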

View File

@ -6,6 +6,7 @@ import (
"reflect" "reflect"
"strings" "strings"
"github.com/databricks/cli/libs/dyn/dynvar"
"github.com/databricks/cli/libs/jsonschema" "github.com/databricks/cli/libs/jsonschema"
) )
@ -167,6 +168,22 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*jsonschem
} }
jsonSchema := &jsonschema.Schema{Type: rootJavascriptType} jsonSchema := &jsonschema.Schema{Type: rootJavascriptType}
// If the type is a non-string primitive, then we allow it to be a string
// provided it's a pure variable reference (ie only a single variable reference).
if rootJavascriptType == jsonschema.BooleanType || rootJavascriptType == jsonschema.NumberType {
jsonSchema = &jsonschema.Schema{
AnyOf: []*jsonschema.Schema{
{
Type: rootJavascriptType,
},
{
Type: jsonschema.StringType,
Pattern: dynvar.VariableRegex,
},
},
}
}
if docs != nil { if docs != nil {
jsonSchema.Description = docs.Description jsonSchema.Description = docs.Description
} }
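
To make the effect of this change concrete, here is a self-contained sketch that builds the anyOf form emitted for non-string primitives, so an integer or boolean field also accepts a pure ${...} variable reference. The local Schema struct, primitiveSchema helper, and variableRegex constant are assumed names standing in for jsonschema.Schema and dynvar.VariableRegex.

package main

import (
	"encoding/json"
	"fmt"
)

// Minimal stand-in for jsonschema.Schema with only the fields used here.
type Schema struct {
	Type    string    `json:"type,omitempty"`
	Pattern string    `json:"pattern,omitempty"`
	AnyOf   []*Schema `json:"anyOf,omitempty"`
}

// Assumed to match the pattern visible in the expected test output below
// (dynvar.VariableRegex in the real code).
const variableRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}`

// primitiveSchema returns a plain schema for strings and the anyOf form for
// booleans and numbers, mirroring the branch added to toSchema above.
func primitiveSchema(typ string) *Schema {
	if typ == "boolean" || typ == "number" {
		return &Schema{AnyOf: []*Schema{
			{Type: typ},
			{Type: "string", Pattern: variableRegex},
		}}
	}
	return &Schema{Type: typ}
}

func main() {
	out, err := json.MarshalIndent(primitiveSchema("number"), "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"anyOf": [{"type": "number"}, {"type": "string", "pattern": ...}]}
}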

View File

@ -14,7 +14,15 @@ func TestIntSchema(t *testing.T) {
expected := expected :=
`{ `{
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
}` }`
schema, err := New(reflect.TypeOf(elemInt), nil) schema, err := New(reflect.TypeOf(elemInt), nil)
@ -33,7 +41,15 @@ func TestBooleanSchema(t *testing.T) {
expected := expected :=
`{ `{
"anyOf": [
{
"type": "boolean" "type": "boolean"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
}` }`
schema, err := New(reflect.TypeOf(elem), nil) schema, err := New(reflect.TypeOf(elem), nil)
@ -101,46 +117,150 @@ func TestStructOfPrimitivesSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"bool_val": { "bool_val": {
"anyOf": [
{
"type": "boolean" "type": "boolean"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"float32_val": { "float32_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"float64_val": { "float64_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"int16_val": { "int16_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"int32_val": { "int32_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"int64_val": { "int64_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"int8_val": { "int8_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"int_val": { "int_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"string_val": { "string_val": {
"type": "string" "type": "string"
}, },
"uint16_val": { "uint16_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"uint32_val": { "uint32_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"uint64_val": { "uint64_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"uint8_val": { "uint8_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"uint_val": { "uint_val": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -200,8 +320,16 @@ func TestStructOfStructsSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"a": { "a": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"b": { "b": {
"type": "string" "type": "string"
} }
@ -257,7 +385,15 @@ func TestStructOfMapsSchema(t *testing.T) {
"my_map": { "my_map": {
"type": "object", "type": "object",
"additionalProperties": { "additionalProperties": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
} }
}, },
@ -339,7 +475,15 @@ func TestMapOfPrimitivesSchema(t *testing.T) {
`{ `{
"type": "object", "type": "object",
"additionalProperties": { "additionalProperties": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}` }`
@ -368,7 +512,15 @@ func TestMapOfStructSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"my_int": { "my_int": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -398,7 +550,15 @@ func TestMapOfMapSchema(t *testing.T) {
"additionalProperties": { "additionalProperties": {
"type": "object", "type": "object",
"additionalProperties": { "additionalProperties": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
} }
}` }`
@ -495,7 +655,15 @@ func TestSliceOfMapSchema(t *testing.T) {
"items": { "items": {
"type": "object", "type": "object",
"additionalProperties": { "additionalProperties": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
} }
}` }`
@ -525,7 +693,15 @@ func TestSliceOfStructSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"my_int": { "my_int": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -575,8 +751,16 @@ func TestEmbeddedStructSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"age": { "age": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"country": { "country": {
"type": "string" "type": "string"
}, },
@ -607,8 +791,16 @@ func TestEmbeddedStructSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"age": { "age": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"home": { "home": {
"type": "object", "type": "object",
"properties": { "properties": {
@ -694,7 +886,15 @@ func TestNonAnnotatedFieldsAreSkipped(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"bar": { "bar": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -728,7 +928,15 @@ func TestDashFieldsAreSkipped(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"bar": { "bar": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -773,7 +981,15 @@ func TestPointerInStructSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"ptr_val2": { "ptr_val2": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -782,13 +998,29 @@ func TestPointerInStructSchema(t *testing.T) {
] ]
}, },
"float_val": { "float_val": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"ptr_bar": { "ptr_bar": {
"type": "object", "type": "object",
"properties": { "properties": {
"ptr_val2": { "ptr_val2": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -797,8 +1029,16 @@ func TestPointerInStructSchema(t *testing.T) {
] ]
}, },
"ptr_int": { "ptr_int": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"ptr_string": { "ptr_string": {
"type": "string" "type": "string"
} }
@ -860,8 +1100,16 @@ func TestGenericSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"age": { "age": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"name": { "name": {
"type": "string" "type": "string"
} }
@ -875,8 +1123,16 @@ func TestGenericSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"age": { "age": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"name": { "name": {
"type": "string" "type": "string"
} }
@ -895,8 +1151,16 @@ func TestGenericSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"age": { "age": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"name": { "name": {
"type": "string" "type": "string"
} }
@ -910,8 +1174,16 @@ func TestGenericSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"age": { "age": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"name": { "name": {
"type": "string" "type": "string"
} }
@ -932,8 +1204,16 @@ func TestGenericSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"age": { "age": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"name": { "name": {
"type": "string" "type": "string"
} }
@ -950,8 +1230,16 @@ func TestGenericSchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"age": { "age": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"name": { "name": {
"type": "string" "type": "string"
} }
@ -1028,17 +1316,41 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"apple": { "apple": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"bar": { "bar": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"papaya": { "papaya": {
"type": "object", "type": "object",
"properties": { "properties": {
"a": { "a": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"b": { "b": {
"type": "string" "type": "string"
} }
@ -1111,7 +1423,15 @@ func TestDocIngestionForObject(t *testing.T) {
"description": "docs for a" "description": "docs for a"
}, },
"b": { "b": {
"anyOf": [
{
"type": "number" "type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -1185,12 +1505,28 @@ func TestDocIngestionForSlice(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"guava": { "guava": {
"type": "number", "description": "docs for guava",
"description": "docs for guava" "anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
}, },
"pineapple": { "pineapple": {
"type": "number", "description": "docs for pineapple",
"description": "docs for pineapple" "anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -1268,12 +1604,28 @@ func TestDocIngestionForMap(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"apple": { "apple": {
"type": "number", "description": "docs for apple",
"description": "docs for apple" "anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
}, },
"mango": { "mango": {
"type": "number", "description": "docs for mango",
"description": "docs for mango" "anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -1324,8 +1676,16 @@ func TestDocIngestionForTopLevelPrimitive(t *testing.T) {
"description": "docs for root", "description": "docs for root",
"properties": { "properties": {
"my_val": { "my_val": {
"type": "number", "description": "docs for my val",
"description": "docs for my val" "anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,
@ -1395,8 +1755,16 @@ func TestInterfaceGeneratesEmptySchema(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"apple": { "apple": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"mango": {} "mango": {}
}, },
"additionalProperties": false, "additionalProperties": false,
@ -1436,8 +1804,16 @@ func TestBundleReadOnlytag(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"apple": { "apple": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"pokemon": { "pokemon": {
"type": "object", "type": "object",
"properties": { "properties": {
@ -1488,8 +1864,16 @@ func TestBundleInternalTag(t *testing.T) {
"type": "object", "type": "object",
"properties": { "properties": {
"apple": { "apple": {
"anyOf": [
{
"type": "number" "type": "number"
}, },
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}"
}
]
},
"pokemon": { "pokemon": {
"type": "object", "type": "object",
"properties": { "properties": {

View File

@ -1,8 +1,11 @@
package config_tests package config_tests
import ( import (
"context"
"testing" "testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/libraries"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -10,3 +13,11 @@ func TestEnvironmentKeySupported(t *testing.T) {
_, diags := loadTargetWithDiags("./python_wheel/environment_key", "default") _, diags := loadTargetWithDiags("./python_wheel/environment_key", "default")
require.Empty(t, diags) require.Empty(t, diags)
} }
func TestEnvironmentKeyProvidedAndNoPanic(t *testing.T) {
b, diags := loadTargetWithDiags("./environment_key_only", "default")
require.Empty(t, diags)
diags = bundle.Apply(context.Background(), b, libraries.ValidateLocalLibrariesExist())
require.Empty(t, diags)
}

View File

@ -1,6 +1,8 @@
package config_tests package config_tests
import ( import (
"fmt"
"strings"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -9,12 +11,14 @@ import (
func TestGitAutoLoadWithEnvironment(t *testing.T) { func TestGitAutoLoadWithEnvironment(t *testing.T) {
b := load(t, "./environments_autoload_git") b := load(t, "./environments_autoload_git")
assert.True(t, b.Config.Bundle.Git.Inferred) assert.True(t, b.Config.Bundle.Git.Inferred)
assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
} }
func TestGitManuallySetBranchWithEnvironment(t *testing.T) { func TestGitManuallySetBranchWithEnvironment(t *testing.T) {
b := loadTarget(t, "./environments_autoload_git", "production") b := loadTarget(t, "./environments_autoload_git", "production")
assert.False(t, b.Config.Bundle.Git.Inferred) assert.False(t, b.Config.Bundle.Git.Inferred)
assert.Equal(t, "main", b.Config.Bundle.Git.Branch) assert.Equal(t, "main", b.Config.Bundle.Git.Branch)
assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
} }

View File

@ -0,0 +1,16 @@
bundle:
name: environment_key_only
resources:
jobs:
test_job:
name: "My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-132531-5opeqon1"
python_wheel_task:
package_name: "my_test_code"
entry_point: "run"
environment_key: "test_env"
environments:
- environment_key: "test_env"

View File

@ -2,6 +2,8 @@ package config_tests
import ( import (
"context" "context"
"fmt"
"strings"
"testing" "testing"
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle"
@ -13,14 +15,16 @@ import (
func TestGitAutoLoad(t *testing.T) { func TestGitAutoLoad(t *testing.T) {
b := load(t, "./autoload_git") b := load(t, "./autoload_git")
assert.True(t, b.Config.Bundle.Git.Inferred) assert.True(t, b.Config.Bundle.Git.Inferred)
assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
} }
func TestGitManuallySetBranch(t *testing.T) { func TestGitManuallySetBranch(t *testing.T) {
b := loadTarget(t, "./autoload_git", "production") b := loadTarget(t, "./autoload_git", "production")
assert.False(t, b.Config.Bundle.Git.Inferred) assert.False(t, b.Config.Bundle.Git.Inferred)
assert.Equal(t, "main", b.Config.Bundle.Git.Branch) assert.Equal(t, "main", b.Config.Bundle.Git.Branch)
assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks")
assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL))
} }
func TestGitBundleBranchValidation(t *testing.T) { func TestGitBundleBranchValidation(t *testing.T) {

View File

@ -2,3 +2,4 @@ resources:
jobs: jobs:
my_first_job: my_first_job:
id: 1 id: 1
name: "My First Job"

View File

@ -2,3 +2,4 @@ resources:
jobs: jobs:
my_second_job: my_second_job:
id: 2 id: 2
name: "My Second Job"

View File

@ -2,3 +2,4 @@ resources:
jobs: jobs:
my_job: my_job:
id: 1 id: 1
name: "My Job"

View File

@ -0,0 +1,40 @@
resources:
quality_monitors:
my_monitor:
table_name: "main.test.thing1"
assets_dir: "/Shared/provider-test/databricks_monitoring/main.test.thing1"
output_schema_name: "test"
inference_log:
granularities: ["1 day"]
timestamp_col: "timestamp"
prediction_col: "prediction"
model_id_col: "model_id"
problem_type: "PROBLEM_TYPE_REGRESSION"
targets:
development:
mode: development
resources:
quality_monitors:
my_monitor:
table_name: "main.test.dev"
staging:
resources:
quality_monitors:
my_monitor:
table_name: "main.test.staging"
output_schema_name: "staging"
production:
resources:
quality_monitors:
my_monitor:
table_name: "main.test.prod"
output_schema_name: "prod"
inference_log:
granularities: ["1 hour"]
timestamp_col: "timestamp_prod"
prediction_col: "prediction_prod"
model_id_col: "model_id_prod"
problem_type: "PROBLEM_TYPE_REGRESSION"

View File

@ -0,0 +1,59 @@
package config_tests
import (
"testing"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/stretchr/testify/assert"
)
func assertExpectedMonitor(t *testing.T, p *resources.QualityMonitor) {
assert.Equal(t, "timestamp", p.InferenceLog.TimestampCol)
assert.Equal(t, "prediction", p.InferenceLog.PredictionCol)
assert.Equal(t, "model_id", p.InferenceLog.ModelIdCol)
assert.Equal(t, catalog.MonitorInferenceLogProblemType("PROBLEM_TYPE_REGRESSION"), p.InferenceLog.ProblemType)
}
func TestMonitorTableNames(t *testing.T) {
b := loadTarget(t, "./quality_monitor", "development")
assert.Len(t, b.Config.Resources.QualityMonitors, 1)
assert.Equal(t, b.Config.Bundle.Mode, config.Development)
p := b.Config.Resources.QualityMonitors["my_monitor"]
assert.Equal(t, "main.test.dev", p.TableName)
assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir)
assert.Equal(t, "test", p.OutputSchemaName)
assertExpectedMonitor(t, p)
}
func TestMonitorStaging(t *testing.T) {
b := loadTarget(t, "./quality_monitor", "staging")
assert.Len(t, b.Config.Resources.QualityMonitors, 1)
p := b.Config.Resources.QualityMonitors["my_monitor"]
assert.Equal(t, "main.test.staging", p.TableName)
assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir)
assert.Equal(t, "staging", p.OutputSchemaName)
assertExpectedMonitor(t, p)
}
func TestMonitorProduction(t *testing.T) {
b := loadTarget(t, "./quality_monitor", "production")
assert.Len(t, b.Config.Resources.QualityMonitors, 1)
p := b.Config.Resources.QualityMonitors["my_monitor"]
assert.Equal(t, "main.test.prod", p.TableName)
assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir)
assert.Equal(t, "prod", p.OutputSchemaName)
inferenceLog := p.InferenceLog
assert.Equal(t, []string{"1 day", "1 hour"}, inferenceLog.Granularities)
assert.Equal(t, "timestamp_prod", p.InferenceLog.TimestampCol)
assert.Equal(t, "prediction_prod", p.InferenceLog.PredictionCol)
assert.Equal(t, "model_id_prod", p.InferenceLog.ModelIdCol)
assert.Equal(t, catalog.MonitorInferenceLogProblemType("PROBLEM_TYPE_REGRESSION"), p.InferenceLog.ProblemType)
}

View File

@ -0,0 +1,8 @@
bundle:
name: undefined-job
resources:
jobs:
undefined:
test:
name: "Test Job"

View File

@ -0,0 +1,12 @@
package config_tests
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestUndefinedJobLoadsWithError(t *testing.T) {
_, diags := loadTargetWithDiags("./undefined_job", "default")
assert.ErrorContains(t, diags.Error(), "job undefined is not defined")
}

View File

@ -0,0 +1,41 @@
bundle:
name: foobar
resources:
pipelines:
my_pipeline:
name: ${var.foo}
continuous: ${var.baz}
clusters:
- num_workers: ${var.bar}
variables:
foo:
default: "a_string"
description: "A string variable"
bar:
default: 42
description: "An integer variable"
baz:
default: true
description: "A boolean variable"
targets:
use-default-variable-values:
override-string-variable:
variables:
foo: "overridden_string"
override-int-variable:
variables:
bar: 43
override-both-bool-and-string-variables:
variables:
foo: "overridden_string"
baz: false

View File

@ -120,3 +120,52 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) {
assert.Equal(t, "cluster: some-test-cluster", b.Config.Variables["d"].Lookup.String()) assert.Equal(t, "cluster: some-test-cluster", b.Config.Variables["d"].Lookup.String())
assert.Equal(t, "instance-pool: some-test-instance-pool", b.Config.Variables["e"].Lookup.String()) assert.Equal(t, "instance-pool: some-test-instance-pool", b.Config.Variables["e"].Lookup.String())
} }
func TestVariableTargetOverrides(t *testing.T) {
var tcases = []struct {
targetName string
pipelineName string
pipelineContinuous bool
pipelineNumWorkers int
}{
{
"use-default-variable-values",
"a_string",
true,
42,
},
{
"override-string-variable",
"overridden_string",
true,
42,
},
{
"override-int-variable",
"a_string",
true,
43,
},
{
"override-both-bool-and-string-variables",
"overridden_string",
false,
42,
},
}
for _, tcase := range tcases {
t.Run(tcase.targetName, func(t *testing.T) {
b := loadTarget(t, "./variables/variable_overrides_in_target", tcase.targetName)
diags := bundle.Apply(context.Background(), b, bundle.Seq(
mutator.SetVariables(),
mutator.ResolveVariableReferences("variables")),
)
require.NoError(t, diags.Error())
assert.Equal(t, tcase.pipelineName, b.Config.Resources.Pipelines["my_pipeline"].Name)
assert.Equal(t, tcase.pipelineContinuous, b.Config.Resources.Pipelines["my_pipeline"].Continuous)
assert.Equal(t, tcase.pipelineNumWorkers, b.Config.Resources.Pipelines["my_pipeline"].Clusters[0].NumWorkers)
})
}
}

View File

@ -156,4 +156,4 @@ func newUpdate() *cobra.Command {
return cmd return cmd
} }
// end service CSPEnablementAccount // end service CspEnablementAccount

View File

@ -154,4 +154,4 @@ func newUpdate() *cobra.Command {
return cmd return cmd
} }
// end service ESMEnablementAccount // end service EsmEnablementAccount

View File

@ -10,7 +10,7 @@ import (
"net/url" "net/url"
"strings" "strings"
"github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/cli/libs/databrickscfg/profile"
"github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/config"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"gopkg.in/ini.v1" "gopkg.in/ini.v1"
@ -70,7 +70,7 @@ func resolveSection(cfg *config.Config, iniFile *config.File) (*ini.Section, err
} }
func loadFromDatabricksCfg(ctx context.Context, cfg *config.Config) error { func loadFromDatabricksCfg(ctx context.Context, cfg *config.Config) error {
iniFile, err := databrickscfg.Get(ctx) iniFile, err := profile.DefaultProfiler.Get(ctx)
if errors.Is(err, fs.ErrNotExist) { if errors.Is(err, fs.ErrNotExist) {
// it's fine not to have ~/.databrickscfg // it's fine not to have ~/.databrickscfg
return nil return nil

View File

@ -11,6 +11,7 @@ import (
"github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/cli/libs/databrickscfg"
"github.com/databricks/cli/libs/databrickscfg/cfgpickers" "github.com/databricks/cli/libs/databrickscfg/cfgpickers"
"github.com/databricks/cli/libs/databrickscfg/profile"
"github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/config"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -31,6 +32,7 @@ func configureHost(ctx context.Context, persistentAuth *auth.PersistentAuth, arg
} }
const minimalDbConnectVersion = "13.1" const minimalDbConnectVersion = "13.1"
const defaultTimeout = 1 * time.Hour
func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command {
defaultConfigPath := "~/.databrickscfg" defaultConfigPath := "~/.databrickscfg"
@ -84,7 +86,7 @@ depends on the existing profiles you have set in your configuration file
var loginTimeout time.Duration var loginTimeout time.Duration
var configureCluster bool var configureCluster bool
cmd.Flags().DurationVar(&loginTimeout, "timeout", auth.DefaultTimeout, cmd.Flags().DurationVar(&loginTimeout, "timeout", defaultTimeout,
"Timeout for completing login challenge in the browser") "Timeout for completing login challenge in the browser")
cmd.Flags().BoolVar(&configureCluster, "configure-cluster", false, cmd.Flags().BoolVar(&configureCluster, "configure-cluster", false,
"Prompts to configure cluster") "Prompts to configure cluster")
@ -108,7 +110,7 @@ depends on the existing profiles you have set in your configuration file
profileName = profile profileName = profile
} }
err := setHost(ctx, profileName, persistentAuth, args) err := setHostAndAccountId(ctx, profileName, persistentAuth, args)
if err != nil { if err != nil {
return err return err
} }
@ -118,16 +120,9 @@ depends on the existing profiles you have set in your configuration file
// Otherwise it will complain about non existing profile because it was not yet saved. // Otherwise it will complain about non existing profile because it was not yet saved.
cfg := config.Config{ cfg := config.Config{
Host: persistentAuth.Host, Host: persistentAuth.Host,
AccountID: persistentAuth.AccountID,
AuthType: "databricks-cli", AuthType: "databricks-cli",
} }
if cfg.IsAccountClient() && persistentAuth.AccountID == "" {
accountId, err := promptForAccountID(ctx)
if err != nil {
return err
}
persistentAuth.AccountID = accountId
}
cfg.AccountID = persistentAuth.AccountID
ctx, cancel := context.WithTimeout(ctx, loginTimeout) ctx, cancel := context.WithTimeout(ctx, loginTimeout)
defer cancel() defer cancel()
@ -172,15 +167,15 @@ depends on the existing profiles you have set in your configuration file
return cmd return cmd
} }
func setHost(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error {
profiler := profile.GetProfiler(ctx)
// If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile. // If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile.
_, profiles, err := databrickscfg.LoadProfiles(ctx, func(p databrickscfg.Profile) bool { profiles, err := profiler.LoadProfiles(ctx, profile.WithName(profileName))
return p.Name == profileName
})
// Tolerate ErrNoConfiguration here, as we will write out a configuration as part of the login flow. // Tolerate ErrNoConfiguration here, as we will write out a configuration as part of the login flow.
if err != nil && !errors.Is(err, databrickscfg.ErrNoConfiguration) { if err != nil && !errors.Is(err, profile.ErrNoConfiguration) {
return err return err
} }
if persistentAuth.Host == "" { if persistentAuth.Host == "" {
if len(profiles) > 0 && profiles[0].Host != "" { if len(profiles) > 0 && profiles[0].Host != "" {
persistentAuth.Host = profiles[0].Host persistentAuth.Host = profiles[0].Host
@ -188,5 +183,17 @@ func setHost(ctx context.Context, profileName string, persistentAuth *auth.Persi
configureHost(ctx, persistentAuth, args, 0) configureHost(ctx, persistentAuth, args, 0)
} }
} }
isAccountClient := (&config.Config{Host: persistentAuth.Host}).IsAccountClient()
if isAccountClient && persistentAuth.AccountID == "" {
if len(profiles) > 0 && profiles[0].AccountID != "" {
persistentAuth.AccountID = profiles[0].AccountID
} else {
accountId, err := promptForAccountID(ctx)
if err != nil {
return err
}
persistentAuth.AccountID = accountId
}
}
return nil return nil
} }
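
The net effect of the setHostAndAccountId change is a precedence order: an explicitly provided value wins, then the selected profile, then an interactive prompt, and the account ID is only resolved for account-level hosts. The sketch below condenses that flow; every helper parameter (isAccountHost, promptHost, promptAccountID, and the profile values) is a placeholder for the real profiler lookup, the cmdio prompts, and the config.Config.IsAccountClient check.

package main

import "fmt"

// resolveHostAndAccount condenses the precedence implemented above: flag/arg,
// then profile, then prompt; the account ID is only needed for account hosts.
func resolveHostAndAccount(
	host, accountID string, // values already set via flags or arguments, if any
	profileHost, profileAccountID string, // values from the chosen profile, if any
	isAccountHost func(string) bool,
	promptHost, promptAccountID func() (string, error),
) (string, string, error) {
	if host == "" {
		if profileHost != "" {
			host = profileHost
		} else {
			h, err := promptHost()
			if err != nil {
				return "", "", err
			}
			host = h
		}
	}
	if isAccountHost(host) && accountID == "" {
		if profileAccountID != "" {
			accountID = profileAccountID
		} else {
			id, err := promptAccountID()
			if err != nil {
				return "", "", err
			}
			accountID = id
		}
	}
	return host, accountID, nil
}

func main() {
	isAccountHost := func(h string) bool { return h == "https://accounts.cloud.databricks.com" }
	noPrompt := func() (string, error) { return "", fmt.Errorf("prompt should not be needed") }

	host, acct, err := resolveHostAndAccount(
		"", "", // nothing passed on the command line
		"https://accounts.cloud.databricks.com", "1234", // profile supplies both
		isAccountHost, noPrompt, noPrompt,
	)
	fmt.Println(host, acct, err) // both values come from the profile, no prompting
}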

View File

@ -12,6 +12,6 @@ import (
func TestSetHostDoesNotFailWithNoDatabrickscfg(t *testing.T) { func TestSetHostDoesNotFailWithNoDatabrickscfg(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./imaginary-file/databrickscfg") ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./imaginary-file/databrickscfg")
err := setHost(ctx, "foo", &auth.PersistentAuth{Host: "test"}, []string{}) err := setHostAndAccountId(ctx, "foo", &auth.PersistentAuth{Host: "test"}, []string{})
assert.NoError(t, err) assert.NoError(t, err)
} }

View File

@ -8,7 +8,7 @@ import (
"time" "time"
"github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/cli/libs/databrickscfg/profile"
"github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/config"
@ -94,7 +94,7 @@ func newProfilesCommand() *cobra.Command {
cmd.RunE = func(cmd *cobra.Command, args []string) error { cmd.RunE = func(cmd *cobra.Command, args []string) error {
var profiles []*profileMetadata var profiles []*profileMetadata
iniFile, err := databrickscfg.Get(cmd.Context()) iniFile, err := profile.DefaultProfiler.Get(cmd.Context())
if os.IsNotExist(err) { if os.IsNotExist(err) {
// return empty list for non-configured machines // return empty list for non-configured machines
iniFile = &config.File{ iniFile = &config.File{

Some files were not shown because too many files have changed in this diff.