mirror of https://github.com/databricks/cli.git
Compare commits
5 Commits
02729ff159...5c7e3aefec
| Author | SHA1 | Date |
|---|---|---|
| Lennart Kats (databricks) | 5c7e3aefec | |
| Andrew Nester | 7f3fb10c4a | |
| Pieter Noordhuis | 1db384018c | |
| Lennart Kats | c4301b7a44 | |
| Lennart Kats | a43bcc4c63 | |
@@ -65,9 +65,8 @@ func TestInitializeURLs(t *testing.T) {
 			},
 			QualityMonitors: map[string]*resources.QualityMonitor{
 				"qualityMonitor1": {
-					CreateMonitor: &catalog.CreateMonitor{
-						TableName: "catalog.schema.qualityMonitor1",
-					},
+					TableName:     "catalog.schema.qualityMonitor1",
+					CreateMonitor: &catalog.CreateMonitor{},
 				},
 			},
 			Schemas: map[string]*resources.Schema{
@@ -38,18 +38,27 @@ func overrideJobCompute(j *resources.Job, compute string) {
 }
 
 func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-	if b.Config.Bundle.Mode != config.Development {
+	var diags diag.Diagnostics
+
+	if b.Config.Bundle.Mode == config.Production {
 		if b.Config.Bundle.ClusterId != "" {
-			return diag.Errorf("cannot override compute for an target that does not use 'mode: development'")
+			// Overriding compute via a command-line flag for production works, but is not recommended.
+			diags = diags.Extend(diag.Warningf("overriding compute for a target that uses 'mode: production' is not recommended"))
+		}
+		if env.Get(ctx, "DATABRICKS_CLUSTER_ID") != "" {
+			// The DATABRICKS_CLUSTER_ID may be set by accident when doing a production deploy.
+			// Overriding the cluster in production is almost always a mistake since customers
+			// want consistency in production and not compute that is different each deploy.
+			// For this reason we log a warning and ignore the environment variable.
+			return diag.Warningf("the DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target uses 'mode: production'")
 		}
-		return nil
 	}
 	if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" {
 		b.Config.Bundle.ClusterId = v
 	}
 
 	if b.Config.Bundle.ClusterId == "" {
-		return nil
+		return diags
 	}
 
 	r := b.Config.Resources
@@ -57,5 +66,5 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId)
 	}
 
-	return nil
+	return diags
 }
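Taken together, the two hunks above change the mutator from hard-failing on any non-development target to accumulating warnings for production targets and ignoring a stray DATABRICKS_CLUSTER_ID. The snippet below is a minimal, self-contained sketch of that control flow; the diagnostic types and the overrideCompute helper are simplified stand-ins for illustration, not the CLI's diag package or its real Apply method.

```go
package main

import "fmt"

// diagnostic and diagnostics are simplified stand-ins for the CLI's diag types.
type diagnostic struct {
	severity string // "warning" or "error"
	summary  string
}

type diagnostics []diagnostic

func (ds diagnostics) extend(more ...diagnostic) diagnostics { return append(ds, more...) }

// overrideCompute mirrors the new control flow: production targets collect a
// warning when compute is overridden via a command-line flag, and an explicit
// DATABRICKS_CLUSTER_ID environment variable is ignored with its own warning.
func overrideCompute(mode, clusterIDFlag, clusterIDEnv string) (string, diagnostics) {
	var diags diagnostics
	if mode == "production" {
		if clusterIDFlag != "" {
			diags = diags.extend(diagnostic{"warning", "overriding compute for a target that uses 'mode: production' is not recommended"})
		}
		if clusterIDEnv != "" {
			// The environment variable is ignored entirely for production targets.
			return clusterIDFlag, diags.extend(diagnostic{"warning", "the DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target uses 'mode: production'"})
		}
	}
	if clusterIDEnv != "" {
		clusterIDFlag = clusterIDEnv
	}
	return clusterIDFlag, diags
}

func main() {
	id, diags := overrideCompute("production", "cluster-123", "")
	fmt.Println(id, len(diags)) // cluster-123 1
}
```

With only the command-line override set, a production target keeps the override and gets a single warning, which is the behavior the renamed TestOverrideComputeModeProduction below asserts.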
@@ -14,7 +14,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func TestOverrideDevelopment(t *testing.T) {
+func TestOverrideComputeModeDevelopment(t *testing.T) {
 	t.Setenv("DATABRICKS_CLUSTER_ID", "")
 	b := &bundle.Bundle{
 		Config: config.Root{
@@ -62,10 +62,13 @@ func TestOverrideDevelopment(t *testing.T) {
 	assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey)
 }
 
-func TestOverrideDevelopmentEnv(t *testing.T) {
+func TestOverrideComputeModeDefault(t *testing.T) {
 	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
 	b := &bundle.Bundle{
 		Config: config.Root{
+			Bundle: config.Bundle{
+				Mode: "",
+			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job1": {JobSettings: &jobs.JobSettings{
@@ -86,11 +89,12 @@ func TestOverrideDevelopmentEnv(t *testing.T) {
 
 	m := mutator.OverrideCompute()
 	diags := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, diags.Error())
-	assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
+	require.Empty(t, diags)
+	assert.Equal(t, "newClusterId", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
+	assert.Equal(t, "newClusterId", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
 }
 
-func TestOverridePipelineTask(t *testing.T) {
+func TestOverrideComputePipelineTask(t *testing.T) {
 	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
 	b := &bundle.Bundle{
 		Config: config.Root{
@@ -115,7 +119,7 @@ func TestOverridePipelineTask(t *testing.T) {
 	assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
 }
 
-func TestOverrideForEachTask(t *testing.T) {
+func TestOverrideComputeForEachTask(t *testing.T) {
 	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
 	b := &bundle.Bundle{
 		Config: config.Root{
@@ -140,10 +144,11 @@ func TestOverrideForEachTask(t *testing.T) {
 	assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task)
 }
 
-func TestOverrideProduction(t *testing.T) {
+func TestOverrideComputeModeProduction(t *testing.T) {
 	b := &bundle.Bundle{
 		Config: config.Root{
 			Bundle: config.Bundle{
+				Mode:      config.Production,
 				ClusterId: "newClusterID",
 			},
 			Resources: config.Resources{
@@ -166,13 +171,18 @@ func TestOverrideProduction(t *testing.T) {
 
 	m := mutator.OverrideCompute()
 	diags := bundle.Apply(context.Background(), b, m)
-	require.True(t, diags.HasError())
+	require.Len(t, diags, 1)
+	assert.Equal(t, "overriding compute for a target that uses 'mode: production' is not recommended", diags[0].Summary)
+	assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
 }
 
-func TestOverrideProductionEnv(t *testing.T) {
+func TestOverrideComputeModeProductionIgnoresVariable(t *testing.T) {
 	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
 	b := &bundle.Bundle{
 		Config: config.Root{
+			Bundle: config.Bundle{
+				Mode: config.Production,
+			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job1": {JobSettings: &jobs.JobSettings{
@@ -193,5 +203,7 @@ func TestOverrideProductionEnv(t *testing.T) {
 
 	m := mutator.OverrideCompute()
 	diags := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, diags.Error())
+	require.Len(t, diags, 1)
+	assert.Equal(t, "the DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target uses 'mode: production'", diags[0].Summary)
+	assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
 }
@@ -44,6 +44,11 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 				return dyn.InvalidValue, fmt.Errorf("expected string, got %s", v.Kind())
 			}
 
+			// Skip prefixing if the path does not start with /, it might be variable reference or smth else.
+			if !strings.HasPrefix(path, "/") {
+				return pv, nil
+			}
+
 			for _, prefix := range skipPrefixes {
 				if strings.HasPrefix(path, prefix) {
 					return pv, nil
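The new guard above returns any path that does not begin with "/" untouched, so tilde paths and variable references such as ${workspace.file_path}/test survive unmodified, while absolute paths still go through the skip-prefix list. Below is a standalone sketch of that rule; the maybePrefix helper and the skipPrefixes values are illustrative assumptions, not the mutator's actual code.

```go
package main

import (
	"fmt"
	"strings"
)

// maybePrefix mimics the decision made by the mutator: leave relative paths and
// variable references alone, leave known workspace roots alone, and prefix the rest.
func maybePrefix(path, workspacePrefix string) string {
	// Skip prefixing if the path does not start with /, it might be a variable reference.
	if !strings.HasPrefix(path, "/") {
		return path
	}
	// Assumed skip list for illustration only; the real list lives in the mutator.
	skipPrefixes := []string{"/Workspace/", "/Users/", "/Repos/", "/Shared/", "/Volumes/"}
	for _, prefix := range skipPrefixes {
		if strings.HasPrefix(path, prefix) {
			return path
		}
	}
	return workspacePrefix + path
}

func main() {
	fmt.Println(maybePrefix("~/test", "/Workspace"))                      // ~/test
	fmt.Println(maybePrefix("${workspace.file_path}/test", "/Workspace")) // ${workspace.file_path}/test
	fmt.Println(maybePrefix("/Volumes/Users/test", "/Workspace"))         // /Volumes/Users/test
	fmt.Println(maybePrefix("/foo/notebook", "/Workspace"))               // /Workspace/foo/notebook
}
```

The first two cases mirror the new test entries added below for ~/test and ${workspace.file_path}/test.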
@@ -31,6 +31,14 @@ func TestPrependWorkspacePrefix(t *testing.T) {
 			path:     "/Volumes/Users/test",
 			expected: "/Volumes/Users/test",
 		},
+		{
+			path:     "~/test",
+			expected: "~/test",
+		},
+		{
+			path:     "${workspace.file_path}/test",
+			expected: "${workspace.file_path}/test",
+		},
 	}
 
 	for _, tc := range testCases {
@@ -102,16 +102,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
 				"registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}},
 			},
 			QualityMonitors: map[string]*resources.QualityMonitor{
-				"qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}},
+				"qualityMonitor1": {
+					TableName: "qualityMonitor1",
+					CreateMonitor: &catalog.CreateMonitor{
+						OutputSchemaName: "catalog.schema",
+					},
+				},
 				"qualityMonitor2": {
-					CreateMonitor: &catalog.CreateMonitor{
-						TableName: "qualityMonitor2",
+					TableName: "qualityMonitor2",
+					CreateMonitor: &catalog.CreateMonitor{
+						OutputSchemaName: "catalog.schema",
 						Schedule: &catalog.MonitorCronSchedule{},
 					},
 				},
 				"qualityMonitor3": {
-					CreateMonitor: &catalog.CreateMonitor{
-						TableName: "qualityMonitor3",
+					TableName: "qualityMonitor3",
+					CreateMonitor: &catalog.CreateMonitor{
+						OutputSchemaName: "catalog.schema",
 						Schedule: &catalog.MonitorCronSchedule{
 							PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused,
 						},
@@ -13,17 +13,15 @@ import (
 )
 
 type QualityMonitor struct {
-	// Represents the Input Arguments for Terraform and will get
-	// converted to a HCL representation for CRUD
-	*catalog.CreateMonitor
-
-	// This represents the id which is the full name of the monitor
-	// (catalog_name.schema_name.table_name) that can be used
-	// as a reference in other resources. This value is returned by terraform.
 	ID string `json:"id,omitempty" bundle:"readonly"`
 
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
 	URL            string         `json:"url,omitempty" bundle:"internal"`
+
+	// The table name is a required field but not included as a JSON field in [catalog.CreateMonitor].
+	TableName string `json:"table_name"`
+
+	// This struct defines the creation payload for a monitor.
+	*catalog.CreateMonitor
 }
 
 func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
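The struct change above promotes TableName to a first-class, explicitly tagged field on the bundle resource, while the embedded catalog.CreateMonitor keeps the remaining creation options. The sketch below shows how such an embedded-pointer layout marshals; CreateMonitorStub and QualityMonitorSketch are trimmed stand-ins for illustration, not the CLI's or the SDK's types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// CreateMonitorStub is a stand-in for catalog.CreateMonitor, reduced to two fields.
type CreateMonitorStub struct {
	AssetsDir        string `json:"assets_dir,omitempty"`
	OutputSchemaName string `json:"output_schema_name,omitempty"`
}

// QualityMonitorSketch mirrors the new resource shape: the required table name
// is an explicit field, and the embedded pointer carries the creation payload.
type QualityMonitorSketch struct {
	TableName string `json:"table_name"`
	*CreateMonitorStub
}

func main() {
	qm := QualityMonitorSketch{
		TableName:         "catalog.schema.quality_monitor",
		CreateMonitorStub: &CreateMonitorStub{OutputSchemaName: "catalog.schema"},
	}
	out, _ := json.Marshal(qm)
	fmt.Println(string(out))
	// {"table_name":"catalog.schema.quality_monitor","output_schema_name":"catalog.schema"}
}
```

Because the field carries a table_name tag, it also surfaces in the bundle JSON schema and in databricks.yml (see the schema and YAML hunks further down), where it becomes a required property.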
@@ -15,8 +15,8 @@ import (
 
 func TestConvertQualityMonitor(t *testing.T) {
	var src = resources.QualityMonitor{
-		CreateMonitor: &catalog.CreateMonitor{
-			TableName: "test_table_name",
+		TableName: "test_table_name",
+		CreateMonitor: &catalog.CreateMonitor{
 			AssetsDir:        "assets_dir",
 			OutputSchemaName: "output_schema_name",
 			InferenceLog: &catalog.MonitorInferenceLog{
@@ -4,6 +4,7 @@ bundle:
 resources:
   quality_monitors:
     myqualitymonitor:
+      table_name: catalog.schema.quality_monitor
       inference_log:
         granularities:
           - a
@@ -684,6 +684,9 @@
             "description": "Configuration for monitoring snapshot tables.",
             "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot"
           },
+          "table_name": {
+            "$ref": "#/$defs/string"
+          },
           "time_series": {
             "description": "Configuration for monitoring time series tables.",
             "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries"
@@ -695,6 +698,7 @@
           },
           "additionalProperties": false,
           "required": [
+            "table_name",
             "assets_dir",
             "output_schema_name"
           ]
@@ -6,7 +6,6 @@ import (
 	"sync"
 
 	"github.com/databricks/cli/libs/dyn"
-	"github.com/databricks/cli/libs/textutil"
 )
 
 // structInfo holds the type information we need to efficiently
@@ -85,14 +84,6 @@ func buildStructInfo(typ reflect.Type) structInfo {
 		}
 
 		name, _, _ := strings.Cut(sf.Tag.Get("json"), ",")
-		if typ.Name() == "QualityMonitor" && name == "-" {
-			urlName, _, _ := strings.Cut(sf.Tag.Get("url"), ",")
-			if urlName == "" || urlName == "-" {
-				name = textutil.CamelToSnakeCase(sf.Name)
-			} else {
-				name = urlName
-			}
-		}
 		if name == "" || name == "-" {
 			continue
 		}
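With TableName now carrying a real `json:"table_name"` tag, the generic json-tag lookup in buildStructInfo produces a usable field name on its own, so the QualityMonitor-specific fallback to url tags and CamelToSnakeCase can be deleted. The snippet below is a small, self-contained illustration of that lookup; the qualityMonitor type here is a toy example, not the CLI's struct.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// A toy struct: one field with an explicit json name, one explicitly skipped.
type qualityMonitor struct {
	TableName string `json:"table_name"`
	Skipped   string `json:"-"`
}

func main() {
	typ := reflect.TypeOf(qualityMonitor{})
	for i := 0; i < typ.NumField(); i++ {
		// Same tag handling as buildStructInfo: take the name part of the json tag
		// and ignore fields whose name is empty or "-".
		name, _, _ := strings.Cut(typ.Field(i).Tag.Get("json"), ",")
		if name == "" || name == "-" {
			continue
		}
		fmt.Println(name) // prints: table_name
	}
}
```

The deleted branch existed to recover names for QualityMonitor fields tagged `json:"-"`; with an explicit table_name tag on the resource, that fallback is no longer needed.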