From ce9c9148c96449a57b476659311c33bb843adc67 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 3 Aug 2023 13:20:30 +0200 Subject: [PATCH 001/310] Regenerate bundle resource structs from latest terraform provider (#633) ## Changes This PR: 1. Regenerates the terraform provider structs based off the latest terraform provider version: 1.22.0 2. Adds a debug launch configuration for regenerating the schema ## Tests Existing unit tests --- bundle/internal/tf/codegen/.gitignore | 1 + bundle/internal/tf/schema/data_source_job.go | 131 +++++++++++------ .../tf/schema/data_source_metastore.go | 30 ++++ .../tf/schema/data_source_metastores.go | 8 ++ .../tf/schema/data_source_sql_warehouse.go | 2 +- bundle/internal/tf/schema/data_sources.go | 4 + .../resource_access_control_rule_set.go | 15 ++ bundle/internal/tf/schema/resource_group.go | 1 + bundle/internal/tf/schema/resource_job.go | 132 ++++++++++++------ .../tf/schema/resource_service_principal.go | 1 + bundle/internal/tf/schema/resource_user.go | 1 + bundle/internal/tf/schema/resources.go | 2 + 12 files changed, 241 insertions(+), 87 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_metastore.go create mode 100644 bundle/internal/tf/schema/data_source_metastores.go create mode 100644 bundle/internal/tf/schema/resource_access_control_rule_set.go diff --git a/bundle/internal/tf/codegen/.gitignore b/bundle/internal/tf/codegen/.gitignore index d59e6e95..72f05fc4 100644 --- a/bundle/internal/tf/codegen/.gitignore +++ b/bundle/internal/tf/codegen/.gitignore @@ -1,2 +1,3 @@ /codegen /tmp +/.vscode diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index a633bd3a..6d2d1aa9 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -25,19 +25,37 @@ type DataSourceJobJobSettingsSettingsDbtTask struct { } type DataSourceJobJobSettingsSettingsEmailNotifications struct { - AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` - NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` - OnFailure []string `json:"on_failure,omitempty"` - OnStart []string `json:"on_start,omitempty"` - OnSuccess []string `json:"on_success,omitempty"` + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []string `json:"on_failure,omitempty"` + OnStart []string `json:"on_start,omitempty"` + OnSuccess []string `json:"on_success,omitempty"` +} + +type DataSourceJobJobSettingsSettingsGitSourceJobSource struct { + DirtyState string `json:"dirty_state,omitempty"` + ImportFromGitBranch string `json:"import_from_git_branch"` + JobConfigPath string `json:"job_config_path"` } type DataSourceJobJobSettingsSettingsGitSource struct { - Branch string `json:"branch,omitempty"` - Commit string `json:"commit,omitempty"` - Provider string `json:"provider,omitempty"` - Tag string `json:"tag,omitempty"` - Url string `json:"url"` + Branch string `json:"branch,omitempty"` + Commit string `json:"commit,omitempty"` + Provider string `json:"provider,omitempty"` + Tag string `json:"tag,omitempty"` + Url string `json:"url"` + JobSource *DataSourceJobJobSettingsSettingsGitSourceJobSource `json:"job_source,omitempty"` +} + +type DataSourceJobJobSettingsSettingsHealthRules struct { + Metric string 
`json:"metric,omitempty"` + Op string `json:"op,omitempty"` + Value int `json:"value,omitempty"` +} + +type DataSourceJobJobSettingsSettingsHealth struct { + Rules []DataSourceJobJobSettingsSettingsHealthRules `json:"rules,omitempty"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterAutoscale struct { @@ -384,7 +402,8 @@ type DataSourceJobJobSettingsSettingsNotificationSettings struct { } type DataSourceJobJobSettingsSettingsPipelineTask struct { - PipelineId string `json:"pipeline_id"` + FullRefresh bool `json:"full_refresh,omitempty"` + PipelineId string `json:"pipeline_id"` } type DataSourceJobJobSettingsSettingsPythonWheelTask struct { @@ -445,11 +464,22 @@ type DataSourceJobJobSettingsSettingsTaskDependsOn struct { } type DataSourceJobJobSettingsSettingsTaskEmailNotifications struct { - AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` - NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` - OnFailure []string `json:"on_failure,omitempty"` - OnStart []string `json:"on_start,omitempty"` - OnSuccess []string `json:"on_success,omitempty"` + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []string `json:"on_failure,omitempty"` + OnStart []string `json:"on_start,omitempty"` + OnSuccess []string `json:"on_success,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskHealthRules struct { + Metric string `json:"metric,omitempty"` + Op string `json:"op,omitempty"` + Value int `json:"value,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskHealth struct { + Rules []DataSourceJobJobSettingsSettingsTaskHealthRules `json:"rules,omitempty"` } type DataSourceJobJobSettingsSettingsTaskLibraryCran struct { @@ -634,8 +664,15 @@ type DataSourceJobJobSettingsSettingsTaskNotebookTask struct { Source string `json:"source,omitempty"` } +type DataSourceJobJobSettingsSettingsTaskNotificationSettings struct { + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` +} + type DataSourceJobJobSettingsSettingsTaskPipelineTask struct { - PipelineId string `json:"pipeline_id"` + FullRefresh bool `json:"full_refresh,omitempty"` + PipelineId string `json:"pipeline_id"` } type DataSourceJobJobSettingsSettingsTaskPythonWheelTask struct { @@ -702,29 +739,31 @@ type DataSourceJobJobSettingsSettingsTaskSqlTask struct { } type DataSourceJobJobSettingsSettingsTask struct { - ComputeKey string `json:"compute_key,omitempty"` - Description string `json:"description,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key,omitempty"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *DataSourceJobJobSettingsSettingsTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []DataSourceJobJobSettingsSettingsTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications 
*DataSourceJobJobSettingsSettingsTaskEmailNotifications `json:"email_notifications,omitempty"` - Library []DataSourceJobJobSettingsSettingsTaskLibrary `json:"library,omitempty"` - NewCluster *DataSourceJobJobSettingsSettingsTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *DataSourceJobJobSettingsSettingsTaskNotebookTask `json:"notebook_task,omitempty"` - PipelineTask *DataSourceJobJobSettingsSettingsTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *DataSourceJobJobSettingsSettingsTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - SparkJarTask *DataSourceJobJobSettingsSettingsTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *DataSourceJobJobSettingsSettingsTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *DataSourceJobJobSettingsSettingsTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *DataSourceJobJobSettingsSettingsTaskSqlTask `json:"sql_task,omitempty"` + ComputeKey string `json:"compute_key,omitempty"` + Description string `json:"description,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key,omitempty"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *DataSourceJobJobSettingsSettingsTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []DataSourceJobJobSettingsSettingsTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *DataSourceJobJobSettingsSettingsTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *DataSourceJobJobSettingsSettingsTaskHealth `json:"health,omitempty"` + Library []DataSourceJobJobSettingsSettingsTaskLibrary `json:"library,omitempty"` + NewCluster *DataSourceJobJobSettingsSettingsTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *DataSourceJobJobSettingsSettingsTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *DataSourceJobJobSettingsSettingsTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *DataSourceJobJobSettingsSettingsTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *DataSourceJobJobSettingsSettingsTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + SparkJarTask *DataSourceJobJobSettingsSettingsTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *DataSourceJobJobSettingsSettingsTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *DataSourceJobJobSettingsSettingsTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *DataSourceJobJobSettingsSettingsTaskSqlTask `json:"sql_task,omitempty"` } type DataSourceJobJobSettingsSettingsTriggerFileArrival struct { @@ -738,6 +777,10 @@ type DataSourceJobJobSettingsSettingsTrigger struct { FileArrival *DataSourceJobJobSettingsSettingsTriggerFileArrival `json:"file_arrival,omitempty"` } +type DataSourceJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded struct { + Id string `json:"id"` +} + type DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure struct { Id string `json:"id"` } @@ -751,9 +794,10 @@ type DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess struct 
{ } type DataSourceJobJobSettingsSettingsWebhookNotifications struct { - OnFailure []DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure `json:"on_failure,omitempty"` - OnStart []DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart `json:"on_start,omitempty"` - OnSuccess []DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess `json:"on_success,omitempty"` + OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure `json:"on_failure,omitempty"` + OnStart []DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnSuccess []DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } type DataSourceJobJobSettingsSettings struct { @@ -771,6 +815,7 @@ type DataSourceJobJobSettingsSettings struct { DbtTask *DataSourceJobJobSettingsSettingsDbtTask `json:"dbt_task,omitempty"` EmailNotifications *DataSourceJobJobSettingsSettingsEmailNotifications `json:"email_notifications,omitempty"` GitSource *DataSourceJobJobSettingsSettingsGitSource `json:"git_source,omitempty"` + Health *DataSourceJobJobSettingsSettingsHealth `json:"health,omitempty"` JobCluster []DataSourceJobJobSettingsSettingsJobCluster `json:"job_cluster,omitempty"` Library []DataSourceJobJobSettingsSettingsLibrary `json:"library,omitempty"` NewCluster *DataSourceJobJobSettingsSettingsNewCluster `json:"new_cluster,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_metastore.go b/bundle/internal/tf/schema/data_source_metastore.go new file mode 100644 index 00000000..dd14be81 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_metastore.go @@ -0,0 +1,30 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceMetastoreMetastoreInfo struct { + Cloud string `json:"cloud,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DefaultDataAccessConfigId string `json:"default_data_access_config_id,omitempty"` + DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"` + DeltaSharingRecipientTokenLifetimeInSeconds int `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"` + DeltaSharingScope string `json:"delta_sharing_scope,omitempty"` + GlobalMetastoreId string `json:"global_metastore_id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + PrivilegeModelVersion string `json:"privilege_model_version,omitempty"` + Region string `json:"region,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` + StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"` + StorageRootCredentialName string `json:"storage_root_credential_name,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` +} + +type DataSourceMetastore struct { + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id"` + MetastoreInfo *DataSourceMetastoreMetastoreInfo `json:"metastore_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_metastores.go b/bundle/internal/tf/schema/data_source_metastores.go new file mode 100644 index 00000000..c2b6854e --- /dev/null +++ b/bundle/internal/tf/schema/data_source_metastores.go @@ -0,0 +1,8 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceMetastores struct { + Id string `json:"id,omitempty"` + Ids map[string]string `json:"ids,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_sql_warehouse.go b/bundle/internal/tf/schema/data_source_sql_warehouse.go index f90cc9dd..218591d0 100644 --- a/bundle/internal/tf/schema/data_source_sql_warehouse.go +++ b/bundle/internal/tf/schema/data_source_sql_warehouse.go @@ -29,7 +29,7 @@ type DataSourceSqlWarehouse struct { DataSourceId string `json:"data_source_id,omitempty"` EnablePhoton bool `json:"enable_photon,omitempty"` EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` - Id string `json:"id"` + Id string `json:"id,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` JdbcUrl string `json:"jdbc_url,omitempty"` MaxNumClusters int `json:"max_num_clusters,omitempty"` diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 6fbcf680..79658298 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -18,6 +18,8 @@ type DataSources struct { InstancePool map[string]*DataSourceInstancePool `json:"databricks_instance_pool,omitempty"` Job map[string]*DataSourceJob `json:"databricks_job,omitempty"` Jobs map[string]*DataSourceJobs `json:"databricks_jobs,omitempty"` + Metastore map[string]*DataSourceMetastore `json:"databricks_metastore,omitempty"` + Metastores map[string]*DataSourceMetastores `json:"databricks_metastores,omitempty"` MwsCredentials map[string]*DataSourceMwsCredentials `json:"databricks_mws_credentials,omitempty"` MwsWorkspaces map[string]*DataSourceMwsWorkspaces `json:"databricks_mws_workspaces,omitempty"` NodeType map[string]*DataSourceNodeType `json:"databricks_node_type,omitempty"` @@ -55,6 +57,8 @@ func 
NewDataSources() *DataSources { InstancePool: make(map[string]*DataSourceInstancePool), Job: make(map[string]*DataSourceJob), Jobs: make(map[string]*DataSourceJobs), + Metastore: make(map[string]*DataSourceMetastore), + Metastores: make(map[string]*DataSourceMetastores), MwsCredentials: make(map[string]*DataSourceMwsCredentials), MwsWorkspaces: make(map[string]*DataSourceMwsWorkspaces), NodeType: make(map[string]*DataSourceNodeType), diff --git a/bundle/internal/tf/schema/resource_access_control_rule_set.go b/bundle/internal/tf/schema/resource_access_control_rule_set.go new file mode 100644 index 00000000..775c0708 --- /dev/null +++ b/bundle/internal/tf/schema/resource_access_control_rule_set.go @@ -0,0 +1,15 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceAccessControlRuleSetGrantRules struct { + Principals []string `json:"principals,omitempty"` + Role string `json:"role"` +} + +type ResourceAccessControlRuleSet struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + GrantRules []ResourceAccessControlRuleSetGrantRules `json:"grant_rules,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_group.go b/bundle/internal/tf/schema/resource_group.go index 252d2087..7d7860f5 100644 --- a/bundle/internal/tf/schema/resource_group.go +++ b/bundle/internal/tf/schema/resource_group.go @@ -3,6 +3,7 @@ package schema type ResourceGroup struct { + AclPrincipalId string `json:"acl_principal_id,omitempty"` AllowClusterCreate bool `json:"allow_cluster_create,omitempty"` AllowInstancePoolCreate bool `json:"allow_instance_pool_create,omitempty"` DatabricksSqlAccess bool `json:"databricks_sql_access,omitempty"` diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index e3137ea1..77b681ee 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -25,19 +25,37 @@ type ResourceJobDbtTask struct { } type ResourceJobEmailNotifications struct { - AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` - NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` - OnFailure []string `json:"on_failure,omitempty"` - OnStart []string `json:"on_start,omitempty"` - OnSuccess []string `json:"on_success,omitempty"` + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []string `json:"on_failure,omitempty"` + OnStart []string `json:"on_start,omitempty"` + OnSuccess []string `json:"on_success,omitempty"` +} + +type ResourceJobGitSourceJobSource struct { + DirtyState string `json:"dirty_state,omitempty"` + ImportFromGitBranch string `json:"import_from_git_branch"` + JobConfigPath string `json:"job_config_path"` } type ResourceJobGitSource struct { - Branch string `json:"branch,omitempty"` - Commit string `json:"commit,omitempty"` - Provider string `json:"provider,omitempty"` - Tag string `json:"tag,omitempty"` - Url string `json:"url"` + Branch string `json:"branch,omitempty"` + Commit string `json:"commit,omitempty"` + Provider string `json:"provider,omitempty"` + Tag string `json:"tag,omitempty"` + Url string `json:"url"` + JobSource *ResourceJobGitSourceJobSource `json:"job_source,omitempty"` +} + +type ResourceJobHealthRules struct { + Metric string `json:"metric,omitempty"` + Op string 
`json:"op,omitempty"` + Value int `json:"value,omitempty"` +} + +type ResourceJobHealth struct { + Rules []ResourceJobHealthRules `json:"rules,omitempty"` } type ResourceJobJobClusterNewClusterAutoscale struct { @@ -384,7 +402,8 @@ type ResourceJobNotificationSettings struct { } type ResourceJobPipelineTask struct { - PipelineId string `json:"pipeline_id"` + FullRefresh bool `json:"full_refresh,omitempty"` + PipelineId string `json:"pipeline_id"` } type ResourceJobPythonWheelTask struct { @@ -445,11 +464,22 @@ type ResourceJobTaskDependsOn struct { } type ResourceJobTaskEmailNotifications struct { - AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` - NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` - OnFailure []string `json:"on_failure,omitempty"` - OnStart []string `json:"on_start,omitempty"` - OnSuccess []string `json:"on_success,omitempty"` + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []string `json:"on_failure,omitempty"` + OnStart []string `json:"on_start,omitempty"` + OnSuccess []string `json:"on_success,omitempty"` +} + +type ResourceJobTaskHealthRules struct { + Metric string `json:"metric,omitempty"` + Op string `json:"op,omitempty"` + Value int `json:"value,omitempty"` +} + +type ResourceJobTaskHealth struct { + Rules []ResourceJobTaskHealthRules `json:"rules,omitempty"` } type ResourceJobTaskLibraryCran struct { @@ -634,8 +664,15 @@ type ResourceJobTaskNotebookTask struct { Source string `json:"source,omitempty"` } +type ResourceJobTaskNotificationSettings struct { + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` +} + type ResourceJobTaskPipelineTask struct { - PipelineId string `json:"pipeline_id"` + FullRefresh bool `json:"full_refresh,omitempty"` + PipelineId string `json:"pipeline_id"` } type ResourceJobTaskPythonWheelTask struct { @@ -702,29 +739,31 @@ type ResourceJobTaskSqlTask struct { } type ResourceJobTask struct { - ComputeKey string `json:"compute_key,omitempty"` - Description string `json:"description,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key,omitempty"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` - Library []ResourceJobTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` - PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - SparkJarTask 
*ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` + ComputeKey string `json:"compute_key,omitempty"` + Description string `json:"description,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key,omitempty"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *ResourceJobTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` } type ResourceJobTriggerFileArrival struct { @@ -738,6 +777,10 @@ type ResourceJobTrigger struct { FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"` } +type ResourceJobWebhookNotificationsOnDurationWarningThresholdExceeded struct { + Id string `json:"id"` +} + type ResourceJobWebhookNotificationsOnFailure struct { Id string `json:"id"` } @@ -751,13 +794,15 @@ type ResourceJobWebhookNotificationsOnSuccess struct { } type ResourceJobWebhookNotifications struct { - OnFailure []ResourceJobWebhookNotificationsOnFailure `json:"on_failure,omitempty"` - OnStart []ResourceJobWebhookNotificationsOnStart `json:"on_start,omitempty"` - OnSuccess []ResourceJobWebhookNotificationsOnSuccess `json:"on_success,omitempty"` + OnDurationWarningThresholdExceeded []ResourceJobWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []ResourceJobWebhookNotificationsOnFailure `json:"on_failure,omitempty"` + OnStart []ResourceJobWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnSuccess []ResourceJobWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } type ResourceJob struct { AlwaysRunning bool `json:"always_running,omitempty"` + ControlRunState bool `json:"control_run_state,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"` Format string `json:"format,omitempty"` Id string `json:"id,omitempty"` @@ -774,6 +819,7 @@ type ResourceJob struct { DbtTask *ResourceJobDbtTask `json:"dbt_task,omitempty"` EmailNotifications 
*ResourceJobEmailNotifications `json:"email_notifications,omitempty"` GitSource *ResourceJobGitSource `json:"git_source,omitempty"` + Health *ResourceJobHealth `json:"health,omitempty"` JobCluster []ResourceJobJobCluster `json:"job_cluster,omitempty"` Library []ResourceJobLibrary `json:"library,omitempty"` NewCluster *ResourceJobNewCluster `json:"new_cluster,omitempty"` diff --git a/bundle/internal/tf/schema/resource_service_principal.go b/bundle/internal/tf/schema/resource_service_principal.go index bdbce227..5e9943a1 100644 --- a/bundle/internal/tf/schema/resource_service_principal.go +++ b/bundle/internal/tf/schema/resource_service_principal.go @@ -3,6 +3,7 @@ package schema type ResourceServicePrincipal struct { + AclPrincipalId string `json:"acl_principal_id,omitempty"` Active bool `json:"active,omitempty"` AllowClusterCreate bool `json:"allow_cluster_create,omitempty"` AllowInstancePoolCreate bool `json:"allow_instance_pool_create,omitempty"` diff --git a/bundle/internal/tf/schema/resource_user.go b/bundle/internal/tf/schema/resource_user.go index b9644093..2fe57b8b 100644 --- a/bundle/internal/tf/schema/resource_user.go +++ b/bundle/internal/tf/schema/resource_user.go @@ -3,6 +3,7 @@ package schema type ResourceUser struct { + AclPrincipalId string `json:"acl_principal_id,omitempty"` Active bool `json:"active,omitempty"` AllowClusterCreate bool `json:"allow_cluster_create,omitempty"` AllowInstancePoolCreate bool `json:"allow_instance_pool_create,omitempty"` diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 7a0c2eb8..c2361254 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -3,6 +3,7 @@ package schema type Resources struct { + AccessControlRuleSet map[string]*ResourceAccessControlRuleSet `json:"databricks_access_control_rule_set,omitempty"` AwsS3Mount map[string]*ResourceAwsS3Mount `json:"databricks_aws_s3_mount,omitempty"` AzureAdlsGen1Mount map[string]*ResourceAzureAdlsGen1Mount `json:"databricks_azure_adls_gen1_mount,omitempty"` AzureAdlsGen2Mount map[string]*ResourceAzureAdlsGen2Mount `json:"databricks_azure_adls_gen2_mount,omitempty"` @@ -82,6 +83,7 @@ type Resources struct { func NewResources() *Resources { return &Resources{ + AccessControlRuleSet: make(map[string]*ResourceAccessControlRuleSet), AwsS3Mount: make(map[string]*ResourceAwsS3Mount), AzureAdlsGen1Mount: make(map[string]*ResourceAzureAdlsGen1Mount), AzureAdlsGen2Mount: make(map[string]*ResourceAzureAdlsGen2Mount), From f7a76ff5d8677d5567fd61dcaa08485b2ad4fde4 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 7 Aug 2023 11:55:30 +0200 Subject: [PATCH 002/310] Fixed processing jobs libraries with remote path (#638) ## Changes Some library paths, such as those for Spark jobs, can reference a library on a remote path, for example DBFS. This PR fixes how the CLI handles such libraries so that they are no longer reported as missing locally.
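For reference, a minimal standalone sketch of the path classification this patch introduces. The real `isLocalLibrary` in `bundle/libraries/libraries.go` takes a `*compute.Library` and extracts the path via `libPath`; this version takes the path string directly so it can run on its own, and the example paths in `main` are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// A path on DBFS is remote and should not be checked for local existence.
func isDbfsPath(path string) bool {
	return strings.HasPrefix(path, "dbfs:/")
}

// Paths rooted in the workspace file system are also remote.
func isWorkspacePath(path string) bool {
	return strings.HasPrefix(path, "/Workspace/") ||
		strings.HasPrefix(path, "/Users/") ||
		strings.HasPrefix(path, "/Shared/")
}

// A library is only treated as local if its path is non-empty and
// is neither a DBFS path nor a workspace path.
func isLocalLibrary(path string) bool {
	if path == "" {
		return false
	}
	return !isDbfsPath(path) && !isWorkspacePath(path)
}

func main() {
	fmt.Println(isLocalLibrary("./dist/my_test_code-0.0.1.whl"))      // true: local file path
	fmt.Println(isLocalLibrary("dbfs:/path/to/dist/mywheel.whl"))     // false: remote DBFS path
	fmt.Println(isLocalLibrary("/Workspace/Shared/libs/mywheel.whl")) // false: workspace path
}
```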
## Tests Added unit tests + ran `databricks bundle deploy` manually --- bundle/libraries/libraries.go | 18 ++++++++++++++- bundle/tests/bundle/python_wheel/bundle.yml | 2 ++ .../bundle/python_wheel_dbfs_lib/bundle.yml | 15 +++++++++++++ .../python_wheel_no_artifact/bundle.yml | 2 ++ bundle/tests/bundle/wheel_test.go | 22 +++++++++++++++++++ 5 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index f7a2574a..8ccf3fc7 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path/filepath" + "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" @@ -111,5 +112,20 @@ func libPath(library *compute.Library) string { } func isLocalLibrary(library *compute.Library) bool { - return libPath(library) != "" + path := libPath(library) + if path == "" { + return false + } + + return !isDbfsPath(path) && !isWorkspacePath(path) +} + +func isDbfsPath(path string) bool { + return strings.HasPrefix(path, "dbfs:/") +} + +func isWorkspacePath(path string) bool { + return strings.HasPrefix(path, "/Workspace/") || + strings.HasPrefix(path, "/Users/") || + strings.HasPrefix(path, "/Shared/") } diff --git a/bundle/tests/bundle/python_wheel/bundle.yml b/bundle/tests/bundle/python_wheel/bundle.yml index 4e272c9f..c82ff83f 100644 --- a/bundle/tests/bundle/python_wheel/bundle.yml +++ b/bundle/tests/bundle/python_wheel/bundle.yml @@ -17,3 +17,5 @@ resources: python_wheel_task: package_name: "my_test_code" entry_point: "run" + libraries: + - whl: ./my_test_code/dist/*.whl diff --git a/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml b/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml new file mode 100644 index 00000000..54577d65 --- /dev/null +++ b/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml @@ -0,0 +1,15 @@ +bundle: + name: python-wheel + +resources: + jobs: + test_job: + name: "[${bundle.environment}] My Wheel Job" + tasks: + - task_key: TestTask + existing_cluster_id: "0717-132531-5opeqon1" + python_wheel_task: + package_name: "my_test_code" + entry_point: "run" + libraries: + - whl: dbfs://path/to/dist/mywheel.whl diff --git a/bundle/tests/bundle/python_wheel_no_artifact/bundle.yml b/bundle/tests/bundle/python_wheel_no_artifact/bundle.yml index 10908672..88cb47be 100644 --- a/bundle/tests/bundle/python_wheel_no_artifact/bundle.yml +++ b/bundle/tests/bundle/python_wheel_no_artifact/bundle.yml @@ -11,3 +11,5 @@ resources: python_wheel_task: package_name: "my_test_code" entry_point: "run" + libraries: + - whl: ./dist/*.whl diff --git a/bundle/tests/bundle/wheel_test.go b/bundle/tests/bundle/wheel_test.go index 2290e47c..bfc1fa04 100644 --- a/bundle/tests/bundle/wheel_test.go +++ b/bundle/tests/bundle/wheel_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/bundle/phases" "github.com/stretchr/testify/require" ) @@ -21,6 +22,10 @@ func TestBundlePythonWheelBuild(t *testing.T) { matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") require.NoError(t, err) require.Equal(t, 1, len(matches)) + + match := libraries.MatchWithArtifacts() + err = match.Apply(context.Background(), b) + require.NoError(t, err) } func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { @@ -34,4 +39,21 @@ func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { 
matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") require.NoError(t, err) require.Equal(t, 1, len(matches)) + + match := libraries.MatchWithArtifacts() + err = match.Apply(context.Background(), b) + require.NoError(t, err) +} + +func TestBundlePythonWheelWithDBFSLib(t *testing.T) { + b, err := bundle.Load("./python_wheel_dbfs_lib") + require.NoError(t, err) + + m := phases.Build() + err = m.Apply(context.Background(), b) + require.NoError(t, err) + + match := libraries.MatchWithArtifacts() + err = match.Apply(context.Background(), b) + require.NoError(t, err) +} From 55e62366fa48aba8c28aaf0c297b47658861ff69 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 7 Aug 2023 14:44:01 +0200 Subject: [PATCH 003/310] Add unit test for file name execution during rendering (#640) ## Changes Adds a unit test verifying that directory and file names in the file tree are executed as templates --- libs/template/renderer_test.go | 25 +++++++++++++++++++ .../{{.dir_name}}/{{.file_name}}.tmpl | 0 2 files changed, 25 insertions(+) create mode 100644 libs/template/testdata/file-tree-rendering/template/{{.dir_name}}/{{.file_name}}.tmpl diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 8cd89ae9..37b94b1e 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -434,3 +434,28 @@ func TestRendererNonTemplatesAreCreatedAsCopyFiles(t *testing.T) { assert.Equal(t, r.files[0].(*copyFile).srcPath, "not-a-template") assert.Equal(t, r.files[0].DstPath().absPath(), filepath.Join(tmpDir, "not-a-template")) } + +func TestRendererFileTreeRendering(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + r, err := newRenderer(ctx, map[string]any{ + "dir_name": "my_directory", + "file_name": "my_file", + }, "./testdata/file-tree-rendering/template", "./testdata/file-tree-rendering/library", tmpDir) + require.NoError(t, err) + + err = r.walk() + assert.NoError(t, err) + + // Assert in memory representation is created. + assert.Len(t, r.files, 1) + assert.Equal(t, r.files[0].DstPath().absPath(), filepath.Join(tmpDir, "my_directory", "my_file")) + + err = r.persistToDisk() + require.NoError(t, err) + + // Assert files and directories are correctly materialized. + assert.DirExists(t, filepath.Join(tmpDir, "my_directory")) + assert.FileExists(t, filepath.Join(tmpDir, "my_directory", "my_file")) +} diff --git a/libs/template/testdata/file-tree-rendering/template/{{.dir_name}}/{{.file_name}}.tmpl b/libs/template/testdata/file-tree-rendering/template/{{.dir_name}}/{{.file_name}}.tmpl new file mode 100644 index 00000000..e69de29b From 81ee031a0415ab36442cbabacfee5da875a7de62 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 7 Aug 2023 15:14:25 +0200 Subject: [PATCH 004/310] Add bundle init command and support for prompting user for input values (#631) ## Changes This PR adds two features: 1. The bundle init command 2. Support for prompting the user for input values In order to do this, this PR also introduces a new `config` struct which handles reading config files, prompting users, and running all validation steps before we materialize the template. With this PR, users can start authoring custom templates, based on Go text templates, for their projects and orgs.
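To make the new workflow concrete: per `materialize.go` below, a template root contains a `template/` directory, an optional `library/` directory, and a `databricks_template_schema.json` file declaring the input parameters. A hypothetical schema (the property names here are invented for illustration; `type`, `default`, and `description` are the fields the new `config` struct consumes, with `description` used as the prompt label) could look like:

```json
{
  "properties": {
    "project_name": {
      "type": "string",
      "default": "my_project",
      "description": "Name of the project"
    },
    "num_workers": {
      "type": "integer",
      "default": 2,
      "description": "Number of workers for the job cluster"
    }
  }
}
```

Running `databricks bundle init ./my-template --project-dir ./out` then prompts for each property that does not yet have a value (falling back to the declared defaults when the terminal is not a TTY), while `--config-file values.json` can supply answers up front.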
## Tests Unit tests, both existing and new --- cmd/bundle/bundle.go | 1 + cmd/bundle/init.go | 79 +++++ cmd/bundle/init_test.go | 27 ++ libs/template/config.go | 198 +++++++++++++ libs/template/config_test.go | 163 +++++++++++ libs/template/materialize.go | 60 ++++ libs/template/schema.go | 121 -------- libs/template/schema_test.go | 274 ------------------ .../config.json | 6 + .../config.json | 3 + .../config-assign-from-file/config.json | 6 + libs/template/utils.go | 99 +++++++ libs/template/utils_test.go | 115 ++++++++ libs/template/validators.go | 4 +- libs/template/validators_test.go | 61 +++- 15 files changed, 815 insertions(+), 402 deletions(-) create mode 100644 cmd/bundle/init.go create mode 100644 cmd/bundle/init_test.go create mode 100644 libs/template/config.go create mode 100644 libs/template/config_test.go create mode 100644 libs/template/materialize.go delete mode 100644 libs/template/schema.go delete mode 100644 libs/template/schema_test.go create mode 100644 libs/template/testdata/config-assign-from-file-invalid-int/config.json create mode 100644 libs/template/testdata/config-assign-from-file-unknown-property/config.json create mode 100644 libs/template/testdata/config-assign-from-file/config.json create mode 100644 libs/template/utils.go create mode 100644 libs/template/utils_test.go diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go index 8d1216f8..c933ec9c 100644 --- a/cmd/bundle/bundle.go +++ b/cmd/bundle/bundle.go @@ -19,5 +19,6 @@ func New() *cobra.Command { cmd.AddCommand(newSyncCommand()) cmd.AddCommand(newTestCommand()) cmd.AddCommand(newValidateCommand()) + cmd.AddCommand(newInitCommand()) return cmd } diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go new file mode 100644 index 00000000..e3d76ecf --- /dev/null +++ b/cmd/bundle/init.go @@ -0,0 +1,79 @@ +package bundle + +import ( + "os" + "path/filepath" + "strings" + + "github.com/databricks/cli/libs/git" + "github.com/databricks/cli/libs/template" + "github.com/spf13/cobra" +) + +var gitUrlPrefixes = []string{ + "https://", + "git@", +} + +func isRepoUrl(url string) bool { + result := false + for _, prefix := range gitUrlPrefixes { + if strings.HasPrefix(url, prefix) { + result = true + break + } + } + return result +} + +// Computes the repo name from the repo URL. Treats the last non empty word +// when splitting at '/' as the repo name. For example: for url git@github.com:databricks/cli.git +// the name would be "cli.git" +func repoName(url string) string { + parts := strings.Split(strings.TrimRight(url, "/"), "/") + return parts[len(parts)-1] +} + +func newInitCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "init TEMPLATE_PATH", + Short: "Initialize Template", + Args: cobra.ExactArgs(1), + } + + var configFile string + var projectDir string + cmd.Flags().StringVar(&configFile, "config-file", "", "File containing input parameters for template initialization.") + cmd.Flags().StringVar(&projectDir, "project-dir", "", "The project will be initialized in this directory.") + cmd.MarkFlagRequired("project-dir") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + templatePath := args[0] + ctx := cmd.Context() + + if !isRepoUrl(templatePath) { + // skip downloading the repo because input arg is not a URL. 
We assume + // it's a path on the local file system in that case + return template.Materialize(ctx, configFile, templatePath, projectDir) + } + + // Download the template in a temporary directory + tmpDir := os.TempDir() + templateURL := templatePath + templateDir := filepath.Join(tmpDir, repoName(templateURL)) + err := os.MkdirAll(templateDir, 0755) + if err != nil { + return err + } + // TODO: Add automated test that the downloaded git repo is cleaned up. + err = git.Clone(ctx, templateURL, "", templateDir) + if err != nil { + return err + } + defer os.RemoveAll(templateDir) + + return template.Materialize(ctx, configFile, templateDir, projectDir) + } + + return cmd +} diff --git a/cmd/bundle/init_test.go b/cmd/bundle/init_test.go new file mode 100644 index 00000000..4a795160 --- /dev/null +++ b/cmd/bundle/init_test.go @@ -0,0 +1,27 @@ +package bundle + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBundleInitIsRepoUrl(t *testing.T) { + assert.True(t, isRepoUrl("git@github.com:databricks/cli.git")) + assert.True(t, isRepoUrl("https://github.com/databricks/cli.git")) + + assert.False(t, isRepoUrl("./local")) + assert.False(t, isRepoUrl("foo")) +} + +func TestBundleInitRepoName(t *testing.T) { + // Test valid URLs + assert.Equal(t, "cli.git", repoName("git@github.com:databricks/cli.git")) + assert.Equal(t, "cli", repoName("https://github.com/databricks/cli/")) + + // test invalid URLs. In these cases the error would be floated when the + // git clone operation fails. + assert.Equal(t, "git@github.com:databricks", repoName("git@github.com:databricks")) + assert.Equal(t, "invalid-url", repoName("invalid-url")) + assert.Equal(t, "www.github.com", repoName("https://www.github.com")) +} diff --git a/libs/template/config.go b/libs/template/config.go new file mode 100644 index 00000000..ee5fcbef --- /dev/null +++ b/libs/template/config.go @@ -0,0 +1,198 @@ +package template + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/jsonschema" +) + +type config struct { + ctx context.Context + values map[string]any + schema *jsonschema.Schema +} + +func newConfig(ctx context.Context, schemaPath string) (*config, error) { + // Read config schema + schemaBytes, err := os.ReadFile(schemaPath) + if err != nil { + return nil, err + } + schema := &jsonschema.Schema{} + err = json.Unmarshal(schemaBytes, schema) + if err != nil { + return nil, err + } + + // Return config + return &config{ + ctx: ctx, + schema: schema, + values: make(map[string]any, 0), + }, nil +} + +// Reads json file at path and assigns values from the file +func (c *config) assignValuesFromFile(path string) error { + // Read the config file + configFromFile := make(map[string]any, 0) + b, err := os.ReadFile(path) + if err != nil { + return err + } + err = json.Unmarshal(b, &configFromFile) + if err != nil { + return err + } + + // Cast any integer properties, from float to integer. 
Required because + // the json unmarshaller treats all json numbers as floating point + for name, floatVal := range configFromFile { + property, ok := c.schema.Properties[name] + if !ok { + return fmt.Errorf("%s is not defined as an input parameter for the template", name) + } + if property.Type != jsonschema.IntegerType { + continue + } + v, err := toInteger(floatVal) + if err != nil { + return fmt.Errorf("failed to cast value %v of property %s from file %s to an integer: %w", floatVal, name, path, err) + } + configFromFile[name] = v + } + + // Write configs from the file to the input map, not overwriting any existing + // configurations. + for name, val := range configFromFile { + if _, ok := c.values[name]; ok { + continue + } + c.values[name] = val + } + return nil +} + +// Assigns default values from schema to input config map +func (c *config) assignDefaultValues() error { + for name, property := range c.schema.Properties { + // Config already has a value assigned + if _, ok := c.values[name]; ok { + continue + } + + // No default value defined for the property + if property.Default == nil { + continue + } + + // Assign default value if property is not an integer + if property.Type != jsonschema.IntegerType { + c.values[name] = property.Default + continue + } + + // Cast default value to int before assigning to an integer configuration. + // Required because untyped field Default will read all numbers as floats + // during unmarshalling + v, err := toInteger(property.Default) + if err != nil { + return fmt.Errorf("failed to cast default value %v of property %s to an integer: %w", property.Default, name, err) + } + c.values[name] = v + } + return nil +} + +// Prompts user for values for properties that do not have a value set yet +func (c *config) promptForValues() error { + for name, property := range c.schema.Properties { + // Config already has a value assigned + if _, ok := c.values[name]; ok { + continue + } + + // Initialize Prompt dialog + var err error + prompt := cmdio.Prompt(c.ctx) + prompt.Label = property.Description + prompt.AllowEdit = true + + // Compute default value to display by converting it to a string + if property.Default != nil { + prompt.Default, err = toString(property.Default, property.Type) + if err != nil { + return err + } + } + + // Get user input by running the prompt + userInput, err := prompt.Run() + if err != nil { + return err + } + + // Convert user input string back to a value + c.values[name], err = fromString(userInput, property.Type) + if err != nil { + return err + } + } + return nil +} + +// Prompt user for any missing config values. Assign default values if +// terminal is not TTY +func (c *config) promptOrAssignDefaultValues() error { + if cmdio.IsOutTTY(c.ctx) && cmdio.IsInTTY(c.ctx) { + return c.promptForValues() + } + return c.assignDefaultValues() +} + +// Validates the configuration. If passes, the configuration is ready to be used +// to initialize the template. 
+func (c *config) validate() error { + validateFns := []func() error{ + c.validateValuesDefined, + c.validateValuesType, + } + + for _, fn := range validateFns { + err := fn() + if err != nil { + return err + } + } + return nil +} + +// Validates all input properties have a user defined value assigned to them +func (c *config) validateValuesDefined() error { + for k := range c.schema.Properties { + if _, ok := c.values[k]; ok { + continue + } + return fmt.Errorf("no value has been assigned to input parameter %s", k) + } + return nil +} + +// Validates the types of all input properties values match their types defined in the schema +func (c *config) validateValuesType() error { + for k, v := range c.values { + fieldInfo, ok := c.schema.Properties[k] + if !ok { + return fmt.Errorf("%s is not defined as an input parameter for the template", k) + } + err := validateType(v, fieldInfo.Type) + if err != nil { + return fmt.Errorf("incorrect type for %s. %w", k, err) + } + } + return nil +} diff --git a/libs/template/config_test.go b/libs/template/config_test.go new file mode 100644 index 00000000..7b8341ec --- /dev/null +++ b/libs/template/config_test.go @@ -0,0 +1,163 @@ +package template + +import ( + "encoding/json" + "testing" + + "github.com/databricks/cli/libs/jsonschema" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func testSchema(t *testing.T) *jsonschema.Schema { + schemaJson := `{ + "properties": { + "int_val": { + "type": "integer", + "default": 123 + }, + "float_val": { + "type": "number" + }, + "bool_val": { + "type": "boolean" + }, + "string_val": { + "type": "string", + "default": "abc" + } + } + }` + var jsonSchema jsonschema.Schema + err := json.Unmarshal([]byte(schemaJson), &jsonSchema) + require.NoError(t, err) + return &jsonSchema +} + +func TestTemplateConfigAssignValuesFromFile(t *testing.T) { + c := config{ + schema: testSchema(t), + values: make(map[string]any), + } + + err := c.assignValuesFromFile("./testdata/config-assign-from-file/config.json") + assert.NoError(t, err) + + assert.Equal(t, int64(1), c.values["int_val"]) + assert.Equal(t, float64(2), c.values["float_val"]) + assert.Equal(t, true, c.values["bool_val"]) + assert.Equal(t, "hello", c.values["string_val"]) +} + +func TestTemplateConfigAssignValuesFromFileForUnknownField(t *testing.T) { + c := config{ + schema: testSchema(t), + values: make(map[string]any), + } + + err := c.assignValuesFromFile("./testdata/config-assign-from-file-unknown-property/config.json") + assert.EqualError(t, err, "unknown_prop is not defined as an input parameter for the template") +} + +func TestTemplateConfigAssignValuesFromFileForInvalidIntegerValue(t *testing.T) { + c := config{ + schema: testSchema(t), + values: make(map[string]any), + } + + err := c.assignValuesFromFile("./testdata/config-assign-from-file-invalid-int/config.json") + assert.EqualError(t, err, "failed to cast value abc of property int_val from file ./testdata/config-assign-from-file-invalid-int/config.json to an integer: cannot convert \"abc\" to an integer") +} + +func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *testing.T) { + c := config{ + schema: testSchema(t), + values: map[string]any{ + "string_val": "this-is-not-overwritten", + }, + } + + err := c.assignValuesFromFile("./testdata/config-assign-from-file/config.json") + assert.NoError(t, err) + + assert.Equal(t, int64(1), c.values["int_val"]) + assert.Equal(t, float64(2), c.values["float_val"]) + assert.Equal(t, true, c.values["bool_val"]) + 
assert.Equal(t, "this-is-not-overwritten", c.values["string_val"]) +} + +func TestTemplateConfigAssignDefaultValues(t *testing.T) { + c := config{ + schema: testSchema(t), + values: make(map[string]any), + } + + err := c.assignDefaultValues() + assert.NoError(t, err) + + assert.Len(t, c.values, 2) + assert.Equal(t, "abc", c.values["string_val"]) + assert.Equal(t, int64(123), c.values["int_val"]) +} + +func TestTemplateConfigValidateValuesDefined(t *testing.T) { + c := config{ + schema: testSchema(t), + values: map[string]any{ + "int_val": 1, + "float_val": 1.0, + "bool_val": false, + }, + } + + err := c.validateValuesDefined() + assert.EqualError(t, err, "no value has been assigned to input parameter string_val") +} + +func TestTemplateConfigValidateTypeForValidConfig(t *testing.T) { + c := &config{ + schema: testSchema(t), + values: map[string]any{ + "int_val": 1, + "float_val": 1.1, + "bool_val": true, + "string_val": "abcd", + }, + } + + err := c.validateValuesType() + assert.NoError(t, err) + + err = c.validate() + assert.NoError(t, err) +} + +func TestTemplateConfigValidateTypeForUnknownField(t *testing.T) { + c := &config{ + schema: testSchema(t), + values: map[string]any{ + "unknown_prop": 1, + }, + } + + err := c.validateValuesType() + assert.EqualError(t, err, "unknown_prop is not defined as an input parameter for the template") +} + +func TestTemplateConfigValidateTypeForInvalidType(t *testing.T) { + c := &config{ + schema: testSchema(t), + values: map[string]any{ + "int_val": "this-should-be-an-int", + "float_val": 1.1, + "bool_val": true, + "string_val": "abcd", + }, + } + + err := c.validateValuesType() + assert.EqualError(t, err, `incorrect type for int_val. expected type integer, but value is "this-should-be-an-int"`) + + err = c.validate() + assert.EqualError(t, err, `incorrect type for int_val. expected type integer, but value is "this-should-be-an-int"`) +} diff --git a/libs/template/materialize.go b/libs/template/materialize.go new file mode 100644 index 00000000..bbc9e8da --- /dev/null +++ b/libs/template/materialize.go @@ -0,0 +1,60 @@ +package template + +import ( + "context" + "path/filepath" +) + +const libraryDirName = "library" +const templateDirName = "template" +const schemaFileName = "databricks_template_schema.json" + +// This function materializes the input templates as a project, using user defined +// configurations. +// Parameters: +// +// ctx: context containing a cmdio object. This is used to prompt the user +// configFilePath: file path containing user defined config values +// templateRoot: root of the template definition +// projectDir: root of directory where to initialize the project +func Materialize(ctx context.Context, configFilePath, templateRoot, projectDir string) error { + templatePath := filepath.Join(templateRoot, templateDirName) + libraryPath := filepath.Join(templateRoot, libraryDirName) + schemaPath := filepath.Join(templateRoot, schemaFileName) + + config, err := newConfig(ctx, schemaPath) + if err != nil { + return err + } + + // Read and assign config values from file + if configFilePath != "" { + err = config.assignValuesFromFile(configFilePath) + if err != nil { + return err + } + } + + // Prompt user for any missing config values. 
Assign default values if + // terminal is not TTY + err = config.promptOrAssignDefaultValues() + if err != nil { + return err + } + + err = config.validate() + if err != nil { + return err + } + + // Walk and render the template, since input configuration is complete + r, err := newRenderer(ctx, config.values, templatePath, libraryPath, projectDir) + if err != nil { + return err + } + err = r.walk() + if err != nil { + return err + } + return r.persistToDisk() +} diff --git a/libs/template/schema.go b/libs/template/schema.go deleted file mode 100644 index 957cd66c..00000000 --- a/libs/template/schema.go +++ /dev/null @@ -1,121 +0,0 @@ -package template - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/databricks/cli/libs/jsonschema" -) - -// function to check whether a float value represents an integer -func isIntegerValue(v float64) bool { - return v == float64(int(v)) -} - -// cast value to integer for config values that are floats but are supposed to be -// integers according to the schema -// -// Needed because the default json unmarshaler for maps converts all numbers to floats -func castFloatConfigValuesToInt(config map[string]any, jsonSchema *jsonschema.Schema) error { - for k, v := range config { - // error because all config keys should be defined in schema too - fieldInfo, ok := jsonSchema.Properties[k] - if !ok { - return fmt.Errorf("%s is not defined as an input parameter for the template", k) - } - // skip non integer fields - if fieldInfo.Type != jsonschema.IntegerType { - continue - } - - // convert floating point type values to integer - switch floatVal := v.(type) { - case float32: - if !isIntegerValue(float64(floatVal)) { - return fmt.Errorf("expected %s to have integer value but it is %v", k, v) - } - config[k] = int(floatVal) - case float64: - if !isIntegerValue(floatVal) { - return fmt.Errorf("expected %s to have integer value but it is %v", k, v) - } - config[k] = int(floatVal) - } - } - return nil -} - -func assignDefaultConfigValues(config map[string]any, schema *jsonschema.Schema) error { - for k, v := range schema.Properties { - if _, ok := config[k]; ok { - continue - } - if v.Default == nil { - return fmt.Errorf("input parameter %s is not defined in config", k) - } - config[k] = v.Default - } - return nil -} - -func validateConfigValueTypes(config map[string]any, schema *jsonschema.Schema) error { - // validate types defined in config - for k, v := range config { - fieldInfo, ok := schema.Properties[k] - if !ok { - return fmt.Errorf("%s is not defined as an input parameter for the template", k) - } - err := validateType(v, fieldInfo.Type) - if err != nil { - return fmt.Errorf("incorrect type for %s. %w", k, err) - } - } - return nil -} - -func ReadSchema(path string) (*jsonschema.Schema, error) { - schemaBytes, err := os.ReadFile(path) - if err != nil { - return nil, err - } - schema := &jsonschema.Schema{} - err = json.Unmarshal(schemaBytes, schema) - if err != nil { - return nil, err - } - return schema, nil -} - -func ReadConfig(path string, jsonSchema *jsonschema.Schema) (map[string]any, error) { - // Read config file - var config map[string]any - b, err := os.ReadFile(path) - if err != nil { - return nil, err - } - err = json.Unmarshal(b, &config) - if err != nil { - return nil, err - } - - // Assign default value to any fields that do not have a value yet - err = assignDefaultConfigValues(config, jsonSchema) - if err != nil { - return nil, err - } - - // cast any fields that are supposed to be integers. 
The json unmarshalling - // for a generic map converts all numbers to floating point - err = castFloatConfigValuesToInt(config, jsonSchema) - if err != nil { - return nil, err - } - - // validate config according to schema - err = validateConfigValueTypes(config, jsonSchema) - if err != nil { - return nil, err - } - return config, nil -} diff --git a/libs/template/schema_test.go b/libs/template/schema_test.go deleted file mode 100644 index ba30f81a..00000000 --- a/libs/template/schema_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package template - -import ( - "encoding/json" - "testing" - - "github.com/databricks/cli/libs/jsonschema" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func testSchema(t *testing.T) *jsonschema.Schema { - schemaJson := `{ - "properties": { - "int_val": { - "type": "integer" - }, - "float_val": { - "type": "number" - }, - "bool_val": { - "type": "boolean" - }, - "string_val": { - "type": "string" - } - } - }` - var jsonSchema jsonschema.Schema - err := json.Unmarshal([]byte(schemaJson), &jsonSchema) - require.NoError(t, err) - return &jsonSchema -} - -func TestTemplateSchemaIsInteger(t *testing.T) { - assert.False(t, isIntegerValue(1.1)) - assert.False(t, isIntegerValue(0.1)) - assert.False(t, isIntegerValue(-0.1)) - - assert.True(t, isIntegerValue(-1.0)) - assert.True(t, isIntegerValue(0.0)) - assert.True(t, isIntegerValue(2.0)) -} - -func TestTemplateSchemaCastFloatToInt(t *testing.T) { - // define schema for config - jsonSchema := testSchema(t) - - // define the config - configJson := `{ - "int_val": 1, - "float_val": 2, - "bool_val": true, - "string_val": "main hoon na" - }` - var config map[string]any - err := json.Unmarshal([]byte(configJson), &config) - require.NoError(t, err) - - // assert types before casting, checking that the integer was indeed loaded - // as a floating point - assert.IsType(t, float64(0), config["int_val"]) - assert.IsType(t, float64(0), config["float_val"]) - assert.IsType(t, true, config["bool_val"]) - assert.IsType(t, "abc", config["string_val"]) - - err = castFloatConfigValuesToInt(config, jsonSchema) - require.NoError(t, err) - - // assert type after casting, that the float value was converted to an integer - // for int_val. 
- assert.IsType(t, int(0), config["int_val"]) - assert.IsType(t, float64(0), config["float_val"]) - assert.IsType(t, true, config["bool_val"]) - assert.IsType(t, "abc", config["string_val"]) -} - -func TestTemplateSchemaCastFloatToIntFailsForUnknownTypes(t *testing.T) { - // define schema for config - schemaJson := `{ - "properties": { - "foo": { - "type": "integer" - } - } - }` - var jsonSchema jsonschema.Schema - err := json.Unmarshal([]byte(schemaJson), &jsonSchema) - require.NoError(t, err) - - // define the config - configJson := `{ - "bar": true - }` - var config map[string]any - err = json.Unmarshal([]byte(configJson), &config) - require.NoError(t, err) - - err = castFloatConfigValuesToInt(config, &jsonSchema) - assert.ErrorContains(t, err, "bar is not defined as an input parameter for the template") -} - -func TestTemplateSchemaCastFloatToIntFailsWhenWithNonIntValues(t *testing.T) { - // define schema for config - schemaJson := `{ - "properties": { - "foo": { - "type": "integer" - } - } - }` - var jsonSchema jsonschema.Schema - err := json.Unmarshal([]byte(schemaJson), &jsonSchema) - require.NoError(t, err) - - // define the config - configJson := `{ - "foo": 1.1 - }` - var config map[string]any - err = json.Unmarshal([]byte(configJson), &config) - require.NoError(t, err) - - err = castFloatConfigValuesToInt(config, &jsonSchema) - assert.ErrorContains(t, err, "expected foo to have integer value but it is 1.1") -} - -func TestTemplateSchemaValidateType(t *testing.T) { - // assert validation passing - err := validateType(int(0), jsonschema.IntegerType) - assert.NoError(t, err) - err = validateType(int32(1), jsonschema.IntegerType) - assert.NoError(t, err) - err = validateType(int64(1), jsonschema.IntegerType) - assert.NoError(t, err) - - err = validateType(float32(1.1), jsonschema.NumberType) - assert.NoError(t, err) - err = validateType(float64(1.2), jsonschema.NumberType) - assert.NoError(t, err) - err = validateType(int(1), jsonschema.NumberType) - assert.NoError(t, err) - - err = validateType(false, jsonschema.BooleanType) - assert.NoError(t, err) - - err = validateType("abc", jsonschema.StringType) - assert.NoError(t, err) - - // assert validation failing for integers - err = validateType(float64(1.2), jsonschema.IntegerType) - assert.ErrorContains(t, err, "expected type integer, but value is 1.2") - err = validateType(true, jsonschema.IntegerType) - assert.ErrorContains(t, err, "expected type integer, but value is true") - err = validateType("abc", jsonschema.IntegerType) - assert.ErrorContains(t, err, "expected type integer, but value is \"abc\"") - - // assert validation failing for floats - err = validateType(true, jsonschema.NumberType) - assert.ErrorContains(t, err, "expected type float, but value is true") - err = validateType("abc", jsonschema.NumberType) - assert.ErrorContains(t, err, "expected type float, but value is \"abc\"") - - // assert validation failing for boolean - err = validateType(int(1), jsonschema.BooleanType) - assert.ErrorContains(t, err, "expected type boolean, but value is 1") - err = validateType(float64(1), jsonschema.BooleanType) - assert.ErrorContains(t, err, "expected type boolean, but value is 1") - err = validateType("abc", jsonschema.BooleanType) - assert.ErrorContains(t, err, "expected type boolean, but value is \"abc\"") - - // assert validation failing for string - err = validateType(int(1), jsonschema.StringType) - assert.ErrorContains(t, err, "expected type string, but value is 1") - err = validateType(float64(1), jsonschema.StringType) - 
assert.ErrorContains(t, err, "expected type string, but value is 1") - err = validateType(false, jsonschema.StringType) - assert.ErrorContains(t, err, "expected type string, but value is false") -} - -func TestTemplateSchemaValidateConfig(t *testing.T) { - // define schema for config - jsonSchema := testSchema(t) - - // define the config - config := map[string]any{ - "int_val": 1, - "float_val": 1.1, - "bool_val": true, - "string_val": "abc", - } - - err := validateConfigValueTypes(config, jsonSchema) - assert.NoError(t, err) -} - -func TestTemplateSchemaValidateConfigFailsForUnknownField(t *testing.T) { - // define schema for config - jsonSchema := testSchema(t) - - // define the config - config := map[string]any{ - "foo": 1, - "float_val": 1.1, - "bool_val": true, - "string_val": "abc", - } - - err := validateConfigValueTypes(config, jsonSchema) - assert.ErrorContains(t, err, "foo is not defined as an input parameter for the template") -} - -func TestTemplateSchemaValidateConfigFailsForWhenIncorrectTypes(t *testing.T) { - // define schema for config - jsonSchema := testSchema(t) - - // define the config - config := map[string]any{ - "int_val": 1, - "float_val": 1.1, - "bool_val": "true", - "string_val": "abc", - } - - err := validateConfigValueTypes(config, jsonSchema) - assert.ErrorContains(t, err, "incorrect type for bool_val. expected type boolean, but value is \"true\"") -} - -func TestTemplateSchemaValidateConfigFailsForWhenMissingInputParams(t *testing.T) { - // define schema for config - schemaJson := `{ - "properties": { - "int_val": { - "type": "integer" - }, - "string_val": { - "type": "string" - } - } - }` - var jsonSchema jsonschema.Schema - err := json.Unmarshal([]byte(schemaJson), &jsonSchema) - require.NoError(t, err) - - // define the config - config := map[string]any{ - "int_val": 1, - } - - err = assignDefaultConfigValues(config, &jsonSchema) - assert.ErrorContains(t, err, "input parameter string_val is not defined in config") -} - -func TestTemplateDefaultAssignment(t *testing.T) { - // define schema for config - schemaJson := `{ - "properties": { - "foo": { - "type": "integer", - "default": 1 - } - } - }` - var jsonSchema jsonschema.Schema - err := json.Unmarshal([]byte(schemaJson), &jsonSchema) - require.NoError(t, err) - - // define the config - config := map[string]any{} - - err = assignDefaultConfigValues(config, &jsonSchema) - assert.NoError(t, err) - assert.Equal(t, 1.0, config["foo"]) -} diff --git a/libs/template/testdata/config-assign-from-file-invalid-int/config.json b/libs/template/testdata/config-assign-from-file-invalid-int/config.json new file mode 100644 index 00000000..a97bf0c2 --- /dev/null +++ b/libs/template/testdata/config-assign-from-file-invalid-int/config.json @@ -0,0 +1,6 @@ +{ + "int_val": "abc", + "float_val": 2, + "bool_val": true, + "string_val": "hello" +} diff --git a/libs/template/testdata/config-assign-from-file-unknown-property/config.json b/libs/template/testdata/config-assign-from-file-unknown-property/config.json new file mode 100644 index 00000000..518eaa6a --- /dev/null +++ b/libs/template/testdata/config-assign-from-file-unknown-property/config.json @@ -0,0 +1,3 @@ +{ + "unknown_prop": 123 +} diff --git a/libs/template/testdata/config-assign-from-file/config.json b/libs/template/testdata/config-assign-from-file/config.json new file mode 100644 index 00000000..564001e5 --- /dev/null +++ b/libs/template/testdata/config-assign-from-file/config.json @@ -0,0 +1,6 @@ +{ + "int_val": 1, + "float_val": 2, + "bool_val": true, + 
"string_val": "hello" +} diff --git a/libs/template/utils.go b/libs/template/utils.go new file mode 100644 index 00000000..bf11ed86 --- /dev/null +++ b/libs/template/utils.go @@ -0,0 +1,99 @@ +package template + +import ( + "errors" + "fmt" + "strconv" + + "github.com/databricks/cli/libs/jsonschema" +) + +// function to check whether a float value represents an integer +func isIntegerValue(v float64) bool { + return v == float64(int64(v)) +} + +func toInteger(v any) (int64, error) { + switch typedVal := v.(type) { + // cast float to int + case float32: + if !isIntegerValue(float64(typedVal)) { + return 0, fmt.Errorf("expected integer value, got: %v", v) + } + return int64(typedVal), nil + case float64: + if !isIntegerValue(typedVal) { + return 0, fmt.Errorf("expected integer value, got: %v", v) + } + return int64(typedVal), nil + + // pass through common integer cases + case int: + return int64(typedVal), nil + case int32: + return int64(typedVal), nil + case int64: + return typedVal, nil + + default: + return 0, fmt.Errorf("cannot convert %#v to an integer", v) + } +} + +func toString(v any, T jsonschema.Type) (string, error) { + switch T { + case jsonschema.BooleanType: + boolVal, ok := v.(bool) + if !ok { + return "", fmt.Errorf("expected bool, got: %#v", v) + } + return strconv.FormatBool(boolVal), nil + case jsonschema.StringType: + strVal, ok := v.(string) + if !ok { + return "", fmt.Errorf("expected string, got: %#v", v) + } + return strVal, nil + case jsonschema.NumberType: + floatVal, ok := v.(float64) + if !ok { + return "", fmt.Errorf("expected float, got: %#v", v) + } + return strconv.FormatFloat(floatVal, 'f', -1, 64), nil + case jsonschema.IntegerType: + intVal, err := toInteger(v) + if err != nil { + return "", err + } + return strconv.FormatInt(intVal, 10), nil + default: + return "", fmt.Errorf("cannot format object of type %s as a string. Value of object: %#v", T, v) + } +} + +func fromString(s string, T jsonschema.Type) (any, error) { + if T == jsonschema.StringType { + return s, nil + } + + // Variables to store value and error from parsing + var v any + var err error + + switch T { + case jsonschema.BooleanType: + v, err = strconv.ParseBool(s) + case jsonschema.NumberType: + v, err = strconv.ParseFloat(s, 32) + case jsonschema.IntegerType: + v, err = strconv.ParseInt(s, 10, 64) + default: + return "", fmt.Errorf("cannot parse string as object of type %s. 
Value of string: %q", T, s) + } + + // Return more readable error incase of a syntax error + if errors.Is(err, strconv.ErrSyntax) { + return nil, fmt.Errorf("could not parse %q as a %s: %w", s, T, err) + } + return v, err +} diff --git a/libs/template/utils_test.go b/libs/template/utils_test.go new file mode 100644 index 00000000..5fe70243 --- /dev/null +++ b/libs/template/utils_test.go @@ -0,0 +1,115 @@ +package template + +import ( + "math" + "testing" + + "github.com/databricks/cli/libs/jsonschema" + "github.com/stretchr/testify/assert" +) + +func TestTemplateIsInteger(t *testing.T) { + assert.False(t, isIntegerValue(1.1)) + assert.False(t, isIntegerValue(0.1)) + assert.False(t, isIntegerValue(-0.1)) + + assert.True(t, isIntegerValue(-1.0)) + assert.True(t, isIntegerValue(0.0)) + assert.True(t, isIntegerValue(2.0)) +} + +func TestTemplateToInteger(t *testing.T) { + v, err := toInteger(float32(2)) + assert.NoError(t, err) + assert.Equal(t, int64(2), v) + + v, err = toInteger(float64(4)) + assert.NoError(t, err) + assert.Equal(t, int64(4), v) + + v, err = toInteger(float64(4)) + assert.NoError(t, err) + assert.Equal(t, int64(4), v) + + v, err = toInteger(float64(math.MaxInt32 + 10)) + assert.NoError(t, err) + assert.Equal(t, int64(2147483657), v) + + v, err = toInteger(2) + assert.NoError(t, err) + assert.Equal(t, int64(2), v) + + _, err = toInteger(float32(2.2)) + assert.EqualError(t, err, "expected integer value, got: 2.2") + + _, err = toInteger(float64(math.MaxInt32 + 100.1)) + assert.ErrorContains(t, err, "expected integer value, got: 2.1474837471e+09") + + _, err = toInteger("abcd") + assert.EqualError(t, err, "cannot convert \"abcd\" to an integer") +} + +func TestTemplateToString(t *testing.T) { + s, err := toString(true, jsonschema.BooleanType) + assert.NoError(t, err) + assert.Equal(t, "true", s) + + s, err = toString("abc", jsonschema.StringType) + assert.NoError(t, err) + assert.Equal(t, "abc", s) + + s, err = toString(1.1, jsonschema.NumberType) + assert.NoError(t, err) + assert.Equal(t, "1.1", s) + + s, err = toString(2, jsonschema.IntegerType) + assert.NoError(t, err) + assert.Equal(t, "2", s) + + _, err = toString([]string{}, jsonschema.ArrayType) + assert.EqualError(t, err, "cannot format object of type array as a string. 
Value of object: []string{}") + + _, err = toString("true", jsonschema.BooleanType) + assert.EqualError(t, err, "expected bool, got: \"true\"") + + _, err = toString(123, jsonschema.StringType) + assert.EqualError(t, err, "expected string, got: 123") + + _, err = toString(false, jsonschema.NumberType) + assert.EqualError(t, err, "expected float, got: false") + + _, err = toString("abc", jsonschema.IntegerType) + assert.EqualError(t, err, "cannot convert \"abc\" to an integer") +} + +func TestTemplateFromString(t *testing.T) { + v, err := fromString("true", jsonschema.BooleanType) + assert.NoError(t, err) + assert.Equal(t, true, v) + + v, err = fromString("abc", jsonschema.StringType) + assert.NoError(t, err) + assert.Equal(t, "abc", v) + + v, err = fromString("1.1", jsonschema.NumberType) + assert.NoError(t, err) + // Floating point conversions are not perfect + assert.True(t, (v.(float64)-1.1) < 0.000001) + + v, err = fromString("12345", jsonschema.IntegerType) + assert.NoError(t, err) + assert.Equal(t, int64(12345), v) + + v, err = fromString("123", jsonschema.NumberType) + assert.NoError(t, err) + assert.Equal(t, float64(123), v) + + _, err = fromString("qrt", jsonschema.ArrayType) + assert.EqualError(t, err, "cannot parse string as object of type array. Value of string: \"qrt\"") + + _, err = fromString("abc", jsonschema.IntegerType) + assert.EqualError(t, err, "could not parse \"abc\" as a integer: strconv.ParseInt: parsing \"abc\": invalid syntax") + + _, err = fromString("1.0", jsonschema.IntegerType) + assert.EqualError(t, err, "could not parse \"1.0\" as a integer: strconv.ParseInt: parsing \"1.0\": invalid syntax") +} diff --git a/libs/template/validators.go b/libs/template/validators.go index 0ae41e46..57eda093 100644 --- a/libs/template/validators.go +++ b/libs/template/validators.go @@ -33,9 +33,7 @@ func validateBoolean(v any) error { } func validateNumber(v any) error { - if !slices.Contains([]reflect.Kind{reflect.Float32, reflect.Float64, reflect.Int, - reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, - reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64}, + if !slices.Contains([]reflect.Kind{reflect.Float32, reflect.Float64}, reflect.TypeOf(v).Kind()) { return fmt.Errorf("expected type float, but value is %#v", v) } diff --git a/libs/template/validators_test.go b/libs/template/validators_test.go index f0cbf8a1..f34f037a 100644 --- a/libs/template/validators_test.go +++ b/libs/template/validators_test.go @@ -3,8 +3,8 @@ package template import ( "testing" + "github.com/databricks/cli/libs/jsonschema" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestValidatorString(t *testing.T) { @@ -40,10 +40,10 @@ func TestValidatorNumber(t *testing.T) { assert.ErrorContains(t, err, "expected type float, but value is true") err = validateNumber(int32(1)) - require.NoError(t, err) + assert.ErrorContains(t, err, "expected type float, but value is 1") - err = validateNumber(int64(1)) - require.NoError(t, err) + err = validateNumber(int64(2)) + assert.ErrorContains(t, err, "expected type float, but value is 2") err = validateNumber(float32(1)) assert.NoError(t, err) @@ -74,3 +74,56 @@ func TestValidatorInt(t *testing.T) { err = validateInteger("abc") assert.ErrorContains(t, err, "expected type integer, but value is \"abc\"") } + +func TestTemplateValidateType(t *testing.T) { + // assert validation passing + err := validateType(int(0), jsonschema.IntegerType) + assert.NoError(t, err) + err = validateType(int32(1), 
jsonschema.IntegerType) + assert.NoError(t, err) + err = validateType(int64(1), jsonschema.IntegerType) + assert.NoError(t, err) + + err = validateType(float32(1.1), jsonschema.NumberType) + assert.NoError(t, err) + err = validateType(float64(1.2), jsonschema.NumberType) + assert.NoError(t, err) + + err = validateType(false, jsonschema.BooleanType) + assert.NoError(t, err) + + err = validateType("abc", jsonschema.StringType) + assert.NoError(t, err) + + // assert validation failing for integers + err = validateType(float64(1.2), jsonschema.IntegerType) + assert.ErrorContains(t, err, "expected type integer, but value is 1.2") + err = validateType(true, jsonschema.IntegerType) + assert.ErrorContains(t, err, "expected type integer, but value is true") + err = validateType("abc", jsonschema.IntegerType) + assert.ErrorContains(t, err, "expected type integer, but value is \"abc\"") + + // assert validation failing for floats + err = validateType(true, jsonschema.NumberType) + assert.ErrorContains(t, err, "expected type float, but value is true") + err = validateType("abc", jsonschema.NumberType) + assert.ErrorContains(t, err, "expected type float, but value is \"abc\"") + err = validateType(int(1), jsonschema.NumberType) + assert.ErrorContains(t, err, "expected type float, but value is 1") + + // assert validation failing for boolean + err = validateType(int(1), jsonschema.BooleanType) + assert.ErrorContains(t, err, "expected type boolean, but value is 1") + err = validateType(float64(1), jsonschema.BooleanType) + assert.ErrorContains(t, err, "expected type boolean, but value is 1") + err = validateType("abc", jsonschema.BooleanType) + assert.ErrorContains(t, err, "expected type boolean, but value is \"abc\"") + + // assert validation failing for string + err = validateType(int(1), jsonschema.StringType) + assert.ErrorContains(t, err, "expected type string, but value is 1") + err = validateType(float64(1), jsonschema.StringType) + assert.ErrorContains(t, err, "expected type string, but value is 1") + err = validateType(false, jsonschema.StringType) + assert.ErrorContains(t, err, "expected type string, but value is false") +} From d6f626912f056a0a01d68312b59075ee70adebe5 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 7 Aug 2023 19:29:02 +0200 Subject: [PATCH 005/310] Fix bundle git branch validation (#645) ## Changes This PR: 1. Fixes the computation logic for `ActualBranch`. An error in the earlier logic caused the validation mutator to be a no-op. 2. Makes the `.git` string a global var. This is useful to configure in tests. 3. Adds e2e test for the validation mutator. 
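As a minimal sketch of what point 2 enables (the test name here is illustrative; the actual e2e test added by this PR appears in the diff below), a test can temporarily swap out the directory name and restore the default on cleanup:

```go
package config_tests

import (
	"testing"

	"github.com/databricks/cli/libs/git"
)

func TestUsesMockGitDirectory(t *testing.T) {
	// Point repository discovery at a checked-in mock directory instead
	// of a real .git directory, and restore the default afterwards.
	git.GitDirectoryName = ".mock-git"
	t.Cleanup(func() {
		git.GitDirectoryName = ".git"
	})

	// Anything below that calls git.NewRepository(...) now resolves
	// HEAD from <root>/.mock-git/HEAD.
}
```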
## Tests Unit test --- bundle/config/mutator/load_git_details.go | 19 +++++---- bundle/tests/autoload_git_test.go | 20 ---------- .../git_branch_validation/.mock-git/HEAD | 1 + .../git_branch_validation/databricks.yml | 4 ++ bundle/tests/git_test.go | 39 +++++++++++++++++++ libs/git/repository.go | 10 +++-- 6 files changed, 61 insertions(+), 32 deletions(-) delete mode 100644 bundle/tests/autoload_git_test.go create mode 100644 bundle/tests/git_branch_validation/.mock-git/HEAD create mode 100644 bundle/tests/git_branch_validation/databricks.yml create mode 100644 bundle/tests/git_test.go diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index f22aafe0..ab47677d 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -24,17 +24,20 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error { if err != nil { return err } - // load branch name if undefined - if b.Config.Bundle.Git.Branch == "" { - branch, err := repo.CurrentBranch() - if err != nil { - log.Warnf(ctx, "failed to load current branch: %s", err) - } else { - b.Config.Bundle.Git.Branch = branch - b.Config.Bundle.Git.ActualBranch = branch + + // Read branch name of current checkout + branch, err := repo.CurrentBranch() + if err == nil { + b.Config.Bundle.Git.ActualBranch = branch + if b.Config.Bundle.Git.Branch == "" { + // Only load branch if there's no user defined value b.Config.Bundle.Git.Inferred = true + b.Config.Bundle.Git.Branch = branch } + } else { + log.Warnf(ctx, "failed to load current branch: %s", err) } + // load commit hash if undefined if b.Config.Bundle.Git.Commit == "" { commit, err := repo.LatestCommit() diff --git a/bundle/tests/autoload_git_test.go b/bundle/tests/autoload_git_test.go deleted file mode 100644 index a1075198..00000000 --- a/bundle/tests/autoload_git_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAutoLoad(t *testing.T) { - b := load(t, "./autoload_git") - assert.True(t, b.Config.Bundle.Git.Inferred) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") -} - -func TestManuallySetBranch(t *testing.T) { - b := loadEnvironment(t, "./autoload_git", "production") - assert.False(t, b.Config.Bundle.Git.Inferred) - assert.Equal(t, "main", b.Config.Bundle.Git.Branch) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") -} diff --git a/bundle/tests/git_branch_validation/.mock-git/HEAD b/bundle/tests/git_branch_validation/.mock-git/HEAD new file mode 100644 index 00000000..6c83ec9d --- /dev/null +++ b/bundle/tests/git_branch_validation/.mock-git/HEAD @@ -0,0 +1 @@ +ref: refs/heads/feature-b diff --git a/bundle/tests/git_branch_validation/databricks.yml b/bundle/tests/git_branch_validation/databricks.yml new file mode 100644 index 00000000..8c7b96ef --- /dev/null +++ b/bundle/tests/git_branch_validation/databricks.yml @@ -0,0 +1,4 @@ +bundle: + name: "Dancing Feet" + git: + branch: "feature-a" diff --git a/bundle/tests/git_test.go b/bundle/tests/git_test.go new file mode 100644 index 00000000..daab4d30 --- /dev/null +++ b/bundle/tests/git_test.go @@ -0,0 +1,39 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/libs/git" + "github.com/stretchr/testify/assert" +) + +func TestGitAutoLoad(t *testing.T) { + b := load(t, "./autoload_git") + assert.True(t, 
b.Config.Bundle.Git.Inferred) + assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") +} + +func TestGitManuallySetBranch(t *testing.T) { + b := loadEnvironment(t, "./autoload_git", "production") + assert.False(t, b.Config.Bundle.Git.Inferred) + assert.Equal(t, "main", b.Config.Bundle.Git.Branch) + assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") +} + +func TestGitBundleBranchValidation(t *testing.T) { + git.GitDirectoryName = ".mock-git" + t.Cleanup(func() { + git.GitDirectoryName = ".git" + }) + + b := load(t, "./git_branch_validation") + assert.False(t, b.Config.Bundle.Git.Inferred) + assert.Equal(t, "feature-a", b.Config.Bundle.Git.Branch) + assert.Equal(t, "feature-b", b.Config.Bundle.Git.ActualBranch) + + err := bundle.Apply(context.Background(), b, mutator.ValidateGitDetails()) + assert.ErrorContains(t, err, "not on the right Git branch:") +} diff --git a/libs/git/repository.go b/libs/git/repository.go index 3b93669a..2f19cff9 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -12,6 +12,8 @@ import ( const gitIgnoreFileName = ".gitignore" +var GitDirectoryName = ".git" + // Repository represents a Git repository or a directory // that could later be initialized as Git repository. type Repository struct { @@ -45,7 +47,7 @@ func (r *Repository) Root() string { func (r *Repository) CurrentBranch() (string, error) { // load .git/HEAD - ref, err := LoadReferenceFile(filepath.Join(r.rootPath, ".git", "HEAD")) + ref, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, "HEAD")) if err != nil { return "", err } @@ -62,7 +64,7 @@ func (r *Repository) CurrentBranch() (string, error) { func (r *Repository) LatestCommit() (string, error) { // load .git/HEAD - ref, err := LoadReferenceFile(filepath.Join(r.rootPath, ".git", "HEAD")) + ref, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, "HEAD")) if err != nil { return "", err } @@ -81,7 +83,7 @@ func (r *Repository) LatestCommit() (string, error) { if err != nil { return "", err } - branchHeadRef, err := LoadReferenceFile(filepath.Join(r.rootPath, ".git", branchHeadPath)) + branchHeadRef, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, branchHeadPath)) if err != nil { return "", err } @@ -186,7 +188,7 @@ func NewRepository(path string) (*Repository, error) { } real := true - rootPath, err := folders.FindDirWithLeaf(path, ".git") + rootPath, err := folders.FindDirWithLeaf(path, GitDirectoryName) if err != nil { if !os.IsNotExist(err) { return nil, err From ee88b0be3c28ecf0be81197359cefff42b1bc849 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Aug 2023 12:09:02 +0200 Subject: [PATCH 006/310] Bump golang.org/x/term from 0.10.0 to 0.11.0 (#643) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.10.0 to 0.11.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.10.0&new-version=0.11.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 8f4051e1..5b01ec5c 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/mod v0.12.0 golang.org/x/oauth2 v0.10.0 golang.org/x/sync v0.3.0 - golang.org/x/term v0.10.0 + golang.org/x/term v0.11.0 golang.org/x/text v0.11.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) @@ -52,7 +52,7 @@ require ( go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.11.0 // indirect golang.org/x/net v0.12.0 // indirect - golang.org/x/sys v0.10.0 // indirect + golang.org/x/sys v0.11.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.131.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 38a55108..ff272354 100644 --- a/go.sum +++ b/go.sum @@ -217,12 +217,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From a3de441fd28e3c4eaa400607bc5f43875b6de9bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Aug 2023 13:59:33 +0200 Subject: [PATCH 007/310] Bump golang.org/x/text from 0.11.0 to 0.12.0 (#642) Bumps [golang.org/x/text](https://github.com/golang/text) from 0.11.0 to 0.12.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/text&package-manager=go_modules&previous-version=0.11.0&new-version=0.12.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5b01ec5c..d9d0a3a3 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( golang.org/x/oauth2 v0.10.0 golang.org/x/sync v0.3.0 golang.org/x/term v0.11.0 - golang.org/x/text v0.11.0 + golang.org/x/text v0.12.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) diff --git a/go.sum b/go.sum index ff272354..7af9032e 100644 --- a/go.sum +++ b/go.sum @@ -229,8 +229,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 54a1bcd10afaa65eff1a5c8b910950e9baf2ded6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Aug 2023 12:17:48 +0000 Subject: [PATCH 008/310] Bump golang.org/x/oauth2 from 0.10.0 to 0.11.0 (#641) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.10.0 to 0.11.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/oauth2&package-manager=go_modules&previous-version=0.10.0&new-version=0.11.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index d9d0a3a3..c3efa91b 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/whilp/git-urls v1.0.0 // MIT golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 golang.org/x/mod v0.12.0 - golang.org/x/oauth2 v0.10.0 + golang.org/x/oauth2 v0.11.0 golang.org/x/sync v0.3.0 golang.org/x/term v0.11.0 golang.org/x/text v0.12.0 @@ -50,8 +50,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/zclconf/go-cty v1.13.2 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/net v0.12.0 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect golang.org/x/sys v0.11.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.131.0 // indirect diff --git a/go.sum b/go.sum index 7af9032e..1edb3b48 100644 --- a/go.sum +++ b/go.sum @@ -163,8 +163,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw= golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= @@ -187,12 +187,12 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 6430d2345395c859ffab614baf28b19139ac0a6b Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 9 Aug 2023 11:22:42 +0200 Subject: [PATCH 009/310] Print y/n options when displaying prompts using cmdio.Ask (#650) ## Changes Adds `[y/n]` in `cmdio.Ask` to make the options obvious in all question prompts ## Tests Test manually. Works. --- bundle/deploy/files/delete.go | 2 +- bundle/deploy/terraform/destroy.go | 2 +- libs/cmdio/logger.go | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go index 1f103bbd..990eca47 100644 --- a/bundle/deploy/files/delete.go +++ b/bundle/deploy/files/delete.go @@ -27,7 +27,7 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { red := color.New(color.FgRed).SprintFunc() if !b.AutoApprove { - proceed, err := cmdio.Ask(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?: ", b.Config.Workspace.RootPath, red("deleted permanently!"))) + proceed, err := cmdio.Ask(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?", b.Config.Workspace.RootPath, red("deleted permanently!"))) if err != nil { return err } diff --git a/bundle/deploy/terraform/destroy.go b/bundle/deploy/terraform/destroy.go index 839ea5f9..649542f6 100644 --- a/bundle/deploy/terraform/destroy.go +++ b/bundle/deploy/terraform/destroy.go @@ -89,7 +89,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { // Ask for confirmation, if needed if !b.Plan.ConfirmApply { red := color.New(color.FgRed).SprintFunc() - b.Plan.ConfirmApply, err = cmdio.Ask(ctx, fmt.Sprintf("\nThis will permanently %s resources! Proceed? [y/n]: ", red("destroy"))) + b.Plan.ConfirmApply, err = cmdio.Ask(ctx, fmt.Sprintf("\nThis will permanently %s resources! Proceed?", red("destroy"))) if err != nil { return err } diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go index a507c5cc..3190a6a7 100644 --- a/libs/cmdio/logger.go +++ b/libs/cmdio/logger.go @@ -87,6 +87,8 @@ func (l *Logger) Ask(question string) (bool, error) { return false, fmt.Errorf("question prompts are not supported in json mode") } + // Add acceptable answers to the question prompt. + question += ` [y/n]:` l.Writer.Write([]byte(question)) ans, err := l.Reader.ReadString('\n') From 979b680c50d852ff299070992cd686082d7a0bf4 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 10 Aug 2023 11:22:38 +0200 Subject: [PATCH 010/310] Release v0.203.0 (#653) CLI: * Infer host from profile during `auth login` ([#629](https://github.com/databricks/cli/pull/629)). Bundles: * Extend deployment mode support ([#577](https://github.com/databricks/cli/pull/577)). * Add validation for Git settings in bundles ([#578](https://github.com/databricks/cli/pull/578)). * Only treat files with .tmpl extension as templates ([#594](https://github.com/databricks/cli/pull/594)). * Add JSON schema validation for input template parameters ([#598](https://github.com/databricks/cli/pull/598)). * Add DATABRICKS_BUNDLE_INCLUDE_PATHS to specify include paths through env vars ([#591](https://github.com/databricks/cli/pull/591)). 
* Initialise a empty default bundle if BUNDLE_ROOT and DATABRICKS_BUNDLE_INCLUDES env vars are present ([#604](https://github.com/databricks/cli/pull/604)). * Regenerate bundle resource structs from latest Terraform provider ([#633](https://github.com/databricks/cli/pull/633)). * Fixed processing jobs libraries with remote path ([#638](https://github.com/databricks/cli/pull/638)). * Add unit test for file name execution during rendering ([#640](https://github.com/databricks/cli/pull/640)). * Add bundle init command and support for prompting user for input values ([#631](https://github.com/databricks/cli/pull/631)). * Fix bundle git branch validation ([#645](https://github.com/databricks/cli/pull/645)). Internal: * Fix mkdir integration test on GCP ([#620](https://github.com/databricks/cli/pull/620)). * Fix git clone integration test for non-existing repo ([#610](https://github.com/databricks/cli/pull/610)). * Remove push to main trigger for build workflow ([#621](https://github.com/databricks/cli/pull/621)). * Remove workflow to publish binaries to S3 ([#622](https://github.com/databricks/cli/pull/622)). * Fix failing fs mkdir test on azure ([#627](https://github.com/databricks/cli/pull/627)). * Print y/n options when displaying prompts using cmdio.Ask ([#650](https://github.com/databricks/cli/pull/650)). API Changes: * Changed `databricks account metastore-assignments create` command to not return anything. * Added `databricks account network-policy` command group. OpenAPI commit 7b57ba3a53f4de3d049b6a24391fe5474212daf8 (2023-07-28) Dependency updates: * Bump OpenAPI specification & Go SDK Version ([#624](https://github.com/databricks/cli/pull/624)). * Bump golang.org/x/term from 0.10.0 to 0.11.0 ([#643](https://github.com/databricks/cli/pull/643)). * Bump golang.org/x/text from 0.11.0 to 0.12.0 ([#642](https://github.com/databricks/cli/pull/642)). * Bump golang.org/x/oauth2 from 0.10.0 to 0.11.0 ([#641](https://github.com/databricks/cli/pull/641)). --- CHANGELOG.md | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f0b1f696..6cf7673b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,43 @@ # Version changelog +## 0.203.0 + +CLI: + * Infer host from profile during `auth login` ([#629](https://github.com/databricks/cli/pull/629)). + +Bundles: + * Extend deployment mode support ([#577](https://github.com/databricks/cli/pull/577)). + * Add validation for Git settings in bundles ([#578](https://github.com/databricks/cli/pull/578)). + * Only treat files with .tmpl extension as templates ([#594](https://github.com/databricks/cli/pull/594)). + * Add JSON schema validation for input template parameters ([#598](https://github.com/databricks/cli/pull/598)). + * Add DATABRICKS_BUNDLE_INCLUDE_PATHS to specify include paths through env vars ([#591](https://github.com/databricks/cli/pull/591)). + * Initialise a empty default bundle if BUNDLE_ROOT and DATABRICKS_BUNDLE_INCLUDES env vars are present ([#604](https://github.com/databricks/cli/pull/604)). + * Regenerate bundle resource structs from latest Terraform provider ([#633](https://github.com/databricks/cli/pull/633)). + * Fixed processing jobs libraries with remote path ([#638](https://github.com/databricks/cli/pull/638)). + * Add unit test for file name execution during rendering ([#640](https://github.com/databricks/cli/pull/640)). + * Add bundle init command and support for prompting user for input values ([#631](https://github.com/databricks/cli/pull/631)). 
+ * Fix bundle git branch validation ([#645](https://github.com/databricks/cli/pull/645)). + +Internal: + * Fix mkdir integration test on GCP ([#620](https://github.com/databricks/cli/pull/620)). + * Fix git clone integration test for non-existing repo ([#610](https://github.com/databricks/cli/pull/610)). + * Remove push to main trigger for build workflow ([#621](https://github.com/databricks/cli/pull/621)). + * Remove workflow to publish binaries to S3 ([#622](https://github.com/databricks/cli/pull/622)). + * Fix failing fs mkdir test on azure ([#627](https://github.com/databricks/cli/pull/627)). + * Print y/n options when displaying prompts using cmdio.Ask ([#650](https://github.com/databricks/cli/pull/650)). + +API Changes: + * Changed `databricks account metastore-assignments create` command to not return anything. + * Added `databricks account network-policy` command group. + +OpenAPI commit 7b57ba3a53f4de3d049b6a24391fe5474212daf8 (2023-07-28) + +Dependency updates: + * Bump OpenAPI specification & Go SDK Version ([#624](https://github.com/databricks/cli/pull/624)). + * Bump golang.org/x/term from 0.10.0 to 0.11.0 ([#643](https://github.com/databricks/cli/pull/643)). + * Bump golang.org/x/text from 0.11.0 to 0.12.0 ([#642](https://github.com/databricks/cli/pull/642)). + * Bump golang.org/x/oauth2 from 0.10.0 to 0.11.0 ([#641](https://github.com/databricks/cli/pull/641)). + ## 0.202.0 Breaking Change: From 2a58253d20ad6d48c55f138f0c92f62544fa7c28 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 10 Aug 2023 11:36:42 +0200 Subject: [PATCH 011/310] Consolidate functions in libs/git (#652) ## Changes The functions in `libs/git/git.go` assumed global state (e.g. working directory) and were no longer used. This change consolidates the functionality to turn an origin URL into an HTTPS URL. Closes #187. ## Tests Expanded existing unit test. --- libs/git/git.go | 80 ------------------------------------------ libs/git/git_test.go | 22 ------------ libs/git/utils.go | 24 ++++++------- libs/git/utils_test.go | 15 ++++++-- 4 files changed, 24 insertions(+), 117 deletions(-) delete mode 100644 libs/git/git.go delete mode 100644 libs/git/git_test.go diff --git a/libs/git/git.go b/libs/git/git.go deleted file mode 100644 index c5d09034..00000000 --- a/libs/git/git.go +++ /dev/null @@ -1,80 +0,0 @@ -package git - -import ( - "fmt" - "net/url" - "os" - "path" - "strings" - - "github.com/databricks/cli/folders" - giturls "github.com/whilp/git-urls" - "gopkg.in/ini.v1" -) - -func Root() (string, error) { - wd, err := os.Getwd() - if err != nil { - return "", err - } - return folders.FindDirWithLeaf(wd, ".git") -} - -// Origin finds the git repository the project is cloned from, so that -// we could automatically verify if this project is checked out in repos -// home folder of the user according to recommended best practices. Can -// also be used to determine a good enough default project name. 
-func Origin() (*url.URL, error) { - root, err := Root() - if err != nil { - return nil, err - } - file := fmt.Sprintf("%s/.git/config", root) - gitConfig, err := ini.Load(file) - if err != nil { - return nil, err - } - section := gitConfig.Section(`remote "origin"`) - if section == nil { - return nil, fmt.Errorf("remote `origin` is not defined in %s", file) - } - url := section.Key("url") - if url == nil { - return nil, fmt.Errorf("git origin url is not defined") - } - return giturls.Parse(url.Value()) -} - -// HttpsOrigin returns URL in the format expected by Databricks Repos -// platform functionality. Gradually expand implementation to work with -// other formats of git URLs. -func HttpsOrigin() (string, error) { - origin, err := Origin() - if err != nil { - return "", err - } - // if current repo is checked out with a SSH key - if origin.Scheme != "https" { - origin.Scheme = "https" - } - // `git@` is not required for HTTPS, as Databricks Repos are checked - // out using an API token instead of username. But does it hold true - // for all of the git implementations? - if origin.User != nil { - origin.User = nil - } - // Remove `.git` suffix, if present. - origin.Path = strings.TrimSuffix(origin.Path, ".git") - return origin.String(), nil -} - -// RepositoryName returns repository name as last path entry from detected -// git repository up the tree or returns error if it fails to do so. -func RepositoryName() (string, error) { - origin, err := Origin() - if err != nil { - return "", err - } - base := path.Base(origin.Path) - return strings.TrimSuffix(base, ".git"), nil -} diff --git a/libs/git/git_test.go b/libs/git/git_test.go deleted file mode 100644 index 818ba842..00000000 --- a/libs/git/git_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package git - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGetGitOrigin(t *testing.T) { - this, err := RepositoryName() - assert.NoError(t, err) - assert.Equal(t, "cli", this) -} - -func TestHttpsOrigin(t *testing.T) { - url, err := HttpsOrigin() - assert.NoError(t, err) - // must pass on the upcoming forks - assert.True(t, strings.HasPrefix(url, "https://github.com"), url) - assert.True(t, strings.HasSuffix(url, "cli"), url) -} diff --git a/libs/git/utils.go b/libs/git/utils.go index 13ce2c9e..1d38da3a 100644 --- a/libs/git/utils.go +++ b/libs/git/utils.go @@ -6,23 +6,23 @@ import ( giturls "github.com/whilp/git-urls" ) +// Return an origin URL as an HTTPS URL. +// The transformations in this function are not guaranteed to work for all +// Git providers. They are only guaranteed to work for GitHub. func ToHttpsUrl(url string) (string, error) { - originUrl, err := giturls.Parse(url) + origin, err := giturls.Parse(url) if err != nil { return "", err } - if originUrl.Scheme == "https" { - return originUrl.String(), nil + // If this repository is checked out over SSH + if origin.Scheme != "https" { + origin.Scheme = "https" } - // if current repo is checked out with a SSH key - if originUrl.Scheme != "https" { - originUrl.Scheme = "https" - } - // `git@` is not required for HTTPS - if originUrl.User != nil { - originUrl.User = nil + // Basic auth is not applicable for an HTTPS URL. + if origin.User != nil { + origin.User = nil } // Remove `.git` suffix, if present. 
- originUrl.Path = strings.TrimSuffix(originUrl.Path, ".git") - return originUrl.String(), nil + origin.Path = strings.TrimSuffix(origin.Path, ".git") + return origin.String(), nil } diff --git a/libs/git/utils_test.go b/libs/git/utils_test.go index 52a912da..2a77cae1 100644 --- a/libs/git/utils_test.go +++ b/libs/git/utils_test.go @@ -7,7 +7,16 @@ import ( ) func TestToHttpsUrlForSsh(t *testing.T) { - url, err := ToHttpsUrl("user@foo.com:org/repo-name.git") - assert.NoError(t, err) - assert.Equal(t, "https://foo.com/org/repo-name", url) + for _, e := range []struct { + url string + expected string + }{ + {"user@foo.com:org/repo-name.git", "https://foo.com/org/repo-name"}, + {"git@github.com:databricks/cli.git", "https://github.com/databricks/cli"}, + {"https://github.com/databricks/cli.git", "https://github.com/databricks/cli"}, + } { + url, err := ToHttpsUrl(e.url) + assert.NoError(t, err) + assert.Equal(t, e.expected, url) + } } From 6b615ccfb46551d565e70aeff1294b1d24c5b3bb Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 10 Aug 2023 12:03:52 +0200 Subject: [PATCH 012/310] Add internal tag for bundle fields to be skipped from schema (#636) ## Changes This PR: 1. Introduces the "internal" tag to bundle configs that should not be visible to customers. 2. Annotates "metadata_service_url" as an internal field. ## Tests Unit tests. --- bundle/config/workspace.go | 2 +- bundle/schema/schema.go | 10 ++++++- bundle/schema/schema_test.go | 52 ++++++++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 2 deletions(-) diff --git a/bundle/config/workspace.go b/bundle/config/workspace.go index f278ea17..bd116a9c 100644 --- a/bundle/config/workspace.go +++ b/bundle/config/workspace.go @@ -24,7 +24,7 @@ type Workspace struct { Host string `json:"host,omitempty"` Profile string `json:"profile,omitempty"` AuthType string `json:"auth_type,omitempty"` - MetadataServiceURL string `json:"metadata_service_url,omitempty"` + MetadataServiceURL string `json:"metadata_service_url,omitempty" bundle:"internal"` // OAuth specific attributes. ClientID string `json:"client_id,omitempty"` diff --git a/bundle/schema/schema.go b/bundle/schema/schema.go index fee9b676..00dd2719 100644 --- a/bundle/schema/schema.go +++ b/bundle/schema/schema.go @@ -9,6 +9,14 @@ import ( "github.com/databricks/cli/libs/jsonschema" ) +// Fields tagged "readonly" should not be emitted in the schema as they are +// computed at runtime, and should not be assigned a value by the bundle author. +const readonlyTag = "readonly" + +// Annotation for internal bundle fields that should not be exposed to customers. +// Fields can be tagged as "internal" to remove them from the generated schema. +const internalTag = "internal" + // This function translates golang types into json schema. 
Here is the mapping // between json schema types and golang types // @@ -197,7 +205,7 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*jsonschem required := []string{} for _, child := range children { bundleTag := child.Tag.Get("bundle") - if bundleTag == "readonly" { + if bundleTag == readonlyTag || bundleTag == internalTag { continue } diff --git a/bundle/schema/schema_test.go b/bundle/schema/schema_test.go index 66baf873..d44a2082 100644 --- a/bundle/schema/schema_test.go +++ b/bundle/schema/schema_test.go @@ -1462,3 +1462,55 @@ func TestBundleReadOnlytag(t *testing.T) { t.Log("[DEBUG] expected: ", expected) assert.Equal(t, expected, string(jsonSchema)) } + +func TestBundleInternalTag(t *testing.T) { + type Pokemon struct { + Pikachu string `json:"pikachu" bundle:"internal"` + Raichu string `json:"raichu"` + } + + type Foo struct { + Pokemon *Pokemon `json:"pokemon"` + Apple int `json:"apple"` + Mango string `json:"mango" bundle:"internal"` + } + + elem := Foo{} + + schema, err := New(reflect.TypeOf(elem), nil) + assert.NoError(t, err) + + jsonSchema, err := json.MarshalIndent(schema, " ", " ") + assert.NoError(t, err) + + expected := + `{ + "type": "object", + "properties": { + "apple": { + "type": "number" + }, + "pokemon": { + "type": "object", + "properties": { + "raichu": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "raichu" + ] + } + }, + "additionalProperties": false, + "required": [ + "pokemon", + "apple" + ] + }` + + t.Log("[DEBUG] actual: ", string(jsonSchema)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(jsonSchema)) +} From 8656c4a1fad95f349a778b18c73303df6754632f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 11 Aug 2023 14:28:05 +0200 Subject: [PATCH 013/310] Log the bundle root configuration file if applicable (#657) ## Changes Pass through the `context.Context` to the bundle loader functions. ## Tests Unit tests pass. 
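As a minimal sketch of the new call pattern (the wrapper function and path are illustrative), callers now pass their context into the loader, which also ties the new debug log line to that context:

```go
package example

import (
	"context"

	"github.com/databricks/cli/bundle"
)

func loadBundle(ctx context.Context) (*bundle.Bundle, error) {
	// The loader now receives the caller's context, so the debug line
	// "Loading bundle configuration from: ..." is logged against it.
	return bundle.Load(ctx, "/path/to/bundle/root")
}
```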
--- bundle/bundle.go | 14 ++++++++----- bundle/bundle_test.go | 21 ++++++++++--------- bundle/root_test.go | 9 ++++---- bundle/tests/bundle/wheel_test.go | 21 +++++++++++-------- bundle/tests/conflicting_resource_ids_test.go | 13 +++++++----- bundle/tests/include_test.go | 5 +++-- bundle/tests/loader.go | 5 +++-- cmd/root/bundle.go | 9 ++++---- cmd/root/bundle_test.go | 2 +- 9 files changed, 57 insertions(+), 42 deletions(-) diff --git a/bundle/bundle.go b/bundle/bundle.go index 0147883c..06c68fe8 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -7,6 +7,7 @@ package bundle import ( + "context" "fmt" "os" "path/filepath" @@ -16,6 +17,7 @@ import ( "github.com/databricks/cli/folders" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" + "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/terraform" "github.com/databricks/databricks-sdk-go" sdkconfig "github.com/databricks/databricks-sdk-go/config" @@ -45,7 +47,7 @@ type Bundle struct { const ExtraIncludePathsKey string = "DATABRICKS_BUNDLE_INCLUDES" -func Load(path string) (*Bundle, error) { +func Load(ctx context.Context, path string) (*Bundle, error) { bundle := &Bundle{} stat, err := os.Stat(path) if err != nil { @@ -56,6 +58,7 @@ func Load(path string) (*Bundle, error) { _, hasIncludePathEnv := os.LookupEnv(ExtraIncludePathsKey) _, hasBundleRootEnv := os.LookupEnv(envBundleRoot) if hasIncludePathEnv && hasBundleRootEnv && stat.IsDir() { + log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path) bundle.Config = config.Root{ Path: path, Bundle: config.Bundle{ @@ -66,6 +69,7 @@ func Load(path string) (*Bundle, error) { } return nil, err } + log.Debugf(ctx, "Loading bundle configuration from: %s", configFile) err = bundle.Config.Load(configFile) if err != nil { return nil, err @@ -75,19 +79,19 @@ func Load(path string) (*Bundle, error) { // MustLoad returns a bundle configuration. // It returns an error if a bundle was not found or could not be loaded. -func MustLoad() (*Bundle, error) { +func MustLoad(ctx context.Context) (*Bundle, error) { root, err := mustGetRoot() if err != nil { return nil, err } - return Load(root) + return Load(ctx, root) } // TryLoad returns a bundle configuration if there is one, but doesn't fail if there isn't one. // It returns an error if a bundle was found but could not be loaded. // It returns a `nil` bundle if a bundle was not found. 
-func TryLoad() (*Bundle, error) { +func TryLoad(ctx context.Context) (*Bundle, error) { root, err := tryGetRoot() if err != nil { return nil, err @@ -98,7 +102,7 @@ func TryLoad() (*Bundle, error) { return nil, nil } - return Load(root) + return Load(ctx, root) } func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient { diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index 18550f4f..ac947500 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -1,6 +1,7 @@ package bundle import ( + "context" "os" "path/filepath" "testing" @@ -10,13 +11,13 @@ import ( ) func TestLoadNotExists(t *testing.T) { - b, err := Load("/doesntexist") + b, err := Load(context.Background(), "/doesntexist") assert.True(t, os.IsNotExist(err)) assert.Nil(t, b) } func TestLoadExists(t *testing.T) { - b, err := Load("./tests/basic") + b, err := Load(context.Background(), "./tests/basic") require.Nil(t, err) assert.Equal(t, "basic", b.Config.Bundle.Name) } @@ -27,7 +28,7 @@ func TestBundleCacheDir(t *testing.T) { require.NoError(t, err) f1.Close() - bundle, err := Load(projectDir) + bundle, err := Load(context.Background(), projectDir) require.NoError(t, err) // Artificially set environment. @@ -51,7 +52,7 @@ func TestBundleCacheDirOverride(t *testing.T) { require.NoError(t, err) f1.Close() - bundle, err := Load(projectDir) + bundle, err := Load(context.Background(), projectDir) require.NoError(t, err) // Artificially set environment. @@ -70,39 +71,39 @@ func TestBundleCacheDirOverride(t *testing.T) { func TestBundleMustLoadSuccess(t *testing.T) { t.Setenv(envBundleRoot, "./tests/basic") - b, err := MustLoad() + b, err := MustLoad(context.Background()) require.NoError(t, err) assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path)) } func TestBundleMustLoadFailureWithEnv(t *testing.T) { t.Setenv(envBundleRoot, "./tests/doesntexist") - _, err := MustLoad() + _, err := MustLoad(context.Background()) require.Error(t, err, "not a directory") } func TestBundleMustLoadFailureIfNotFound(t *testing.T) { chdir(t, t.TempDir()) - _, err := MustLoad() + _, err := MustLoad(context.Background()) require.Error(t, err, "unable to find bundle root") } func TestBundleTryLoadSuccess(t *testing.T) { t.Setenv(envBundleRoot, "./tests/basic") - b, err := TryLoad() + b, err := TryLoad(context.Background()) require.NoError(t, err) assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path)) } func TestBundleTryLoadFailureWithEnv(t *testing.T) { t.Setenv(envBundleRoot, "./tests/doesntexist") - _, err := TryLoad() + _, err := TryLoad(context.Background()) require.Error(t, err, "not a directory") } func TestBundleTryLoadOkIfNotFound(t *testing.T) { chdir(t, t.TempDir()) - b, err := TryLoad() + b, err := TryLoad(context.Background()) assert.NoError(t, err) assert.Nil(t, b) } diff --git a/bundle/root_test.go b/bundle/root_test.go index e85c4fdc..0c4c46aa 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -1,6 +1,7 @@ package bundle import ( + "context" "os" "path/filepath" "testing" @@ -108,7 +109,7 @@ func TestLoadYamlWhenIncludesEnvPresent(t *testing.T) { chdir(t, filepath.Join(".", "tests", "basic")) t.Setenv(ExtraIncludePathsKey, "test") - bundle, err := MustLoad() + bundle, err := MustLoad(context.Background()) assert.NoError(t, err) assert.Equal(t, "basic", bundle.Config.Bundle.Name) @@ -123,7 +124,7 @@ func TestLoadDefautlBundleWhenNoYamlAndRootAndIncludesEnvPresent(t *testing.T) { t.Setenv(envBundleRoot, dir) t.Setenv(ExtraIncludePathsKey, "test") - bundle, err := MustLoad() + bundle, err := 
MustLoad(context.Background()) assert.NoError(t, err) assert.Equal(t, dir, bundle.Config.Path) } @@ -133,7 +134,7 @@ func TestErrorIfNoYamlNoRootEnvAndIncludesEnvPresent(t *testing.T) { chdir(t, dir) t.Setenv(ExtraIncludePathsKey, "test") - _, err := MustLoad() + _, err := MustLoad(context.Background()) assert.Error(t, err) } @@ -142,6 +143,6 @@ func TestErrorIfNoYamlNoIncludesEnvAndRootEnvPresent(t *testing.T) { chdir(t, dir) t.Setenv(envBundleRoot, dir) - _, err := MustLoad() + _, err := MustLoad(context.Background()) assert.Error(t, err) } diff --git a/bundle/tests/bundle/wheel_test.go b/bundle/tests/bundle/wheel_test.go index bfc1fa04..ee745773 100644 --- a/bundle/tests/bundle/wheel_test.go +++ b/bundle/tests/bundle/wheel_test.go @@ -12,11 +12,12 @@ import ( ) func TestBundlePythonWheelBuild(t *testing.T) { - b, err := bundle.Load("./python_wheel") + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel") require.NoError(t, err) m := phases.Build() - err = m.Apply(context.Background(), b) + err = m.Apply(ctx, b) require.NoError(t, err) matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") @@ -24,16 +25,17 @@ func TestBundlePythonWheelBuild(t *testing.T) { require.Equal(t, 1, len(matches)) match := libraries.MatchWithArtifacts() - err = match.Apply(context.Background(), b) + err = match.Apply(ctx, b) require.NoError(t, err) } func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { - b, err := bundle.Load("./python_wheel_no_artifact") + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel_no_artifact") require.NoError(t, err) m := phases.Build() - err = m.Apply(context.Background(), b) + err = m.Apply(ctx, b) require.NoError(t, err) matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") @@ -41,19 +43,20 @@ func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { require.Equal(t, 1, len(matches)) match := libraries.MatchWithArtifacts() - err = match.Apply(context.Background(), b) + err = match.Apply(ctx, b) require.NoError(t, err) } func TestBundlePythonWheelWithDBFSLib(t *testing.T) { - b, err := bundle.Load("./python_wheel_dbfs_lib") + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel_dbfs_lib") require.NoError(t, err) m := phases.Build() - err = m.Apply(context.Background(), b) + err = m.Apply(ctx, b) require.NoError(t, err) match := libraries.MatchWithArtifacts() - err = match.Apply(context.Background(), b) + err = match.Apply(ctx, b) require.NoError(t, err) } diff --git a/bundle/tests/conflicting_resource_ids_test.go b/bundle/tests/conflicting_resource_ids_test.go index b75e3753..704683ad 100644 --- a/bundle/tests/conflicting_resource_ids_test.go +++ b/bundle/tests/conflicting_resource_ids_test.go @@ -13,24 +13,27 @@ import ( ) func TestConflictingResourceIdsNoSubconfig(t *testing.T) { - _, err := bundle.Load("./conflicting_resource_ids/no_subconfigurations") + ctx := context.Background() + _, err := bundle.Load(ctx, "./conflicting_resource_ids/no_subconfigurations") bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/no_subconfigurations/databricks.yml") assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, bundleConfigPath)) } func TestConflictingResourceIdsOneSubconfig(t *testing.T) { - b, err := bundle.Load("./conflicting_resource_ids/one_subconfiguration") + ctx := context.Background() + b, err := bundle.Load(ctx, "./conflicting_resource_ids/one_subconfiguration") require.NoError(t, 
err) - err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...)) + err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/databricks.yml") resourcesConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/resources.yml") assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, resourcesConfigPath)) } func TestConflictingResourceIdsTwoSubconfigs(t *testing.T) { - b, err := bundle.Load("./conflicting_resource_ids/two_subconfigurations") + ctx := context.Background() + b, err := bundle.Load(ctx, "./conflicting_resource_ids/two_subconfigurations") require.NoError(t, err) - err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...)) + err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) resources1ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources1.yml") resources2ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources2.yml") assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", resources1ConfigPath, resources2ConfigPath)) diff --git a/bundle/tests/include_test.go b/bundle/tests/include_test.go index 00aecb9f..eb09d1aa 100644 --- a/bundle/tests/include_test.go +++ b/bundle/tests/include_test.go @@ -14,9 +14,10 @@ import ( ) func TestIncludeInvalid(t *testing.T) { - b, err := bundle.Load("./include_invalid") + ctx := context.Background() + b, err := bundle.Load(ctx, "./include_invalid") require.NoError(t, err) - err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...)) + err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) require.Error(t, err) assert.Contains(t, err.Error(), "notexists.yml defined in 'include' section does not match any files") } diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index 42f1fc5b..056a82d9 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -10,9 +10,10 @@ import ( ) func load(t *testing.T, path string) *bundle.Bundle { - b, err := bundle.Load(path) + ctx := context.Background() + b, err := bundle.Load(ctx, path) require.NoError(t, err) - err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...)) + err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) require.NoError(t, err) return b } diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index 8a3b5977..f691bbfc 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -1,6 +1,7 @@ package root import ( + "context" "os" "github.com/databricks/cli/bundle" @@ -41,8 +42,9 @@ func getProfile(cmd *cobra.Command) (value string) { } // loadBundle loads the bundle configuration and applies default mutators. 
-func loadBundle(cmd *cobra.Command, args []string, load func() (*bundle.Bundle, error)) (*bundle.Bundle, error) { - b, err := load() +func loadBundle(cmd *cobra.Command, args []string, load func(ctx context.Context) (*bundle.Bundle, error)) (*bundle.Bundle, error) { + ctx := cmd.Context() + b, err := load(ctx) if err != nil { return nil, err } @@ -57,7 +59,6 @@ func loadBundle(cmd *cobra.Command, args []string, load func() (*bundle.Bundle, b.Config.Workspace.Profile = profile } - ctx := cmd.Context() err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) if err != nil { return nil, err } @@ -67,7 +68,7 @@ func loadBundle(cmd *cobra.Command, args []string, load func() (*bundle.Bundle, } // configureBundle loads the bundle configuration and configures it on the command's context. -func configureBundle(cmd *cobra.Command, args []string, load func() (*bundle.Bundle, error)) error { +func configureBundle(cmd *cobra.Command, args []string, load func(ctx context.Context) (*bundle.Bundle, error)) error { b, err := loadBundle(cmd, args, load) if err != nil { return err diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 4b44e019..4382cf22 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -39,7 +39,7 @@ func emptyCommand(t *testing.T) *cobra.Command { func setup(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle { setupDatabricksCfg(t) - err := configureBundle(cmd, []string{"validate"}, func() (*bundle.Bundle, error) { + err := configureBundle(cmd, []string{"validate"}, func(_ context.Context) (*bundle.Bundle, error) { return &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ From 6ea70c82a93805a90a112f916c21c0659947b272 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 11 Aug 2023 15:48:32 +0200 Subject: [PATCH 014/310] Execute paths without the .tmpl extension as templates (#654) ## Changes The `.tmpl` extension is only meant as a qualifier for whether the file content is executed as a template. All file paths in the `template` directory should be treated as valid Go text templates. Previously, only paths with the `.tmpl` extension would be resolved as templates; after this change, all file paths are interpreted as templates. ## Tests Unit test. The newly added unit tests also assert that the file path is correct, even when the `.tmpl` extension is missing.
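For illustration, a simplified sketch of the described behavior using Go's `text/template`; the `renderPath` helper is hypothetical, and the real renderer resolves named templates such as {{template `dir_name`}} from a library directory rather than plain fields:

```
package main

import (
	"fmt"
	"strings"
	"text/template"
)

const templateExtension = ".tmpl"

// renderPath executes a relative path as a Go text template and trims a
// trailing .tmpl extension from the rendered result.
func renderPath(relPathTemplate string, data any) (string, error) {
	tmpl, err := template.New("path").Parse(relPathTemplate)
	if err != nil {
		return "", err
	}
	var sb strings.Builder
	if err := tmpl.Execute(&sb, data); err != nil {
		return "", err
	}
	return strings.TrimSuffix(sb.String(), templateExtension), nil
}

func main() {
	data := map[string]string{"dir_name": "my_directory", "file_name": "my_file"}

	// Both paths render to my_directory/my_file; only the second would
	// additionally have its *content* executed as a template.
	for _, p := range []string{
		"{{.dir_name}}/{{.file_name}}",
		"{{.dir_name}}/{{.file_name}}.tmpl",
	} {
		out, err := renderPath(p, data)
		if err != nil {
			panic(err)
		}
		fmt.Println(out)
	}
}
```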
--- libs/template/renderer.go | 19 +++++++++++-------- libs/template/renderer_test.go | 14 ++++++++++++++ .../template-in-path/library/my_funcs.tmpl | 7 +++++++ .../{{template `file_name`}} | 0 4 files changed, 32 insertions(+), 8 deletions(-) create mode 100644 libs/template/testdata/template-in-path/library/my_funcs.tmpl create mode 100644 libs/template/testdata/template-in-path/template/{{template `dir_name`}}/{{template `file_name`}} diff --git a/libs/template/renderer.go b/libs/template/renderer.go index c7e79841..76479c05 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -124,19 +124,29 @@ func (r *renderer) computeFile(relPathTemplate string) (file, error) { } perm := info.Mode().Perm() + // Execute relative path template to get destination path for the file + relPath, err := r.executeTemplate(relPathTemplate) + if err != nil { + return nil, err + } + // If file name does not specify the `.tmpl` extension, then it is copied // over as is, without treating it as a template if !strings.HasSuffix(relPathTemplate, templateExtension) { return ©File{ dstPath: &destinationPath{ root: r.instanceRoot, - relPath: relPathTemplate, + relPath: relPath, }, perm: perm, ctx: r.ctx, srcPath: relPathTemplate, srcFiler: r.templateFiler, }, nil + } else { + // Trim the .tmpl suffix from file name, if specified in the template + // path + relPath = strings.TrimSuffix(relPath, templateExtension) } // read template file's content @@ -160,13 +170,6 @@ func (r *renderer) computeFile(relPathTemplate string) (file, error) { return nil, fmt.Errorf("failed to compute file content for %s. %w", relPathTemplate, err) } - // Execute relative path template to get materialized path for the file - relPathTemplate = strings.TrimSuffix(relPathTemplate, templateExtension) - relPath, err := r.executeTemplate(relPathTemplate) - if err != nil { - return nil, err - } - return &inMemoryFile{ dstPath: &destinationPath{ root: r.instanceRoot, diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 37b94b1e..f3f7f234 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -459,3 +459,17 @@ func TestRendererFileTreeRendering(t *testing.T) { assert.DirExists(t, filepath.Join(tmpDir, "my_directory")) assert.FileExists(t, filepath.Join(tmpDir, "my_directory", "my_file")) } + +func TestRendererSubTemplateInPath(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + r, err := newRenderer(ctx, nil, "./testdata/template-in-path/template", "./testdata/template-in-path/library", tmpDir) + require.NoError(t, err) + + err = r.walk() + require.NoError(t, err) + + assert.Equal(t, filepath.Join(tmpDir, "my_directory", "my_file"), r.files[0].DstPath().absPath()) + assert.Equal(t, "my_directory/my_file", r.files[0].DstPath().relPath) +} diff --git a/libs/template/testdata/template-in-path/library/my_funcs.tmpl b/libs/template/testdata/template-in-path/library/my_funcs.tmpl new file mode 100644 index 00000000..3415ad77 --- /dev/null +++ b/libs/template/testdata/template-in-path/library/my_funcs.tmpl @@ -0,0 +1,7 @@ +{{define "dir_name" -}} +my_directory +{{- end}} + +{{define "file_name" -}} +my_file +{{- end}} diff --git a/libs/template/testdata/template-in-path/template/{{template `dir_name`}}/{{template `file_name`}} b/libs/template/testdata/template-in-path/template/{{template `dir_name`}}/{{template `file_name`}} new file mode 100644 index 00000000..e69de29b From 97699b849fc8431741e19518531ef5e17834c201 Mon Sep 17 00:00:00 2001 From: Pieter 
Noordhuis Date: Mon, 14 Aug 2023 08:43:45 +0200 Subject: [PATCH 015/310] Enable environment overrides for job clusters (#658) ## Changes While job clusters are defined as a slice, we can identify each job cluster by its job cluster key. A job definition with multiple job clusters with the same key is always invalid. We can therefore merge definitions with the same key into one. This is compatible with how environment overrides are applied; merging a slice means appending to it. The override will end up in the job cluster slice of the original, which gives us a deterministic way to merge them. Since the alternative is an invalid configuration, this doesn't change behavior. ## Tests New test coverage. --- bundle/config/resources.go | 11 ++++ bundle/config/resources/job.go | 38 ++++++++++++- bundle/config/resources/job_test.go | 57 +++++++++++++++++++ bundle/config/root.go | 5 ++ .../tests/override_job_cluster/databricks.yml | 35 ++++++++++++ bundle/tests/override_job_cluster_test.go | 29 ++++++++++ 6 files changed, 174 insertions(+), 1 deletion(-) create mode 100644 bundle/config/resources/job_test.go create mode 100644 bundle/tests/override_job_cluster/databricks.yml create mode 100644 bundle/tests/override_job_cluster_test.go diff --git a/bundle/config/resources.go b/bundle/config/resources.go index fc86647e..b15158b4 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -113,3 +113,14 @@ func (r *Resources) SetConfigFilePath(path string) { e.ConfigFilePath = path } } + +// MergeJobClusters iterates over all jobs and merges their job clusters. +// This is called after applying the environment overrides. +func (r *Resources) MergeJobClusters() error { + for _, job := range r.Jobs { + if err := job.MergeJobClusters(); err != nil { + return err + } + } + return nil +} diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index a1ea3855..327d7e13 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -1,6 +1,9 @@ package resources -import "github.com/databricks/databricks-sdk-go/service/jobs" +import ( + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/imdario/mergo" +) type Job struct { ID string `json:"id,omitempty" bundle:"readonly"` @@ -10,3 +13,36 @@ type Job struct { *jobs.JobSettings } + +// MergeJobClusters merges job clusters with the same key. +// The job clusters field is a slice, and as such, overrides are appended to it. +// We can identify a job cluster by its key, however, so we can use this key +// to figure out which definitions are actually overrides and merge them. +func (j *Job) MergeJobClusters() error { + keys := make(map[string]*jobs.JobCluster) + output := make([]jobs.JobCluster, 0, len(j.JobClusters)) + + // Environment overrides are always appended, so we can iterate in natural order to + // first find the base definition, and merge instances we encounter later. + for i := range j.JobClusters { + key := j.JobClusters[i].JobClusterKey + + // Register job cluster with key if not yet seen before. + ref, ok := keys[key] + if !ok { + output = append(output, j.JobClusters[i]) + keys[key] = &j.JobClusters[i] + continue + } + + // Merge this instance into the reference. + err := mergo.Merge(ref, &j.JobClusters[i], mergo.WithOverride, mergo.WithAppendSlice) + if err != nil { + return err + } + } + + // Overwrite resulting slice.
+ j.JobClusters = output + return nil +} diff --git a/bundle/config/resources/job_test.go b/bundle/config/resources/job_test.go new file mode 100644 index 00000000..2ff3205e --- /dev/null +++ b/bundle/config/resources/job_test.go @@ -0,0 +1,57 @@ +package resources + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestJobMergeJobClusters(t *testing.T) { + j := &Job{ + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "foo", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + }, + { + JobClusterKey: "bar", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "10.4.x-scala2.12", + }, + }, + { + JobClusterKey: "foo", + NewCluster: &compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + } + + err := j.MergeJobClusters() + require.NoError(t, err) + + assert.Len(t, j.JobClusters, 2) + assert.Equal(t, "foo", j.JobClusters[0].JobClusterKey) + assert.Equal(t, "bar", j.JobClusters[1].JobClusterKey) + + // This job cluster was merged with a subsequent one. + jc0 := j.JobClusters[0].NewCluster + assert.Equal(t, "13.3.x-scala2.12", jc0.SparkVersion) + assert.Equal(t, "i3.2xlarge", jc0.NodeTypeId) + assert.Equal(t, 4, jc0.NumWorkers) + + // This job cluster was left untouched. + jc1 := j.JobClusters[1].NewCluster + assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion) +} diff --git a/bundle/config/root.go b/bundle/config/root.go index 52f88737..4ca9d0a0 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -203,6 +203,11 @@ func (r *Root) MergeEnvironment(env *Environment) error { if err != nil { return err } + + err = r.Resources.MergeJobClusters() + if err != nil { + return err + } } if env.Variables != nil { diff --git a/bundle/tests/override_job_cluster/databricks.yml b/bundle/tests/override_job_cluster/databricks.yml new file mode 100644 index 00000000..33061b2e --- /dev/null +++ b/bundle/tests/override_job_cluster/databricks.yml @@ -0,0 +1,35 @@ +bundle: + name: override_job_cluster + +workspace: + host: https://acme.cloud.databricks.com/ + +resources: + jobs: + foo: + name: job + job_clusters: + - job_cluster_key: key + new_cluster: + spark_version: 13.3.x-scala2.12 + +environments: + development: + resources: + jobs: + foo: + job_clusters: + - job_cluster_key: key + new_cluster: + node_type_id: i3.xlarge + num_workers: 1 + + staging: + resources: + jobs: + foo: + job_clusters: + - job_cluster_key: key + new_cluster: + node_type_id: i3.2xlarge + num_workers: 4 diff --git a/bundle/tests/override_job_cluster_test.go b/bundle/tests/override_job_cluster_test.go new file mode 100644 index 00000000..97f7c04e --- /dev/null +++ b/bundle/tests/override_job_cluster_test.go @@ -0,0 +1,29 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOverrideJobClusterDev(t *testing.T) { + b := loadEnvironment(t, "./override_job_cluster", "development") + assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) + assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) + + c := b.Config.Resources.Jobs["foo"].JobClusters[0] + assert.Equal(t, "13.3.x-scala2.12", c.NewCluster.SparkVersion) + assert.Equal(t, "i3.xlarge", c.NewCluster.NodeTypeId) + assert.Equal(t, 1, c.NewCluster.NumWorkers) +} + +func TestOverrideJobClusterStaging(t 
*testing.T) { + b := loadEnvironment(t, "./override_job_cluster", "staging") + assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) + assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) + + c := b.Config.Resources.Jobs["foo"].JobClusters[0] + assert.Equal(t, "13.3.x-scala2.12", c.NewCluster.SparkVersion) + assert.Equal(t, "i3.2xlarge", c.NewCluster.NodeTypeId) + assert.Equal(t, 4, c.NewCluster.NumWorkers) +} From 5b819cd982182542476fe249a4c0002e96bd6dd3 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Mon, 14 Aug 2023 14:45:08 +0200 Subject: [PATCH 016/310] Always resolve .databrickscfg file (#659) ## Changes #629 introduced a change to autopopulate the host from .databrickscfg if the user is logging back into a host they were previously using. This did not respect the DATABRICKS_CONFIG_FILE env variable, causing the flow to stop working for users with no .databrickscfg file in their home directory. This PR refactors all config file loading to go through one interface, `databrickscfg.Get()`, and an auxiliary `databrickscfg.GetPath()` to get the configured file path. Closes #655. ## Tests ``` $ databricks auth login --profile abc Error: open /Users/miles/.databrickscfg: no such file or directory $ ./cli auth login --profile abc Error: cannot load Databricks config file: open /Users/miles/.databrickscfg: no such file or directory $ DATABRICKS_CONFIG_FILE=~/.databrickscfg.bak ./cli auth login --profile abc Databricks Host: https://asdf ``` --- cmd/auth/env.go | 5 +++-- cmd/auth/login.go | 2 +- cmd/auth/profiles.go | 24 +++++--------------- cmd/root/auth.go | 19 +++++++++------- libs/databrickscfg/profiles.go | 34 ++++++++++++++++++++++++----- libs/databrickscfg/profiles_test.go | 9 +++++--- 6 files changed, 55 insertions(+), 38 deletions(-) diff --git a/cmd/auth/env.go b/cmd/auth/env.go index 7bf3fd91..241d5f88 100644 --- a/cmd/auth/env.go +++ b/cmd/auth/env.go @@ -9,6 +9,7 @@ import ( "net/url" "strings" + "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" "gopkg.in/ini.v1" ) @@ -28,7 +29,7 @@ func canonicalHost(host string) (string, error) { var ErrNoMatchingProfiles = errors.New("no matching profiles found") -func resolveSection(cfg *config.Config, iniFile *ini.File) (*ini.Section, error) { +func resolveSection(cfg *config.Config, iniFile *config.File) (*ini.Section, error) { var candidates []*ini.Section configuredHost, err := canonicalHost(cfg.Host) if err != nil { @@ -68,7 +69,7 @@ func resolveSection(cfg *config.Config, iniFile *config.File) (*ini.Section, error) } func loadFromDatabricksCfg(cfg *config.Config) error { - iniFile, err := getDatabricksCfg() + iniFile, err := databrickscfg.Get() if errors.Is(err, fs.ErrNotExist) { // it's fine not to have ~/.databrickscfg return nil diff --git a/cmd/auth/login.go b/cmd/auth/login.go index e248118a..cf1d5c30 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -61,7 +61,7 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { } // If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile.
- _, profiles, err := databrickscfg.LoadProfiles(databrickscfg.DefaultPath, func(p databrickscfg.Profile) bool { + _, profiles, err := databrickscfg.LoadProfiles(func(p databrickscfg.Profile) bool { return p.Name == profileName }) if err != nil { diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 2b08164f..97d8eeab 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -5,32 +5,16 @@ import ( "fmt" "net/http" "os" - "path/filepath" - "strings" "sync" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" "gopkg.in/ini.v1" ) -func getDatabricksCfg() (*ini.File, error) { - configFile := os.Getenv("DATABRICKS_CONFIG_FILE") - if configFile == "" { - configFile = "~/.databrickscfg" - } - if strings.HasPrefix(configFile, "~") { - homedir, err := os.UserHomeDir() - if err != nil { - return nil, fmt.Errorf("cannot find homedir: %w", err) - } - configFile = filepath.Join(homedir, configFile[1:]) - } - return ini.Load(configFile) -} - type profileMetadata struct { Name string `json:"name"` Host string `json:"host,omitempty"` @@ -111,10 +95,12 @@ func newProfilesCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { var profiles []*profileMetadata - iniFile, err := getDatabricksCfg() + iniFile, err := databrickscfg.Get() if os.IsNotExist(err) { // return empty list for non-configured machines - iniFile = ini.Empty() + iniFile = &config.File{ + File: &ini.File{}, + } } else if err != nil { return fmt.Errorf("cannot parse config file: %w", err) } diff --git a/cmd/root/auth.go b/cmd/root/auth.go index c13f7463..2f32d260 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -40,10 +40,7 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { // 1. only admins will have account configured // 2. 
99% of admins will have access to just one account // hence, we don't need to create a special "DEFAULT_ACCOUNT" profile yet - _, profiles, err := databrickscfg.LoadProfiles( - databrickscfg.DefaultPath, - databrickscfg.MatchAccountProfiles, - ) + _, profiles, err := databrickscfg.LoadProfiles(databrickscfg.MatchAccountProfiles) if err != nil { return err } @@ -124,8 +121,11 @@ func transformLoadError(path string, err error) error { } func askForWorkspaceProfile() (string, error) { - path := databrickscfg.DefaultPath - file, profiles, err := databrickscfg.LoadProfiles(path, databrickscfg.MatchWorkspaceProfiles) + path, err := databrickscfg.GetPath() + if err != nil { + return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) + } + file, profiles, err := databrickscfg.LoadProfiles(databrickscfg.MatchWorkspaceProfiles) if err != nil { return "", transformLoadError(path, err) } @@ -156,8 +156,11 @@ func askForWorkspaceProfile() (string, error) { } func askForAccountProfile() (string, error) { - path := databrickscfg.DefaultPath - file, profiles, err := databrickscfg.LoadProfiles(path, databrickscfg.MatchAccountProfiles) + path, err := databrickscfg.GetPath() + if err != nil { + return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) + } + file, profiles, err := databrickscfg.LoadProfiles(databrickscfg.MatchAccountProfiles) if err != nil { return "", transformLoadError(path, err) } diff --git a/libs/databrickscfg/profiles.go b/libs/databrickscfg/profiles.go index 7892bddd..864000d0 100644 --- a/libs/databrickscfg/profiles.go +++ b/libs/databrickscfg/profiles.go @@ -1,7 +1,9 @@ package databrickscfg import ( + "fmt" "os" + "path/filepath" "strings" "github.com/databricks/databricks-sdk-go/config" @@ -64,12 +66,34 @@ func MatchAllProfiles(p Profile) bool { return true } -const DefaultPath = "~/.databrickscfg" +// Get the path to the .databrickscfg file, falling back to the default in the current user's home directory. 
+func GetPath() (string, error) { + configFile := os.Getenv("DATABRICKS_CONFIG_FILE") + if configFile == "" { + configFile = "~/.databrickscfg" + } + if strings.HasPrefix(configFile, "~") { + homedir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("cannot find homedir: %w", err) + } + configFile = filepath.Join(homedir, configFile[1:]) + } + return configFile, nil +} -func LoadProfiles(path string, fn ProfileMatchFunction) (file string, profiles Profiles, err error) { - f, err := config.LoadFile(path) +func Get() (*config.File, error) { + configFile, err := GetPath() if err != nil { - return + return nil, fmt.Errorf("cannot determine Databricks config file path: %w", err) + } + return config.LoadFile(configFile) +} + +func LoadProfiles(fn ProfileMatchFunction) (file string, profiles Profiles, err error) { + f, err := Get() + if err != nil { + return "", nil, fmt.Errorf("cannot load Databricks config file: %w", err) } homedir, err := os.UserHomeDir() @@ -106,7 +130,7 @@ func LoadProfiles(path string, fn ProfileMatchFunction) (file string, profiles P } func ProfileCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - _, profiles, err := LoadProfiles(DefaultPath, MatchAllProfiles) + _, profiles, err := LoadProfiles(MatchAllProfiles) if err != nil { return nil, cobra.ShellCompDirectiveError } diff --git a/libs/databrickscfg/profiles_test.go b/libs/databrickscfg/profiles_test.go index 582c6658..b1acdce9 100644 --- a/libs/databrickscfg/profiles_test.go +++ b/libs/databrickscfg/profiles_test.go @@ -32,19 +32,22 @@ func TestLoadProfilesReturnsHomedirAsTilde(t *testing.T) { } else { t.Setenv("HOME", "./testdata") } - file, _, err := LoadProfiles("./testdata/databrickscfg", func(p Profile) bool { return true }) + t.Setenv("DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") + file, _, err := LoadProfiles(func(p Profile) bool { return true }) require.NoError(t, err) assert.Equal(t, "~/databrickscfg", file) } func TestLoadProfilesMatchWorkspace(t *testing.T) { - _, profiles, err := LoadProfiles("./testdata/databrickscfg", MatchWorkspaceProfiles) + t.Setenv("DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") + _, profiles, err := LoadProfiles(MatchWorkspaceProfiles) require.NoError(t, err) assert.Equal(t, []string{"DEFAULT", "query", "foo1", "foo2"}, profiles.Names()) } func TestLoadProfilesMatchAccount(t *testing.T) { - _, profiles, err := LoadProfiles("./testdata/databrickscfg", MatchAccountProfiles) + t.Setenv("DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") + _, profiles, err := LoadProfiles(MatchAccountProfiles) require.NoError(t, err) assert.Equal(t, []string{"acc"}, profiles.Names()) } From 8dc69365817cdc9b21b0e938cd73e8d9cd5a3f7f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 15 Aug 2023 11:58:54 +0200 Subject: [PATCH 017/310] Merge artifacts and resources block with overrides enabled (#660) ## Changes Originally, these blocks were merged with overrides. This was (inadvertently) disabled in #94. This change re-enables merging these blocks with overrides, such that any field set in an environment override always takes precedence over the field set in the base definition. ## Tests New unit test passes. 
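For illustration, a minimal sketch of these merge semantics using the same mergo library; the `Job` struct below is an illustrative example, not the bundle's actual resource type:

```
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Job is an illustrative stand-in for a resource with scalar and slice fields.
type Job struct {
	Name string
	Tags []string
}

func main() {
	base := Job{Name: "base job", Tags: []string{"team:data"}}
	override := Job{Name: "staging job", Tags: []string{"env:staging"}}

	// WithOverride lets non-empty fields in the override win over the base;
	// WithAppendSlice appends slice fields instead of replacing them.
	if err := mergo.Merge(&base, override, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		panic(err)
	}

	fmt.Println(base.Name) // staging job
	fmt.Println(base.Tags) // [team:data env:staging]
}
```

Without `mergo.WithOverride`, a non-empty field already present in the destination would be kept; with it, the value from the environment override takes precedence, which is the behavior this change restores.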
--- bundle/config/root.go | 10 +++++----- .../resources/databricks.yml | 20 +++++++++++++++++++ .../{ => workspace}/databricks.yml | 0 bundle/tests/environment_overrides_test.go | 18 +++++++++++++---- 4 files changed, 39 insertions(+), 9 deletions(-) create mode 100644 bundle/tests/environment_overrides/resources/databricks.yml rename bundle/tests/environment_overrides/{ => workspace}/databricks.yml (100%) diff --git a/bundle/config/root.go b/bundle/config/root.go index 4ca9d0a0..b6d1efc9 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -166,7 +166,7 @@ func (r *Root) Merge(other *Root) error { } // TODO: define and test semantics for merging. - return mergo.MergeWithOverwrite(r, other) + return mergo.Merge(r, other, mergo.WithOverride) } func (r *Root) MergeEnvironment(env *Environment) error { @@ -178,28 +178,28 @@ func (r *Root) MergeEnvironment(env *Environment) error { } if env.Bundle != nil { - err = mergo.MergeWithOverwrite(&r.Bundle, env.Bundle) + err = mergo.Merge(&r.Bundle, env.Bundle, mergo.WithOverride) if err != nil { return err } } if env.Workspace != nil { - err = mergo.MergeWithOverwrite(&r.Workspace, env.Workspace) + err = mergo.Merge(&r.Workspace, env.Workspace, mergo.WithOverride) if err != nil { return err } } if env.Artifacts != nil { - err = mergo.Merge(&r.Artifacts, env.Artifacts, mergo.WithAppendSlice) + err = mergo.Merge(&r.Artifacts, env.Artifacts, mergo.WithOverride, mergo.WithAppendSlice) if err != nil { return err } } if env.Resources != nil { - err = mergo.Merge(&r.Resources, env.Resources, mergo.WithAppendSlice) + err = mergo.Merge(&r.Resources, env.Resources, mergo.WithOverride, mergo.WithAppendSlice) if err != nil { return err } diff --git a/bundle/tests/environment_overrides/resources/databricks.yml b/bundle/tests/environment_overrides/resources/databricks.yml new file mode 100644 index 00000000..eef5dc01 --- /dev/null +++ b/bundle/tests/environment_overrides/resources/databricks.yml @@ -0,0 +1,20 @@ +bundle: + name: environment_overrides + +workspace: + host: https://acme.cloud.databricks.com/ + +resources: + jobs: + job1: + name: "base job" + +environments: + development: + default: true + + staging: + resources: + jobs: + job1: + name: "staging job" diff --git a/bundle/tests/environment_overrides/databricks.yml b/bundle/tests/environment_overrides/workspace/databricks.yml similarity index 100% rename from bundle/tests/environment_overrides/databricks.yml rename to bundle/tests/environment_overrides/workspace/databricks.yml diff --git a/bundle/tests/environment_overrides_test.go b/bundle/tests/environment_overrides_test.go index 4b8401c8..b8cc224a 100644 --- a/bundle/tests/environment_overrides_test.go +++ b/bundle/tests/environment_overrides_test.go @@ -6,12 +6,22 @@ import ( "github.com/stretchr/testify/assert" ) -func TestEnvironmentOverridesDev(t *testing.T) { - b := loadEnvironment(t, "./environment_overrides", "development") +func TestEnvironmentOverridesWorkspaceDev(t *testing.T) { + b := loadEnvironment(t, "./environment_overrides/workspace", "development") assert.Equal(t, "https://development.acme.cloud.databricks.com/", b.Config.Workspace.Host) } -func TestEnvironmentOverridesStaging(t *testing.T) { - b := loadEnvironment(t, "./environment_overrides", "staging") +func TestEnvironmentOverridesWorkspaceStaging(t *testing.T) { + b := loadEnvironment(t, "./environment_overrides/workspace", "staging") assert.Equal(t, "https://staging.acme.cloud.databricks.com/", b.Config.Workspace.Host) } + +func 
TestEnvironmentOverridesResourcesDev(t *testing.T) { + b := loadEnvironment(t, "./environment_overrides/resources", "development") + assert.Equal(t, "base job", b.Config.Resources.Jobs["job1"].Name) +} + +func TestEnvironmentOverridesResourcesStaging(t *testing.T) { + b := loadEnvironment(t, "./environment_overrides/resources", "staging") + assert.Equal(t, "staging job", b.Config.Resources.Jobs["job1"].Name) +} From 5cdaacacc37b60bbefd479284577ab60d224d45c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 15 Aug 2023 15:39:32 +0200 Subject: [PATCH 018/310] Locked terraform binary version to <= 1.5.5 (#666) ## Changes Locked terraform binary version to <= 1.5.5 --- bundle/deploy/terraform/init.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index eb3e99d1..924c1f09 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -57,7 +57,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con // Download Terraform to private bin directory. installer := &releases.LatestVersion{ Product: product.Terraform, - Constraints: version.MustConstraints(version.NewConstraint("<2.0")), + Constraints: version.MustConstraints(version.NewConstraint("<=1.5.5")), InstallDir: binDir, } execPath, err = installer.Install(ctx) From 6e708da6fca43ba2c2c15e849a6e09e122afc687 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 15 Aug 2023 15:50:40 +0200 Subject: [PATCH 019/310] Upgraded Go version to 1.21 (#664) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes Upgraded Go version to 1.21 Upgraded to use `slices` and `slog` from core instead of experimental. We still use `exp/maps`, as our code relies on `maps.Keys`, which is not part of the core package and would therefore require refactoring. ### Tests Integration tests passed ``` [DEBUG] Test execution command: /opt/homebrew/opt/go@1.21/bin/go test ./...
-json -timeout 1h -run ^TestAcc [DEBUG] Test execution directory: /Users/andrew.nester/cli 2023/08/15 13:20:51 [INFO] ✅ TestAccAlertsCreateErrWhenNoArguments (2.150s) 2023/08/15 13:20:52 [INFO] ✅ TestAccApiGet (0.580s) 2023/08/15 13:20:53 [INFO] ✅ TestAccClustersList (0.900s) 2023/08/15 13:20:54 [INFO] ✅ TestAccClustersGet (0.870s) 2023/08/15 13:21:06 [INFO] ✅ TestAccFilerWorkspaceFilesReadWrite (11.980s) 2023/08/15 13:21:13 [INFO] ✅ TestAccFilerWorkspaceFilesReadDir (7.060s) 2023/08/15 13:21:25 [INFO] ✅ TestAccFilerDbfsReadWrite (12.810s) 2023/08/15 13:21:33 [INFO] ✅ TestAccFilerDbfsReadDir (7.380s) 2023/08/15 13:21:41 [INFO] ✅ TestAccFilerWorkspaceNotebookConflict (7.760s) 2023/08/15 13:21:49 [INFO] ✅ TestAccFilerWorkspaceNotebookWithOverwriteFlag (8.660s) 2023/08/15 13:21:49 [INFO] ✅ TestAccFilerLocalReadWrite (0.020s) 2023/08/15 13:21:49 [INFO] ✅ TestAccFilerLocalReadDir (0.010s) 2023/08/15 13:21:52 [INFO] ✅ TestAccFsCatForDbfs (3.190s) 2023/08/15 13:21:53 [INFO] ✅ TestAccFsCatForDbfsOnNonExistentFile (0.890s) 2023/08/15 13:21:54 [INFO] ✅ TestAccFsCatForDbfsInvalidScheme (0.600s) 2023/08/15 13:21:57 [INFO] ✅ TestAccFsCatDoesNotSupportOutputModeJson (2.960s) 2023/08/15 13:22:28 [INFO] ✅ TestAccFsCpDir (31.480s) 2023/08/15 13:22:43 [INFO] ✅ TestAccFsCpFileToFile (14.530s) 2023/08/15 13:22:58 [INFO] ✅ TestAccFsCpFileToDir (14.610s) 2023/08/15 13:23:29 [INFO] ✅ TestAccFsCpDirToDirFileNotOverwritten (31.810s) 2023/08/15 13:23:47 [INFO] ✅ TestAccFsCpFileToDirFileNotOverwritten (17.500s) 2023/08/15 13:24:04 [INFO] ✅ TestAccFsCpFileToFileFileNotOverwritten (17.260s) 2023/08/15 13:24:37 [INFO] ✅ TestAccFsCpDirToDirWithOverwriteFlag (32.690s) 2023/08/15 13:24:56 [INFO] ✅ TestAccFsCpFileToFileWithOverwriteFlag (19.290s) 2023/08/15 13:25:15 [INFO] ✅ TestAccFsCpFileToDirWithOverwriteFlag (19.230s) 2023/08/15 13:25:17 [INFO] ✅ TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag (2.010s) 2023/08/15 13:25:18 [INFO] ✅ TestAccFsCpErrorsOnInvalidScheme (0.610s) 2023/08/15 13:25:33 [INFO] ✅ TestAccFsCpSourceIsDirectoryButTargetIsFile (14.900s) 2023/08/15 13:25:37 [INFO] ✅ TestAccFsLsForDbfs (3.770s) 2023/08/15 13:25:41 [INFO] ✅ TestAccFsLsForDbfsWithAbsolutePaths (4.160s) 2023/08/15 13:25:44 [INFO] ✅ TestAccFsLsForDbfsOnFile (2.990s) 2023/08/15 13:25:46 [INFO] ✅ TestAccFsLsForDbfsOnEmptyDir (1.870s) 2023/08/15 13:25:46 [INFO] ✅ TestAccFsLsForDbfsForNonexistingDir (0.850s) 2023/08/15 13:25:47 [INFO] ✅ TestAccFsLsWithoutScheme (0.560s) 2023/08/15 13:25:49 [INFO] ✅ TestAccFsMkdirCreatesDirectory (2.310s) 2023/08/15 13:25:52 [INFO] ✅ TestAccFsMkdirCreatesMultipleDirectories (2.920s) 2023/08/15 13:25:55 [INFO] ✅ TestAccFsMkdirWhenDirectoryAlreadyExists (2.320s) 2023/08/15 13:25:57 [INFO] ✅ TestAccFsMkdirWhenFileExistsAtPath (2.820s) 2023/08/15 13:26:01 [INFO] ✅ TestAccFsRmForFile (4.030s) 2023/08/15 13:26:05 [INFO] ✅ TestAccFsRmForEmptyDirectory (3.530s) 2023/08/15 13:26:08 [INFO] ✅ TestAccFsRmForNonEmptyDirectory (3.190s) 2023/08/15 13:26:09 [INFO] ✅ TestAccFsRmForNonExistentFile (0.830s) 2023/08/15 13:26:13 [INFO] ✅ TestAccFsRmForNonEmptyDirectoryWithRecursiveFlag (3.580s) 2023/08/15 13:26:13 [INFO] ✅ TestAccGitClone (0.800s) 2023/08/15 13:26:14 [INFO] ✅ TestAccGitCloneWithOnlyRepoNameOnAlternateBranch (0.790s) 2023/08/15 13:26:15 [INFO] ✅ TestAccGitCloneErrorsWhenRepositoryDoesNotExist (0.540s) 2023/08/15 13:26:23 [INFO] ✅ TestAccLock (8.630s) 2023/08/15 13:26:27 [INFO] ✅ TestAccLockUnlockWithoutAllowsLockFileNotExist (3.490s) 2023/08/15 13:26:30 [INFO] ✅ TestAccLockUnlockWithAllowsLockFileNotExist 
(3.130s) 2023/08/15 13:26:39 [INFO] ✅ TestAccSyncFullFileSync (9.370s) 2023/08/15 13:26:50 [INFO] ✅ TestAccSyncIncrementalFileSync (10.390s) 2023/08/15 13:27:00 [INFO] ✅ TestAccSyncNestedFolderSync (10.680s) 2023/08/15 13:27:11 [INFO] ✅ TestAccSyncNestedFolderDoesntFailOnNonEmptyDirectory (10.970s) 2023/08/15 13:27:22 [INFO] ✅ TestAccSyncNestedSpacePlusAndHashAreEscapedSync (10.930s) 2023/08/15 13:27:29 [INFO] ✅ TestAccSyncIncrementalFileOverwritesFolder (7.020s) 2023/08/15 13:27:37 [INFO] ✅ TestAccSyncIncrementalSyncPythonNotebookToFile (7.380s) 2023/08/15 13:27:43 [INFO] ✅ TestAccSyncIncrementalSyncFileToPythonNotebook (6.050s) 2023/08/15 13:27:48 [INFO] ✅ TestAccSyncIncrementalSyncPythonNotebookDelete (5.390s) 2023/08/15 13:27:51 [INFO] ✅ TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist (2.570s) 2023/08/15 13:27:56 [INFO] ✅ TestAccSyncEnsureRemotePathIsUsableIfRepoExists (5.540s) 2023/08/15 13:27:58 [INFO] ✅ TestAccSyncEnsureRemotePathIsUsableInWorkspace (1.840s) 2023/08/15 13:27:59 [INFO] ✅ TestAccWorkspaceList (0.790s) 2023/08/15 13:28:08 [INFO] ✅ TestAccExportDir (8.860s) 2023/08/15 13:28:11 [INFO] ✅ TestAccExportDirDoesNotOverwrite (3.090s) 2023/08/15 13:28:14 [INFO] ✅ TestAccExportDirWithOverwriteFlag (3.500s) 2023/08/15 13:28:23 [INFO] ✅ TestAccImportDir (8.330s) 2023/08/15 13:28:34 [INFO] ✅ TestAccImportDirDoesNotOverwrite (10.970s) 2023/08/15 13:28:44 [INFO] ✅ TestAccImportDirWithOverwriteFlag (10.130s) 2023/08/15 13:28:44 [INFO] ✅ 68/68 passed, 0 failed, 3 skipped ``` --- .github/workflows/push.yml | 6 ++---- .github/workflows/release-snapshot.yml | 2 +- .github/workflows/release.yml | 2 +- bundle/artifacts/all.go | 3 ++- bundle/config/interpolation/interpolation.go | 3 ++- bundle/config/interpolation/lookup.go | 3 +-- bundle/config/mutator/process_root_includes.go | 2 +- bundle/internal/tf/codegen/generator/util.go | 3 ++- bundle/internal/tf/codegen/generator/walker.go | 3 ++- cmd/root/logger.go | 11 ++++++----- cmd/root/root.go | 3 ++- go.mod | 2 +- go.sum | 15 +++++++++++++++ internal/acc/helpers.go | 2 -- internal/helpers.go | 1 - libs/auth/oauth.go | 3 +-- libs/cmdio/io.go | 6 +++--- libs/filer/dbfs_client.go | 2 +- libs/filer/files_client.go | 2 +- libs/filer/local_client.go | 3 +-- libs/filer/slice.go | 2 +- libs/filer/workspace_files_client.go | 2 +- libs/flags/log_level_flag.go | 2 +- libs/locker/locker.go | 2 +- libs/log/context.go | 2 +- libs/log/levels.go | 2 +- libs/log/logger.go | 2 +- libs/log/replace_attr.go | 2 +- libs/log/replace_attr_test.go | 2 +- libs/log/sdk.go | 3 ++- libs/log/source.go | 3 +-- libs/log/source_test.go | 2 +- libs/template/renderer.go | 2 +- libs/template/validators.go | 2 +- 34 files changed, 60 insertions(+), 47 deletions(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index c47cfc72..6f14fe88 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -28,7 +28,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: 1.19.5 + go-version: 1.21.0 cache: true - name: Set go env @@ -56,9 +56,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - # Use 1.19 because of godoc formatting. - # See https://tip.golang.org/doc/go1.19#go-doc. - go-version: 1.19 + go-version: 1.21 # No need to download cached dependencies when running gofmt. 
cache: false diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index 3acb6b5a..130d49dd 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -22,7 +22,7 @@ jobs: id: go uses: actions/setup-go@v3 with: - go-version: 1.19.5 + go-version: 1.21.0 - name: Locate cache paths id: cache diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c1ecef01..5992dcb4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -21,7 +21,7 @@ jobs: id: go uses: actions/setup-go@v3 with: - go-version: 1.19.5 + go-version: 1.21.0 - name: Locate cache paths id: cache diff --git a/bundle/artifacts/all.go b/bundle/artifacts/all.go index b6a3f7dc..1a1661e5 100644 --- a/bundle/artifacts/all.go +++ b/bundle/artifacts/all.go @@ -4,9 +4,10 @@ import ( "context" "fmt" + "slices" + "github.com/databricks/cli/bundle" "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) // all is an internal proxy for producing a list of mutators for all artifacts. diff --git a/bundle/config/interpolation/interpolation.go b/bundle/config/interpolation/interpolation.go index bf19804a..bf5bd169 100644 --- a/bundle/config/interpolation/interpolation.go +++ b/bundle/config/interpolation/interpolation.go @@ -9,10 +9,11 @@ import ( "sort" "strings" + "slices" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/variable" "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) const Delimiter = "." diff --git a/bundle/config/interpolation/lookup.go b/bundle/config/interpolation/lookup.go index 932d739e..3dc5047a 100644 --- a/bundle/config/interpolation/lookup.go +++ b/bundle/config/interpolation/lookup.go @@ -3,9 +3,8 @@ package interpolation import ( "errors" "fmt" + "slices" "strings" - - "golang.org/x/exp/slices" ) // LookupFunction returns the value to rewrite a path expression to. diff --git a/bundle/config/mutator/process_root_includes.go b/bundle/config/mutator/process_root_includes.go index c2dffc6e..98992872 100644 --- a/bundle/config/mutator/process_root_includes.go +++ b/bundle/config/mutator/process_root_includes.go @@ -5,11 +5,11 @@ import ( "fmt" "os" "path/filepath" + "slices" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "golang.org/x/exp/slices" ) // Get extra include paths from environment variable diff --git a/bundle/internal/tf/codegen/generator/util.go b/bundle/internal/tf/codegen/generator/util.go index 890417d8..6e703a70 100644 --- a/bundle/internal/tf/codegen/generator/util.go +++ b/bundle/internal/tf/codegen/generator/util.go @@ -1,8 +1,9 @@ package generator import ( + "slices" + "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) // sortKeys returns a sorted copy of the keys in the specified map. 
diff --git a/bundle/internal/tf/codegen/generator/walker.go b/bundle/internal/tf/codegen/generator/walker.go index 9532e0e4..2ed044c3 100644 --- a/bundle/internal/tf/codegen/generator/walker.go +++ b/bundle/internal/tf/codegen/generator/walker.go @@ -4,10 +4,11 @@ import ( "fmt" "strings" + "slices" + tfjson "github.com/hashicorp/terraform-json" "github.com/iancoleman/strcase" "github.com/zclconf/go-cty/cty" - "golang.org/x/exp/slices" ) type field struct { diff --git a/cmd/root/logger.go b/cmd/root/logger.go index 87f69550..ddfae445 100644 --- a/cmd/root/logger.go +++ b/cmd/root/logger.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "log/slog" "os" "github.com/databricks/cli/libs/cmdio" @@ -11,7 +12,6 @@ import ( "github.com/databricks/cli/libs/log" "github.com/fatih/color" "github.com/spf13/cobra" - "golang.org/x/exp/slog" ) const ( @@ -52,11 +52,12 @@ func (l *friendlyHandler) coloredLevel(rec slog.Record) string { func (l *friendlyHandler) Handle(ctx context.Context, rec slog.Record) error { t := fmt.Sprintf("%02d:%02d", rec.Time.Hour(), rec.Time.Minute()) attrs := "" - rec.Attrs(func(a slog.Attr) { + rec.Attrs(func(a slog.Attr) bool { attrs += fmt.Sprintf(" %s%s%s", color.CyanString(a.Key), color.CyanString("="), color.YellowString(a.Value.String())) + return true }) msg := fmt.Sprintf("%s %s %s%s\n", color.MagentaString(t), @@ -76,16 +77,16 @@ type logFlags struct { func (f *logFlags) makeLogHandler(opts slog.HandlerOptions) (slog.Handler, error) { switch f.output { case flags.OutputJSON: - return opts.NewJSONHandler(f.file.Writer()), nil + return slog.NewJSONHandler(f.file.Writer(), &opts), nil case flags.OutputText: w := f.file.Writer() if cmdio.IsTTY(w) { return &friendlyHandler{ - Handler: opts.NewTextHandler(w), + Handler: slog.NewTextHandler(w, &opts), w: w, }, nil } - return opts.NewTextHandler(w), nil + return slog.NewTextHandler(w, &opts), nil default: return nil, fmt.Errorf("invalid log output mode: %s", f.output) diff --git a/cmd/root/root.go b/cmd/root/root.go index 0a18594a..48868b41 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -6,11 +6,12 @@ import ( "os" "strings" + "log/slog" + "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/log" "github.com/spf13/cobra" - "golang.org/x/exp/slog" ) func New() *cobra.Command { diff --git a/go.mod b/go.mod index c3efa91b..9534a4c9 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/databricks/cli -go 1.18 +go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 diff --git a/go.sum b/go.sum index 1edb3b48..b8c90e5e 100644 --- a/go.sum +++ b/go.sum @@ -6,9 +6,11 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= +github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/antihax/optional 
v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= @@ -40,6 +42,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -52,8 +55,11 @@ github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBD github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= +github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk= +github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC2MDs4ee8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -84,6 +90,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= @@ -94,6 +101,7 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= @@ -110,7 +118,9 @@ github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -124,6 +134,7 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/nwidger/jsoncolor v0.3.2 h1:rVJJlwAWDJShnbTYOQ5RM7yTA20INyKXlJ/fg4JMhHQ= github.com/nwidger/jsoncolor v0.3.2/go.mod h1:Cs34umxLbJvgBMnVNVqhji9BhoT/N/KinHqZptQ7cf4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -134,7 +145,9 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= +github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -153,6 +166,7 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU= github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0= github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= @@ -283,6 +297,7 @@ gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/internal/acc/helpers.go b/internal/acc/helpers.go index aa990274..f9800134 100644 --- a/internal/acc/helpers.go +++ b/internal/acc/helpers.go @@ -6,7 +6,6 @@ import ( "os" "strings" "testing" - "time" ) // GetEnvOrSkipTest proceeds with test only with that env variable. @@ -22,7 +21,6 @@ const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" // RandomName gives random name with optional prefix. e.g. qa.RandomName("tf-") func RandomName(prefix ...string) string { - rand.Seed(time.Now().UnixNano()) randLen := 12 b := make([]byte, randLen) for i := range b { diff --git a/internal/helpers.go b/internal/helpers.go index 194f0eee..ddc00517 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -37,7 +37,6 @@ func GetEnvOrSkipTest(t *testing.T, name string) string { // RandomName gives random name with optional prefix. e.g. qa.RandomName("tf-") func RandomName(prefix ...string) string { - rand.Seed(time.Now().UnixNano()) randLen := 12 b := make([]byte, randLen) for i := range b { diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index b7e0ce2f..dd27d04b 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -2,6 +2,7 @@ package auth import ( "context" + "crypto/rand" "crypto/sha256" _ "embed" "encoding/base64" @@ -9,7 +10,6 @@ import ( "errors" "fmt" "io" - "math/rand" "net" "net/http" "strings" @@ -255,7 +255,6 @@ func (a *PersistentAuth) stateAndPKCE() (string, *authhandler.PKCEParams) { } func (a *PersistentAuth) randomString(size int) string { - rand.Seed(time.Now().UnixNano()) raw := make([]byte, size) _, _ = rand.Read(raw) return base64.RawURLEncoding.EncodeToString(raw) diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index a60231c0..bc5a5f30 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "slices" "strings" "time" @@ -12,7 +13,6 @@ import ( "github.com/databricks/cli/libs/flags" "github.com/manifoldco/promptui" "github.com/mattn/go-isatty" - "golang.org/x/exp/slices" ) // cmdIO is the private instance, that is not supposed to be accessed @@ -140,8 +140,8 @@ func (c *cmdIO) Select(names map[string]string, label string) (id string, err er for k, v := range names { items = append(items, tuple{k, v}) } - slices.SortFunc(items, func(a, b tuple) bool { - return a.Name < b.Name + slices.SortFunc(items, func(a, b tuple) int { + return strings.Compare(a.Name, b.Name) }) idx, _, err := (&promptui.Select{ Label: label, diff --git a/libs/filer/dbfs_client.go b/libs/filer/dbfs_client.go index 64eb4b77..38e8f9f3 100644 --- a/libs/filer/dbfs_client.go +++ b/libs/filer/dbfs_client.go @@ -7,6 +7,7 @@ import ( "io/fs" "net/http" "path" + "slices" "sort" "strings" "time" @@ -14,7 +15,6 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/files" - "golang.org/x/exp/slices" ) // Type that implements fs.DirEntry for 
DBFS. diff --git a/libs/filer/files_client.go b/libs/filer/files_client.go index ee7587dc..285338b6 100644 --- a/libs/filer/files_client.go +++ b/libs/filer/files_client.go @@ -10,13 +10,13 @@ import ( "net/http" "net/url" "path" + "slices" "strings" "time" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/client" - "golang.org/x/exp/slices" ) // Type that implements fs.FileInfo for the Files API. diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 8d960c84..958b6277 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -6,8 +6,7 @@ import ( "io/fs" "os" "path/filepath" - - "golang.org/x/exp/slices" + "slices" ) // LocalClient implements the [Filer] interface for the local filesystem. diff --git a/libs/filer/slice.go b/libs/filer/slice.go index c35d6e78..077bb305 100644 --- a/libs/filer/slice.go +++ b/libs/filer/slice.go @@ -1,6 +1,6 @@ package filer -import "golang.org/x/exp/slices" +import "slices" // sliceWithout returns a copy of the specified slice without element e, if it is present. func sliceWithout[S []E, E comparable](s S, e E) S { diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go index db06f91c..ed4ad7a2 100644 --- a/libs/filer/workspace_files_client.go +++ b/libs/filer/workspace_files_client.go @@ -11,6 +11,7 @@ import ( "net/url" "path" "regexp" + "slices" "sort" "strings" "time" @@ -19,7 +20,6 @@ import ( "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/service/workspace" - "golang.org/x/exp/slices" ) // Type that implements fs.DirEntry for WSFS. diff --git a/libs/flags/log_level_flag.go b/libs/flags/log_level_flag.go index f5d305a5..836d84b7 100644 --- a/libs/flags/log_level_flag.go +++ b/libs/flags/log_level_flag.go @@ -2,12 +2,12 @@ package flags import ( "fmt" + "log/slog" "strings" "github.com/databricks/cli/libs/log" "github.com/spf13/cobra" "golang.org/x/exp/maps" - "golang.org/x/exp/slog" ) var levels = map[string]slog.Level{ diff --git a/libs/locker/locker.go b/libs/locker/locker.go index bb95b784..66993156 100644 --- a/libs/locker/locker.go +++ b/libs/locker/locker.go @@ -8,12 +8,12 @@ import ( "fmt" "io" "io/fs" + "slices" "time" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/google/uuid" - "golang.org/x/exp/slices" ) type UnlockOption int diff --git a/libs/log/context.go b/libs/log/context.go index 7ed1d292..d9e31d11 100644 --- a/libs/log/context.go +++ b/libs/log/context.go @@ -3,7 +3,7 @@ package log import ( "context" - "golang.org/x/exp/slog" + "log/slog" ) type logger int diff --git a/libs/log/levels.go b/libs/log/levels.go index f6277cf3..cdb5a1e1 100644 --- a/libs/log/levels.go +++ b/libs/log/levels.go @@ -1,6 +1,6 @@ package log -import "golang.org/x/exp/slog" +import "log/slog" const ( LevelTrace slog.Level = -8 diff --git a/libs/log/logger.go b/libs/log/logger.go index 80d8782c..43a30e92 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -6,7 +6,7 @@ import ( "runtime" "time" - "golang.org/x/exp/slog" + "log/slog" ) // GetLogger returns either the logger configured on the context, diff --git a/libs/log/replace_attr.go b/libs/log/replace_attr.go index 55d2c15f..b71e47d2 100644 --- a/libs/log/replace_attr.go +++ b/libs/log/replace_attr.go @@ -1,6 +1,6 @@ package log -import "golang.org/x/exp/slog" +import "log/slog" type ReplaceAttrFunction 
func(groups []string, a slog.Attr) slog.Attr diff --git a/libs/log/replace_attr_test.go b/libs/log/replace_attr_test.go index dce11be1..afedeaa6 100644 --- a/libs/log/replace_attr_test.go +++ b/libs/log/replace_attr_test.go @@ -1,10 +1,10 @@ package log import ( + "log/slog" "testing" "github.com/stretchr/testify/assert" - "golang.org/x/exp/slog" ) func testReplaceA(groups []string, a slog.Attr) slog.Attr { diff --git a/libs/log/sdk.go b/libs/log/sdk.go index 975f83aa..e1b1ffed 100644 --- a/libs/log/sdk.go +++ b/libs/log/sdk.go @@ -6,8 +6,9 @@ import ( "runtime" "time" + "log/slog" + sdk "github.com/databricks/databricks-sdk-go/logger" - "golang.org/x/exp/slog" ) // slogAdapter makes an slog.Logger usable with the Databricks SDK. diff --git a/libs/log/source.go b/libs/log/source.go index 4a30aaab..d0fd30dc 100644 --- a/libs/log/source.go +++ b/libs/log/source.go @@ -1,9 +1,8 @@ package log import ( + "log/slog" "path/filepath" - - "golang.org/x/exp/slog" ) // ReplaceSourceAttr rewrites the source attribute to include only the file's basename. diff --git a/libs/log/source_test.go b/libs/log/source_test.go index 010aad5a..5c587af6 100644 --- a/libs/log/source_test.go +++ b/libs/log/source_test.go @@ -1,10 +1,10 @@ package log import ( + "log/slog" "testing" "github.com/stretchr/testify/assert" - "golang.org/x/exp/slog" ) func TestReplaceSourceAttrSourceKey(t *testing.T) { diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 76479c05..9be1b58e 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -8,13 +8,13 @@ import ( "os" "path" "path/filepath" + "slices" "strings" "text/template" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/logger" - "golang.org/x/exp/slices" ) const templateExtension = ".tmpl" diff --git a/libs/template/validators.go b/libs/template/validators.go index 57eda093..209700b6 100644 --- a/libs/template/validators.go +++ b/libs/template/validators.go @@ -3,9 +3,9 @@ package template import ( "fmt" "reflect" + "slices" "github.com/databricks/cli/libs/jsonschema" - "golang.org/x/exp/slices" ) type validator func(v any) error From 878bb6deaed425d5d5c47b7330f47209b2ca71ca Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 15 Aug 2023 16:28:04 +0200 Subject: [PATCH 020/310] Return better error messages for invalid JSON schema types in templates (#661) ## Changes Adds a function to validate JSON schema types added by the author. The default JSON unmarshaller does not validate that the parsed type matches the enum defined in `jsonschema.Type`. It also includes some other improvements to provide better error messages. This PR was prompted by usability difficulties reported by @mingyu89 during the mlops stacks migration.
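For illustration, a minimal sketch of the new behavior (the schema file name and the `num_workers` property below are made up for this example; the error text is the one produced by the new `validate` function):

```go
// Suppose a template author writes a schema that uses the Go type name
// "int" instead of the JSON schema type "integer":
//
//	{
//	  "properties": {
//	    "num_workers": { "type": "int" }
//	  }
//	}
//
// The default unmarshaller would accept "int" silently and the mismatch
// would only surface later. Load now fails fast with a friendly error:
_, err := jsonschema.Load("databricks_template_schema.json")
fmt.Println(err)
// type int is not a recognized json schema type. Please use "integer" instead
```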
## Tests Unit tests --- libs/jsonschema/schema.go | 37 ++++++++++++++++++++++++++++ libs/jsonschema/schema_test.go | 44 ++++++++++++++++++++++++++++++++++ libs/template/config.go | 15 ++++++++---- libs/template/config_test.go | 31 ++++++++++++++++++++++++ libs/template/utils.go | 8 +++++-- libs/template/utils_test.go | 6 +++++ 6 files changed, 135 insertions(+), 6 deletions(-) create mode 100644 libs/jsonschema/schema_test.go diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 49e31bb7..c0d1736c 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -1,5 +1,11 @@ package jsonschema +import ( + "encoding/json" + "fmt" + "os" +) + // defines schema for a json object type Schema struct { // Type of the object @@ -47,3 +53,34 @@ const ( ArrayType Type = "array" IntegerType Type = "integer" ) + +func (schema *Schema) validate() error { + for _, v := range schema.Properties { + switch v.Type { + case NumberType, BooleanType, StringType, IntegerType: + continue + case "int", "int32", "int64": + return fmt.Errorf("type %s is not a recognized json schema type. Please use \"integer\" instead", v.Type) + case "float", "float32", "float64": + return fmt.Errorf("type %s is not a recognized json schema type. Please use \"number\" instead", v.Type) + case "bool": + return fmt.Errorf("type %s is not a recognized json schema type. Please use \"boolean\" instead", v.Type) + default: + return fmt.Errorf("type %s is not a recognized json schema type", v.Type) + } + } + return nil +} + +func Load(path string) (*Schema, error) { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + schema := &Schema{} + err = json.Unmarshal(b, schema) + if err != nil { + return nil, err + } + return schema, schema.validate() +} diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go new file mode 100644 index 00000000..76112492 --- /dev/null +++ b/libs/jsonschema/schema_test.go @@ -0,0 +1,44 @@ +package jsonschema + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestJsonSchemaValidate(t *testing.T) { + var err error + toSchema := func(s string) *Schema { + return &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: Type(s), + }, + }, + } + } + + err = toSchema("string").validate() + assert.NoError(t, err) + + err = toSchema("boolean").validate() + assert.NoError(t, err) + + err = toSchema("number").validate() + assert.NoError(t, err) + + err = toSchema("integer").validate() + assert.NoError(t, err) + + err = toSchema("int").validate() + assert.EqualError(t, err, "type int is not a recognized json schema type. Please use \"integer\" instead") + + err = toSchema("float").validate() + assert.EqualError(t, err, "type float is not a recognized json schema type. Please use \"number\" instead") + + err = toSchema("bool").validate() + assert.EqualError(t, err, "type bool is not a recognized json schema type. 
Please use \"boolean\" instead") + + err = toSchema("foobar").validate() + assert.EqualError(t, err, "type foobar is not a recognized json schema type") +} diff --git a/libs/template/config.go b/libs/template/config.go index ee5fcbef..173244b0 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -18,13 +18,11 @@ type config struct { func newConfig(ctx context.Context, schemaPath string) (*config, error) { // Read config schema - schemaBytes, err := os.ReadFile(schemaPath) + schema, err := jsonschema.Load(schemaPath) if err != nil { return nil, err } - schema := &jsonschema.Schema{} - err = json.Unmarshal(schemaBytes, schema) - if err != nil { + if err := validateSchema(schema); err != nil { return nil, err } @@ -36,6 +34,15 @@ func newConfig(ctx context.Context, schemaPath string) (*config, error) { }, nil } +func validateSchema(schema *jsonschema.Schema) error { + for _, v := range schema.Properties { + if v.Type == jsonschema.ArrayType || v.Type == jsonschema.ObjectType { + return fmt.Errorf("property type %s is not supported by bundle templates", v.Type) + } + } + return nil +} + // Reads json file at path and assigns values from the file func (c *config) assignValuesFromFile(path string) error { // Read the config file diff --git a/libs/template/config_test.go b/libs/template/config_test.go index 7b8341ec..33524246 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -161,3 +161,34 @@ func TestTemplateConfigValidateTypeForInvalidType(t *testing.T) { err = c.validate() assert.EqualError(t, err, `incorrect type for int_val. expected type integer, but value is "this-should-be-an-int"`) } + +func TestTemplateValidateSchema(t *testing.T) { + var err error + toSchema := func(s string) *jsonschema.Schema { + return &jsonschema.Schema{ + Properties: map[string]*jsonschema.Schema{ + "foo": { + Type: jsonschema.Type(s), + }, + }, + } + } + + err = validateSchema(toSchema("string")) + assert.NoError(t, err) + + err = validateSchema(toSchema("boolean")) + assert.NoError(t, err) + + err = validateSchema(toSchema("number")) + assert.NoError(t, err) + + err = validateSchema(toSchema("integer")) + assert.NoError(t, err) + + err = validateSchema(toSchema("object")) + assert.EqualError(t, err, "property type object is not supported by bundle templates") + + err = validateSchema(toSchema("array")) + assert.EqualError(t, err, "property type array is not supported by bundle templates") +} diff --git a/libs/template/utils.go b/libs/template/utils.go index bf11ed86..ade6a573 100644 --- a/libs/template/utils.go +++ b/libs/template/utils.go @@ -66,8 +66,10 @@ func toString(v any, T jsonschema.Type) (string, error) { return "", err } return strconv.FormatInt(intVal, 10), nil - default: + case jsonschema.ArrayType, jsonschema.ObjectType: return "", fmt.Errorf("cannot format object of type %s as a string. Value of object: %#v", T, v) + default: + return "", fmt.Errorf("unknown json schema type: %q", T) } } @@ -87,8 +89,10 @@ func fromString(s string, T jsonschema.Type) (any, error) { v, err = strconv.ParseFloat(s, 32) case jsonschema.IntegerType: v, err = strconv.ParseInt(s, 10, 64) - default: + case jsonschema.ArrayType, jsonschema.ObjectType: return "", fmt.Errorf("cannot parse string as object of type %s. 
Value of string: %q", T, s) + default: + return "", fmt.Errorf("unknown json schema type: %q", T) } // Return more readable error incase of a syntax error diff --git a/libs/template/utils_test.go b/libs/template/utils_test.go index 5fe70243..1e038aac 100644 --- a/libs/template/utils_test.go +++ b/libs/template/utils_test.go @@ -80,6 +80,9 @@ func TestTemplateToString(t *testing.T) { _, err = toString("abc", jsonschema.IntegerType) assert.EqualError(t, err, "cannot convert \"abc\" to an integer") + + _, err = toString("abc", "foobar") + assert.EqualError(t, err, "unknown json schema type: \"foobar\"") } func TestTemplateFromString(t *testing.T) { @@ -112,4 +115,7 @@ func TestTemplateFromString(t *testing.T) { _, err = fromString("1.0", jsonschema.IntegerType) assert.EqualError(t, err, "could not parse \"1.0\" as a integer: strconv.ParseInt: parsing \"1.0\": invalid syntax") + + _, err = fromString("1.0", "foobar") + assert.EqualError(t, err, "unknown json schema type: \"foobar\"") } From 61b103318fabfce36bdc5271452afa3c9a94e89c Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 15 Aug 2023 16:50:20 +0200 Subject: [PATCH 021/310] Use custom prompter for bundle template inputs (#663) ## Changes Prompt UI glitches often. We are switching to a custom implementation of a simple prompter which is much more stable. This also allows new lines in prompts which has been an ask by the mlflow team. ## Tests Tested manually --- bundle/deploy/files/delete.go | 2 +- bundle/deploy/terraform/destroy.go | 2 +- libs/cmdio/logger.go | 54 ++++++++++++++++++++++++------ libs/cmdio/logger_test.go | 2 +- libs/template/config.go | 12 +++---- 5 files changed, 50 insertions(+), 22 deletions(-) diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go index 990eca47..9f7ad4d4 100644 --- a/bundle/deploy/files/delete.go +++ b/bundle/deploy/files/delete.go @@ -27,7 +27,7 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { red := color.New(color.FgRed).SprintFunc() if !b.AutoApprove { - proceed, err := cmdio.Ask(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?", b.Config.Workspace.RootPath, red("deleted permanently!"))) + proceed, err := cmdio.AskYesOrNo(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?", b.Config.Workspace.RootPath, red("deleted permanently!"))) if err != nil { return err } diff --git a/bundle/deploy/terraform/destroy.go b/bundle/deploy/terraform/destroy.go index 649542f6..0b3baba3 100644 --- a/bundle/deploy/terraform/destroy.go +++ b/bundle/deploy/terraform/destroy.go @@ -89,7 +89,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { // Ask for confirmation, if needed if !b.Plan.ConfirmApply { red := color.New(color.FgRed).SprintFunc() - b.Plan.ConfirmApply, err = cmdio.Ask(ctx, fmt.Sprintf("\nThis will permanently %s resources! Proceed?", red("destroy"))) + b.Plan.ConfirmApply, err = cmdio.AskYesOrNo(ctx, fmt.Sprintf("\nThis will permanently %s resources! 
Proceed?", red("destroy"))) if err != nil { return err } diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go index 3190a6a7..0663306e 100644 --- a/libs/cmdio/logger.go +++ b/libs/cmdio/logger.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "os" + "strings" "github.com/databricks/cli/libs/flags" ) @@ -74,33 +75,64 @@ func LogError(ctx context.Context, err error) { }) } -func Ask(ctx context.Context, question string) (bool, error) { +func Ask(ctx context.Context, question, defaultVal string) (string, error) { logger, ok := FromContext(ctx) if !ok { logger = Default() } - return logger.Ask(question) + return logger.Ask(question, defaultVal) } -func (l *Logger) Ask(question string) (bool, error) { - if l.Mode == flags.ModeJson { - return false, fmt.Errorf("question prompts are not supported in json mode") +func AskYesOrNo(ctx context.Context, question string) (bool, error) { + logger, ok := FromContext(ctx) + if !ok { + logger = Default() } // Add acceptable answers to the question prompt. - question += ` [y/n]:` - l.Writer.Write([]byte(question)) - ans, err := l.Reader.ReadString('\n') + question += ` [y/n]` + // Ask the question + ans, err := logger.Ask(question, "") if err != nil { return false, err } - if ans == "y\n" { + if ans == "y" { return true, nil - } else { - return false, nil } + return false, nil +} + +func (l *Logger) Ask(question string, defaultVal string) (string, error) { + if l.Mode == flags.ModeJson { + return "", fmt.Errorf("question prompts are not supported in json mode") + } + + // Add default value to question prompt. + if defaultVal != "" { + question += fmt.Sprintf(` [%s]`, defaultVal) + } + question += `: ` + + // print prompt + _, err := l.Writer.Write([]byte(question)) + if err != nil { + return "", err + } + + // read user input. 
Trim new line characters + ans, err := l.Reader.ReadString('\n') + if err != nil { + return "", err + } + ans = strings.Trim(ans, "\n\r") + + // Return default value if user just presses enter + if ans == "" { + return defaultVal, nil + } + return ans, nil } func (l *Logger) writeJson(event Event) { diff --git a/libs/cmdio/logger_test.go b/libs/cmdio/logger_test.go index ff715b11..da619046 100644 --- a/libs/cmdio/logger_test.go +++ b/libs/cmdio/logger_test.go @@ -9,6 +9,6 @@ import ( func TestAskFailedInJsonMode(t *testing.T) { l := NewLogger(flags.ModeJson) - _, err := l.Ask("What is your spirit animal?") + _, err := l.Ask("What is your spirit animal?", "") assert.ErrorContains(t, err, "question prompts are not supported in json mode") } diff --git a/libs/template/config.go b/libs/template/config.go index 173244b0..302a1361 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -123,22 +123,18 @@ func (c *config) promptForValues() error { continue } - // Initialize Prompt dialog - var err error - prompt := cmdio.Prompt(c.ctx) - prompt.Label = property.Description - prompt.AllowEdit = true - // Compute default value to display by converting it to a string + var defaultVal string + var err error if property.Default != nil { - prompt.Default, err = toString(property.Default, property.Type) + defaultVal, err = toString(property.Default, property.Type) if err != nil { return err } } // Get user input by running the prompt - userInput, err := prompt.Run() + userInput, err := cmdio.Ask(c.ctx, property.Description, defaultVal) if err != nil { return err } From 6c644e159c290d992122e8d7dfc1760ffb1c41be Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:07:22 +0200 Subject: [PATCH 022/310] Add map and pair helper functions for bundle templates (#665) ## Changes Go text templates allow only specifying one input argument for invocations of associated templates (i.e. `{{template ...}}`). This PR introduces the map and pair functions which allow template authors to work around this limitation by passing multiple arguments as key value pairs in a map. This PR is based on feedback from the mlops stacks migration where otherwise a bunch of duplicate code is required for computed values and fixtures. ## Tests Unit test --- libs/template/helpers.go | 24 +++++++++++++++++++ libs/template/helpers_test.go | 15 ++++++++++++ .../testdata/map-pair/library/abc.tmpl | 3 +++ .../testdata/map-pair/template/hello.tmpl | 1 + 4 files changed, 43 insertions(+) create mode 100644 libs/template/testdata/map-pair/library/abc.tmpl create mode 100644 libs/template/testdata/map-pair/template/hello.tmpl diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 94737c1e..ac846658 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -15,6 +15,11 @@ func (err ErrFail) Error() string { return err.msg } +type pair struct { + k string + v any +} + var helperFuncs = template.FuncMap{ "fail": func(format string, args ...any) (any, error) { return nil, ErrFail{fmt.Sprintf(format, args...)} }, @@ -27,4 +32,23 @@ var helperFuncs = template.FuncMap{ "regexp": func(expr string) (*regexp.Regexp, error) { return regexp.Compile(expr) }, + // A key value pair. This is used with the map function to generate maps + // to use inside a template + "pair": func(k string, v any) pair { + return pair{k, v} + }, + // map converts a list of pairs to a map object. 
This is useful to pass multiple + // objects to templates defined in the library directory. Go text template + // syntax for invoking a template only allows specifying a single argument; + // this function can be used to work around that limitation. + // + // For example: {{template "my_template" (map (pair "foo" $arg1) (pair "bar" $arg2))}} + // $arg1 and $arg2 can be referred to from inside "my_template" as ".foo" and ".bar" + "map": func(pairs ...pair) map[string]any { + result := make(map[string]any, 0) + for _, p := range pairs { + result[p.k] = p.v + } + return result + }, } diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 169e06f3..023eed29 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -54,3 +54,18 @@ func TestTemplateUrlFunction(t *testing.T) { assert.Len(t, r.files, 1) assert.Equal(t, "https://www.databricks.com", string(r.files[0].(*inMemoryFile).content)) } + +func TestTemplateMapPairFunction(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + r, err := newRenderer(ctx, nil, "./testdata/map-pair/template", "./testdata/map-pair/library", tmpDir) + + require.NoError(t, err) + + err = r.walk() + assert.NoError(t, err) + + assert.Len(t, r.files, 1) + assert.Equal(t, "false 123 hello 12.3", string(r.files[0].(*inMemoryFile).content)) +} diff --git a/libs/template/testdata/map-pair/library/abc.tmpl b/libs/template/testdata/map-pair/library/abc.tmpl new file mode 100644 index 00000000..387c7555 --- /dev/null +++ b/libs/template/testdata/map-pair/library/abc.tmpl @@ -0,0 +1,3 @@ +{{- define "my_template" -}} +{{- .foo}} {{.bar}} {{.abc}} {{.def -}} +{{- end -}} diff --git a/libs/template/testdata/map-pair/template/hello.tmpl b/libs/template/testdata/map-pair/template/hello.tmpl new file mode 100644 index 00000000..d0077846 --- /dev/null +++ b/libs/template/testdata/map-pair/template/hello.tmpl @@ -0,0 +1 @@ +{{template "my_template" (map (pair "foo" false) (pair "bar" 123) (pair "abc" "hello") (pair "def" 12.3)) -}} From 6a843f28efb2fb12726d327a9856be696d471c5a Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 15 Aug 2023 21:03:43 +0200 Subject: [PATCH 023/310] Correct name for force acquire deploy flag (#656) ## Changes As discussed here, the name for this flag should be `force-lock`: https://github.com/databricks/cli/pull/578#discussion_r1276233445 ## Tests Manually and existing tests --- cmd/bundle/deploy.go | 2 +- internal/locker_test.go | 2 +- libs/locker/locker.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 807bb982..8818bbbf 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -17,7 +17,7 @@ func newDeployCommand() *cobra.Command { var forceLock bool var computeID string cmd.Flags().BoolVar(&force, "force", false, "Force-override Git branch validation.") - cmd.Flags().BoolVar(&forceLock, "force-deploy", false, "Force acquisition of deployment lock.") + cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") cmd.Flags().StringVarP(&computeID, "compute-id", "c", "", "Override compute in the deployment with the given compute ID.") cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/internal/locker_test.go b/internal/locker_test.go index 2c7e7aa8..661838ec 100644 --- a/internal/locker_test.go +++ b/internal/locker_test.go @@ -90,7 +90,7 @@ func TestAccLock(t *testing.T) { indexOfAnInactiveLocker = i 
} assert.ErrorContains(t, lockerErrs[i], "lock acquired by") - assert.ErrorContains(t, lockerErrs[i], "Use --force to override") + assert.ErrorContains(t, lockerErrs[i], "Use --force-lock to override") } } assert.Equal(t, 1, countActive, "Exactly one locker should successfully acquire the lock") diff --git a/libs/locker/locker.go b/libs/locker/locker.go index 66993156..b0d65c42 100644 --- a/libs/locker/locker.go +++ b/libs/locker/locker.go @@ -105,10 +105,10 @@ func (locker *Locker) assertLockHeld(ctx context.Context) error { return err } if activeLockState.ID != locker.State.ID && !activeLockState.IsForced { - return fmt.Errorf("deploy lock acquired by %s at %v. Use --force to override", activeLockState.User, activeLockState.AcquisitionTime) + return fmt.Errorf("deploy lock acquired by %s at %v. Use --force-lock to override", activeLockState.User, activeLockState.AcquisitionTime) } if activeLockState.ID != locker.State.ID && activeLockState.IsForced { - return fmt.Errorf("deploy lock force acquired by %s at %v. Use --force to override", activeLockState.User, activeLockState.AcquisitionTime) + return fmt.Errorf("deploy lock force acquired by %s at %v. Use --force-lock to override", activeLockState.User, activeLockState.AcquisitionTime) } return nil } From d225d7a662b68ecc9840002bac4a1256b4bffdf1 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 16 Aug 2023 13:28:57 +0200 Subject: [PATCH 024/310] Confirm that override with a zero value doesn't work (#669) ## Changes This is not desirable and will be addressed by representing our configuration in a different structure (e.g. with cty, or with plain `any`), instead of Go structs. ## Tests Pass. --- .../resources/databricks.yml | 16 ++++++++++++++++ bundle/tests/environment_overrides_test.go | 11 +++++++++++ 2 files changed, 27 insertions(+) diff --git a/bundle/tests/environment_overrides/resources/databricks.yml b/bundle/tests/environment_overrides/resources/databricks.yml index eef5dc01..df261ba0 100644 --- a/bundle/tests/environment_overrides/resources/databricks.yml +++ b/bundle/tests/environment_overrides/resources/databricks.yml @@ -9,6 +9,13 @@ resources: job1: name: "base job" + pipelines: + boolean1: + photon: true + + boolean2: + photon: false + environments: development: default: true @@ -18,3 +25,12 @@ environments: jobs: job1: name: "staging job" + + pipelines: + boolean1: + # Note: setting a property to a zero value (in Go) does not have an effect. + # See the corresponding test for details. + photon: false + + boolean2: + photon: true diff --git a/bundle/tests/environment_overrides_test.go b/bundle/tests/environment_overrides_test.go index b8cc224a..0a3f9fcd 100644 --- a/bundle/tests/environment_overrides_test.go +++ b/bundle/tests/environment_overrides_test.go @@ -19,9 +19,20 @@ func TestEnvironmentOverridesWorkspaceStaging(t *testing.T) { func TestEnvironmentOverridesResourcesDev(t *testing.T) { b := loadEnvironment(t, "./environment_overrides/resources", "development") assert.Equal(t, "base job", b.Config.Resources.Jobs["job1"].Name) + + // Base values are preserved in the development environment. + assert.Equal(t, true, b.Config.Resources.Pipelines["boolean1"].Photon) + assert.Equal(t, false, b.Config.Resources.Pipelines["boolean2"].Photon) } func TestEnvironmentOverridesResourcesStaging(t *testing.T) { b := loadEnvironment(t, "./environment_overrides/resources", "staging") assert.Equal(t, "staging job", b.Config.Resources.Jobs["job1"].Name) + + // Overrides are only applied if they are not zero-valued. 
+ // This means that in its current form, we cannot override a true value with a false value. + // Note: this is not desirable and will be addressed by representing our configuration + // in a different structure (e.g. with cty), instead of Go structs. + assert.Equal(t, true, b.Config.Resources.Pipelines["boolean1"].Photon) + assert.Equal(t, true, b.Config.Resources.Pipelines["boolean2"].Photon) } From 35e8ed30c6207c239331c30491ecd2c34b1d123e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 16 Aug 2023 14:56:12 +0200 Subject: [PATCH 025/310] Release v0.203.1 (#672) CLI: * Always resolve .databrickscfg file ([#659](https://github.com/databricks/cli/pull/659)). Bundles: * Add internal tag for bundle fields to be skipped from schema ([#636](https://github.com/databricks/cli/pull/636)). * Log the bundle root configuration file if applicable ([#657](https://github.com/databricks/cli/pull/657)). * Execute paths without the .tmpl extension as templates ([#654](https://github.com/databricks/cli/pull/654)). * Enable environment overrides for job clusters ([#658](https://github.com/databricks/cli/pull/658)). * Merge artifacts and resources block with overrides enabled ([#660](https://github.com/databricks/cli/pull/660)). * Locked terraform binary version to <= 1.5.5 ([#666](https://github.com/databricks/cli/pull/666)). * Return better error messages for invalid JSON schema types in templates ([#661](https://github.com/databricks/cli/pull/661)). * Use custom prompter for bundle template inputs ([#663](https://github.com/databricks/cli/pull/663)). * Add map and pair helper functions for bundle templates ([#665](https://github.com/databricks/cli/pull/665)). * Correct name for force acquire deploy flag ([#656](https://github.com/databricks/cli/pull/656)). * Confirm that override with a zero value doesn't work ([#669](https://github.com/databricks/cli/pull/669)). Internal: * Consolidate functions in libs/git ([#652](https://github.com/databricks/cli/pull/652)). * Upgraded Go version to 1.21 ([#664](https://github.com/databricks/cli/pull/664)). --- CHANGELOG.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cf7673b..b0b6bc0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,27 @@ # Version changelog +## 0.203.1 + +CLI: + * Always resolve .databrickscfg file ([#659](https://github.com/databricks/cli/pull/659)). + +Bundles: + * Add internal tag for bundle fields to be skipped from schema ([#636](https://github.com/databricks/cli/pull/636)). + * Log the bundle root configuration file if applicable ([#657](https://github.com/databricks/cli/pull/657)). + * Execute paths without the .tmpl extension as templates ([#654](https://github.com/databricks/cli/pull/654)). + * Enable environment overrides for job clusters ([#658](https://github.com/databricks/cli/pull/658)). + * Merge artifacts and resources block with overrides enabled ([#660](https://github.com/databricks/cli/pull/660)). + * Locked terraform binary version to <= 1.5.5 ([#666](https://github.com/databricks/cli/pull/666)). + * Return better error messages for invalid JSON schema types in templates ([#661](https://github.com/databricks/cli/pull/661)). + * Use custom prompter for bundle template inputs ([#663](https://github.com/databricks/cli/pull/663)). + * Add map and pair helper functions for bundle templates ([#665](https://github.com/databricks/cli/pull/665)). + * Correct name for force acquire deploy flag ([#656](https://github.com/databricks/cli/pull/656)). 
+ * Confirm that override with a zero value doesn't work ([#669](https://github.com/databricks/cli/pull/669)). + +Internal: + * Consolidate functions in libs/git ([#652](https://github.com/databricks/cli/pull/652)). + * Upgraded Go version to 1.21 ([#664](https://github.com/databricks/cli/pull/664)). + ## 0.203.0 CLI: From 4694832534b2b94d94835b9b57629ab99f05b50c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 17 Aug 2023 11:11:39 +0200 Subject: [PATCH 026/310] Do not try auto detect Python package if no Python wheel tasks defined (#674) ## Changes Fixes #673 It also includes a change for `libraries` from #635 to get the list of wheel tasks --- bundle/artifacts/whl/autodetect.go | 7 +++++ bundle/libraries/libraries.go | 50 +++++++++++++++++++++--------- 2 files changed, 43 insertions(+), 14 deletions(-) diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index a801b48d..41d80bb7 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -10,7 +10,9 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/log" ) type detectPkg struct { @@ -25,6 +27,11 @@ func (m *detectPkg) Name() string { } func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error { + wheelTasks := libraries.FindAllWheelTasks(b) + if len(wheelTasks) == 0 { + log.Infof(ctx, "No wheel tasks in databricks.yml config, skipping auto detect") + return nil + } cmdio.LogString(ctx, "artifacts.whl.AutoDetect: Detecting Python wheel project...") // checking if there is setup.py in the bundle root diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 8ccf3fc7..29848236 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -25,26 +25,48 @@ func (a *match) Name() string { } func (a *match) Apply(ctx context.Context, b *bundle.Bundle) error { - r := b.Config.Resources - for k := range b.Config.Resources.Jobs { - tasks := r.Jobs[k].JobSettings.Tasks - for i := range tasks { - task := &tasks[i] - if isMissingRequiredLibraries(task) { - return fmt.Errorf("task '%s' is missing required libraries. Please include your package code in task libraries block", task.TaskKey) - } - for j := range task.Libraries { - lib := &task.Libraries[j] - err := findArtifactsAndMarkForUpload(ctx, lib, b) - if err != nil { - return err - } + tasks := findAllTasks(b) + for _, task := range tasks { + if isMissingRequiredLibraries(task) { + return fmt.Errorf("task '%s' is missing required libraries. 
Please include your package code in task libraries block", task.TaskKey) + } + for j := range task.Libraries { + lib := &task.Libraries[j] + err := findArtifactsAndMarkForUpload(ctx, lib, b) + if err != nil { + return err } } } return nil } +func findAllTasks(b *bundle.Bundle) []*jobs.Task { + r := b.Config.Resources + result := make([]*jobs.Task, 0) + for k := range b.Config.Resources.Jobs { + tasks := r.Jobs[k].JobSettings.Tasks + for i := range tasks { + task := &tasks[i] + result = append(result, task) + } + } + + return result +} + +func FindAllWheelTasks(b *bundle.Bundle) []*jobs.Task { + tasks := findAllTasks(b) + wheelTasks := make([]*jobs.Task, 0) + for _, task := range tasks { + if task.PythonWheelTask != nil { + wheelTasks = append(wheelTasks, task) + } + } + + return wheelTasks +} + func isMissingRequiredLibraries(task *jobs.Task) bool { if task.Libraries != nil { return false From 56dcd3f0a7398bfda2fb517886d6690e9f0018b5 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 17 Aug 2023 17:22:32 +0200 Subject: [PATCH 027/310] Renamed `environments` to `targets` in bundle configuration (#670) ## Changes Renamed Environments to Targets in bundle.yml. The change is backward-compatible and customers can continue to use `environments` for the time being. ## Tests Added tests which check that both the `environments` and `targets` sections in bundle.yml work correctly --- bundle/bundle.go | 10 +-- bundle/bundle_test.go | 16 ++-- bundle/config/bundle.go | 9 +- bundle/config/mutator/default_environment.go | 37 -------- bundle/config/mutator/default_target.go | 37 ++++++++ ...ronment_test.go => default_target_test.go} | 16 ++-- .../config/mutator/default_workspace_root.go | 6 +- .../mutator/default_workspace_root_test.go | 4 +- bundle/config/mutator/mutator.go | 6 +- bundle/config/mutator/override_compute.go | 2 +- ...ronment_mode.go => process_target_mode.go} | 20 ++--- ...de_test.go => process_target_mode_test.go} | 22 ++--- .../mutator/select_default_environment.go | 54 ----------- .../select_default_environment_test.go | 90 ------------------- .../config/mutator/select_default_target.go | 54 +++++++++++ .../mutator/select_default_target_test.go | 90 +++++++++++++++++++ bundle/config/mutator/select_environment.go | 48 ---------- bundle/config/mutator/select_target.go | 54 +++++++++++ ...ironment_test.go => select_target_test.go} | 14 +-- bundle/config/resources.go | 2 +- bundle/config/resources/job.go | 2 +- bundle/config/root.go | 70 +++++++++------ bundle/config/root_test.go | 12 +-- bundle/config/{environment.go => target.go} | 12 +-- bundle/config/variable/variable.go | 2 +- bundle/config/workspace.go | 2 +- bundle/deploy/terraform/init_test.go | 16 ++-- bundle/deploy/terraform/load_test.go | 2 +- bundle/phases/initialize.go | 2 +- bundle/schema/README.md | 4 +- bundle/schema/docs.go | 16 ++-- bundle/schema/docs/bundle_descriptions.json | 6 +- bundle/tests/autoload_git/databricks.yml | 2 +- bundle/tests/environment_empty/databricks.yml | 5 -- bundle/tests/environment_empty_test.go | 12 --- bundle/tests/environment_git_test.go | 20 +++++ bundle/tests/environment_overrides_test.go | 8 +- .../environments_autoload_git/databricks.yml | 11 +++ .../databricks.yml | 44 +++++++++ .../environments_job_and_pipeline_test.go | 56 ++++++++++++ .../databricks.yml | 35 ++++++++ .../environments_override_job_cluster_test.go | 29 ++++++ bundle/tests/git_test.go | 2 +- .../tests/interpolation_target/databricks.yml | 14 +++ bundle/tests/interpolation_test.go | 12 +++ 
bundle/tests/job_and_pipeline/databricks.yml | 2 +- bundle/tests/job_and_pipeline_test.go | 6 +- bundle/tests/loader.go | 4 +- .../tests/override_job_cluster/databricks.yml | 2 +- bundle/tests/override_job_cluster_test.go | 4 +- bundle/tests/target_empty/databricks.yml | 5 ++ bundle/tests/target_empty_test.go | 12 +++ .../target_overrides/resources/databricks.yml | 20 +++++ .../target_overrides/workspace/databricks.yml | 14 +++ bundle/tests/target_overrides_test.go | 27 ++++++ .../variables/env_overrides/databricks.yml | 2 +- bundle/tests/variables_test.go | 20 ++--- cmd/bundle/variables.go | 2 +- cmd/configure/configure.go | 2 +- cmd/root/bundle.go | 48 +++++++--- cmd/root/bundle_test.go | 24 +++++ cmd/root/root.go | 1 + cmd/sync/sync_test.go | 2 +- 63 files changed, 768 insertions(+), 416 deletions(-) delete mode 100644 bundle/config/mutator/default_environment.go create mode 100644 bundle/config/mutator/default_target.go rename bundle/config/mutator/{default_environment_test.go => default_target_test.go} (51%) rename bundle/config/mutator/{process_environment_mode.go => process_target_mode.go} (89%) rename bundle/config/mutator/{process_environment_mode_test.go => process_target_mode_test.go} (90%) delete mode 100644 bundle/config/mutator/select_default_environment.go delete mode 100644 bundle/config/mutator/select_default_environment_test.go create mode 100644 bundle/config/mutator/select_default_target.go create mode 100644 bundle/config/mutator/select_default_target_test.go delete mode 100644 bundle/config/mutator/select_environment.go create mode 100644 bundle/config/mutator/select_target.go rename bundle/config/mutator/{select_environment_test.go => select_target_test.go} (62%) rename bundle/config/{environment.go => target.go} (80%) delete mode 100644 bundle/tests/environment_empty/databricks.yml delete mode 100644 bundle/tests/environment_empty_test.go create mode 100644 bundle/tests/environment_git_test.go create mode 100644 bundle/tests/environments_autoload_git/databricks.yml create mode 100644 bundle/tests/environments_job_and_pipeline/databricks.yml create mode 100644 bundle/tests/environments_job_and_pipeline_test.go create mode 100644 bundle/tests/environments_override_job_cluster/databricks.yml create mode 100644 bundle/tests/environments_override_job_cluster_test.go create mode 100644 bundle/tests/interpolation_target/databricks.yml create mode 100644 bundle/tests/target_empty/databricks.yml create mode 100644 bundle/tests/target_empty_test.go create mode 100644 bundle/tests/target_overrides/resources/databricks.yml create mode 100644 bundle/tests/target_overrides/workspace/databricks.yml create mode 100644 bundle/tests/target_overrides_test.go diff --git a/bundle/bundle.go b/bundle/bundle.go index 06c68fe8..a5eaa289 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -117,10 +117,10 @@ func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient { } // CacheDir returns directory to use for temporary files for this bundle. -// Scoped to the bundle's environment. +// Scoped to the bundle's target. func (b *Bundle) CacheDir(paths ...string) (string, error) { - if b.Config.Bundle.Environment == "" { - panic("environment not set") + if b.Config.Bundle.Target == "" { + panic("target not set") } cacheDirName, exists := os.LookupEnv("DATABRICKS_BUNDLE_TMP") @@ -138,8 +138,8 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) { // Fixed components of the result path. parts := []string{ cacheDirName, - // Scope with environment name. 
- b.Config.Bundle.Environment, + // Scope with target name. + b.Config.Bundle.Target, } // Append dynamic components of the result path. diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index ac947500..4a3e7f2c 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -31,16 +31,16 @@ func TestBundleCacheDir(t *testing.T) { bundle, err := Load(context.Background(), projectDir) require.NoError(t, err) - // Artificially set environment. - // This is otherwise done by [mutators.SelectEnvironment]. - bundle.Config.Bundle.Environment = "default" + // Artificially set target. + // This is otherwise done by [mutators.SelectTarget]. + bundle.Config.Bundle.Target = "default" // unset env variable in case it's set t.Setenv("DATABRICKS_BUNDLE_TMP", "") cacheDir, err := bundle.CacheDir() - // format is <CWD>/.databricks/bundle/<environment> + // format is <CWD>/.databricks/bundle/<target> assert.NoError(t, err) assert.Equal(t, filepath.Join(projectDir, ".databricks", "bundle", "default"), cacheDir) } @@ -55,16 +55,16 @@ func TestBundleCacheDirOverride(t *testing.T) { bundle, err := Load(context.Background(), projectDir) require.NoError(t, err) - // Artificially set environment. - // This is otherwise done by [mutators.SelectEnvironment]. - bundle.Config.Bundle.Environment = "default" + // Artificially set target. + // This is otherwise done by [mutators.SelectTarget]. + bundle.Config.Bundle.Target = "default" // now we expect to use 'bundleTmpDir' instead of CWD/.databricks/bundle t.Setenv("DATABRICKS_BUNDLE_TMP", bundleTmpDir) cacheDir, err := bundle.CacheDir() - // format is <DATABRICKS_BUNDLE_TMP>/<environment> + // format is <DATABRICKS_BUNDLE_TMP>/<target> assert.NoError(t, err) assert.Equal(t, filepath.Join(bundleTmpDir, "default"), cacheDir) } diff --git a/bundle/config/bundle.go b/bundle/config/bundle.go index f3401477..d444f507 100644 --- a/bundle/config/bundle.go +++ b/bundle/config/bundle.go @@ -15,7 +15,10 @@ type Bundle struct { // Default warehouse to run SQL on. // DefaultWarehouse string `json:"default_warehouse,omitempty"` - // Environment is set by the mutator that selects the environment. + // Target is set by the mutator that selects the target. + Target string `json:"target,omitempty" bundle:"readonly"` + + // DEPRECATED. Left for backward compatibility with Target Environment string `json:"environment,omitempty" bundle:"readonly"` // Terraform holds configuration related to Terraform. @@ -32,10 +35,10 @@ type Bundle struct { // origin url. Automatically loaded by reading .git directory if not specified Git Git `json:"git,omitempty"` - // Determines the mode of the environment. + // Determines the mode of the target. // For example, 'mode: development' can be used for deployments for // development purposes. - // Annotated readonly as this should be set at the environment level. + // Annotated readonly as this should be set at the target level. Mode Mode `json:"mode,omitempty" bundle:"readonly"` // Overrides the compute used for jobs and other supported assets. diff --git a/bundle/config/mutator/default_environment.go b/bundle/config/mutator/default_environment.go deleted file mode 100644 index 1598a647..00000000 --- a/bundle/config/mutator/default_environment.go +++ /dev/null @@ -1,37 +0,0 @@ -package mutator - -import ( - "context" - "fmt" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" -) - -type defineDefaultEnvironment struct { - name string -} - -// DefineDefaultEnvironment adds an environment named "default" -// to the configuration if none have been defined. 
-func DefineDefaultEnvironment() bundle.Mutator { - return &defineDefaultEnvironment{ - name: "default", - } -} - -func (m *defineDefaultEnvironment) Name() string { - return fmt.Sprintf("DefineDefaultEnvironment(%s)", m.name) -} - -func (m *defineDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) error { - // Nothing to do if the configuration has at least 1 environment. - if len(b.Config.Environments) > 0 { - return nil - } - - // Define default environment. - b.Config.Environments = make(map[string]*config.Environment) - b.Config.Environments[m.name] = &config.Environment{} - return nil -} diff --git a/bundle/config/mutator/default_target.go b/bundle/config/mutator/default_target.go new file mode 100644 index 00000000..d5318a3e --- /dev/null +++ b/bundle/config/mutator/default_target.go @@ -0,0 +1,37 @@ +package mutator + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" +) + +type defineDefaultTarget struct { + name string +} + +// DefineDefaultTarget adds a target named "default" +// to the configuration if none have been defined. +func DefineDefaultTarget() bundle.Mutator { + return &defineDefaultTarget{ + name: "default", + } +} + +func (m *defineDefaultTarget) Name() string { + return fmt.Sprintf("DefineDefaultTarget(%s)", m.name) +} + +func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) error { + // Nothing to do if the configuration has at least 1 target. + if len(b.Config.Targets) > 0 { + return nil + } + + // Define default target. + b.Config.Targets = make(map[string]*config.Target) + b.Config.Targets[m.name] = &config.Target{} + return nil +} diff --git a/bundle/config/mutator/default_environment_test.go b/bundle/config/mutator/default_target_test.go similarity index 51% rename from bundle/config/mutator/default_environment_test.go rename to bundle/config/mutator/default_target_test.go index f196e5ba..49fbe6de 100644 --- a/bundle/config/mutator/default_environment_test.go +++ b/bundle/config/mutator/default_target_test.go @@ -11,25 +11,25 @@ import ( "github.com/stretchr/testify/require" ) -func TestDefaultEnvironment(t *testing.T) { +func TestDefaultTarget(t *testing.T) { bundle := &bundle.Bundle{} - err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle) + err := mutator.DefineDefaultTarget().Apply(context.Background(), bundle) require.NoError(t, err) - env, ok := bundle.Config.Environments["default"] + env, ok := bundle.Config.Targets["default"] assert.True(t, ok) - assert.Equal(t, &config.Environment{}, env) + assert.Equal(t, &config.Target{}, env) } -func TestDefaultEnvironmentAlreadySpecified(t *testing.T) { +func TestDefaultTargetAlreadySpecified(t *testing.T) { bundle := &bundle.Bundle{ Config: config.Root{ - Environments: map[string]*config.Environment{ + Targets: map[string]*config.Target{ "development": {}, }, }, } - err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle) + err := mutator.DefineDefaultTarget().Apply(context.Background(), bundle) require.NoError(t, err) - _, ok := bundle.Config.Environments["default"] + _, ok := bundle.Config.Targets["default"] assert.False(t, ok) } diff --git a/bundle/config/mutator/default_workspace_root.go b/bundle/config/mutator/default_workspace_root.go index bf51eda9..260a5958 100644 --- a/bundle/config/mutator/default_workspace_root.go +++ b/bundle/config/mutator/default_workspace_root.go @@ -27,14 +27,14 @@ func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle 
return fmt.Errorf("unable to define default workspace root: bundle name not defined") } - if b.Config.Bundle.Environment == "" { - return fmt.Errorf("unable to define default workspace root: bundle environment not selected") + if b.Config.Bundle.Target == "" { + return fmt.Errorf("unable to define default workspace root: bundle target not selected") } b.Config.Workspace.RootPath = fmt.Sprintf( "~/.bundle/%s/%s", b.Config.Bundle.Name, - b.Config.Bundle.Environment, + b.Config.Bundle.Target, ) return nil } diff --git a/bundle/config/mutator/default_workspace_root_test.go b/bundle/config/mutator/default_workspace_root_test.go index 4a78e6e5..1822dca0 100644 --- a/bundle/config/mutator/default_workspace_root_test.go +++ b/bundle/config/mutator/default_workspace_root_test.go @@ -15,8 +15,8 @@ func TestDefaultWorkspaceRoot(t *testing.T) { bundle := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ - Name: "name", - Environment: "environment", + Name: "name", + Target: "environment", }, }, } diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index 058258c8..ff1f96f5 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -7,11 +7,11 @@ import ( func DefaultMutators() []bundle.Mutator { return []bundle.Mutator{ ProcessRootIncludes(), - DefineDefaultEnvironment(), + DefineDefaultTarget(), LoadGitDetails(), } } -func DefaultMutatorsForEnvironment(env string) []bundle.Mutator { - return append(DefaultMutators(), SelectEnvironment(env)) +func DefaultMutatorsForTarget(env string) []bundle.Mutator { + return append(DefaultMutators(), SelectTarget(env)) } diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index ba3fd994..12439249 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -35,7 +35,7 @@ func overrideJobCompute(j *resources.Job, compute string) { func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) error { if b.Config.Bundle.Mode != config.Development { if b.Config.Bundle.ComputeID != "" { - return fmt.Errorf("cannot override compute for an environment that does not use 'mode: development'") + return fmt.Errorf("cannot override compute for an target that does not use 'mode: development'") } return nil } diff --git a/bundle/config/mutator/process_environment_mode.go b/bundle/config/mutator/process_target_mode.go similarity index 89% rename from bundle/config/mutator/process_environment_mode.go rename to bundle/config/mutator/process_target_mode.go index d2030234..b5dc2559 100644 --- a/bundle/config/mutator/process_environment_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -13,16 +13,16 @@ import ( "github.com/databricks/databricks-sdk-go/service/ml" ) -type processEnvironmentMode struct{} +type processTargetMode struct{} const developmentConcurrentRuns = 4 -func ProcessEnvironmentMode() bundle.Mutator { - return &processEnvironmentMode{} +func ProcessTargetMode() bundle.Mutator { + return &processTargetMode{} } -func (m *processEnvironmentMode) Name() string { - return "ProcessEnvironmentMode" +func (m *processTargetMode) Name() string { + return "ProcessTargetMode" } // Mark all resources as being for 'development' purposes, i.e. 
@@ -110,14 +110,14 @@ func findIncorrectPath(b *bundle.Bundle, mode config.Mode) string { func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) error { if b.Config.Bundle.Git.Inferred { - env := b.Config.Bundle.Environment - return fmt.Errorf("environment with 'mode: production' must specify an explicit 'environments.%s.git' configuration", env) + env := b.Config.Bundle.Target + return fmt.Errorf("target with 'mode: production' must specify an explicit 'targets.%s.git' configuration", env) } r := b.Config.Resources for i := range r.Pipelines { if r.Pipelines[i].Development { - return fmt.Errorf("environment with 'mode: production' cannot specify a pipeline with 'development: true'") + return fmt.Errorf("target with 'mode: production' cannot specify a pipeline with 'development: true'") } } @@ -125,7 +125,7 @@ func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUs if path := findIncorrectPath(b, config.Production); path != "" { message := "%s must not contain the current username when using 'mode: production'" if path == "root_path" { - return fmt.Errorf(message+"\n tip: set workspace.root_path to a shared path such as /Shared/.bundle/${bundle.name}/${bundle.environment}", path) + return fmt.Errorf(message+"\n tip: set workspace.root_path to a shared path such as /Shared/.bundle/${bundle.name}/${bundle.target}", path) } else { return fmt.Errorf(message, path) } @@ -165,7 +165,7 @@ func isRunAsSet(r config.Resources) bool { return true } -func (m *processEnvironmentMode) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error { switch b.Config.Bundle.Mode { case config.Development: err := validateDevelopmentMode(b) diff --git a/bundle/config/mutator/process_environment_mode_test.go b/bundle/config/mutator/process_target_mode_test.go similarity index 90% rename from bundle/config/mutator/process_environment_mode_test.go rename to bundle/config/mutator/process_target_mode_test.go index 36e0396e..76db64de 100644 --- a/bundle/config/mutator/process_environment_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -58,10 +58,10 @@ func mockBundle(mode config.Mode) *bundle.Bundle { } } -func TestProcessEnvironmentModeDevelopment(t *testing.T) { +func TestProcessTargetModeDevelopment(t *testing.T) { bundle := mockBundle(config.Development) - m := ProcessEnvironmentMode() + m := ProcessTargetMode() err := m.Apply(context.Background(), bundle) require.NoError(t, err) assert.Equal(t, "[dev lennart] job1", bundle.Config.Resources.Jobs["job1"].Name) @@ -73,10 +73,10 @@ func TestProcessEnvironmentModeDevelopment(t *testing.T) { assert.True(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) } -func TestProcessEnvironmentModeDefault(t *testing.T) { +func TestProcessTargetModeDefault(t *testing.T) { bundle := mockBundle("") - m := ProcessEnvironmentMode() + m := ProcessTargetMode() err := m.Apply(context.Background(), bundle) require.NoError(t, err) assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name) @@ -84,7 +84,7 @@ func TestProcessEnvironmentModeDefault(t *testing.T) { assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) } -func TestProcessEnvironmentModeProduction(t *testing.T) { +func TestProcessTargetModeProduction(t *testing.T) { bundle := mockBundle(config.Production) err := validateProductionMode(context.Background(), bundle, false) @@ -118,7 +118,7 @@ func 
TestProcessEnvironmentModeProduction(t *testing.T) { assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) } -func TestProcessEnvironmentModeProductionGit(t *testing.T) { +func TestProcessTargetModeProductionGit(t *testing.T) { bundle := mockBundle(config.Production) // Pretend the user didn't set Git configuration explicitly @@ -129,10 +129,10 @@ func TestProcessEnvironmentModeProductionGit(t *testing.T) { bundle.Config.Bundle.Git.Inferred = false } -func TestProcessEnvironmentModeProductionOkForPrincipal(t *testing.T) { +func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { bundle := mockBundle(config.Production) - // Our environment has all kinds of problems when not using service principals ... + // Our target has all kinds of problems when not using service principals ... err := validateProductionMode(context.Background(), bundle, false) require.Error(t, err) @@ -152,7 +152,7 @@ func TestAllResourcesMocked(t *testing.T) { assert.True( t, !field.IsNil() && field.Len() > 0, - "process_environment_mode should support '%s' (please add it to process_environment_mode.go and extend the test suite)", + "process_target_mode should support '%s' (please add it to process_target_mode.go and extend the test suite)", resources.Type().Field(i).Name, ) } @@ -164,7 +164,7 @@ func TestAllResourcesRenamed(t *testing.T) { bundle := mockBundle(config.Development) resources := reflect.ValueOf(bundle.Config.Resources) - m := ProcessEnvironmentMode() + m := ProcessTargetMode() err := m.Apply(context.Background(), bundle) require.NoError(t, err) @@ -179,7 +179,7 @@ func TestAllResourcesRenamed(t *testing.T) { assert.True( t, strings.Contains(nameField.String(), "dev"), - "process_environment_mode should rename '%s' in '%s'", + "process_target_mode should rename '%s' in '%s'", key, resources.Type().Field(i).Name, ) diff --git a/bundle/config/mutator/select_default_environment.go b/bundle/config/mutator/select_default_environment.go deleted file mode 100644 index 0ed1d2db..00000000 --- a/bundle/config/mutator/select_default_environment.go +++ /dev/null @@ -1,54 +0,0 @@ -package mutator - -import ( - "context" - "fmt" - "strings" - - "github.com/databricks/cli/bundle" - "golang.org/x/exp/maps" -) - -type selectDefaultEnvironment struct{} - -// SelectDefaultEnvironment merges the default environment into the root configuration. -func SelectDefaultEnvironment() bundle.Mutator { - return &selectDefaultEnvironment{} -} - -func (m *selectDefaultEnvironment) Name() string { - return "SelectDefaultEnvironment" -} - -func (m *selectDefaultEnvironment) Apply(ctx context.Context, b *bundle.Bundle) error { - if len(b.Config.Environments) == 0 { - return fmt.Errorf("no environments defined") - } - - // One environment means there's only one default. - names := maps.Keys(b.Config.Environments) - if len(names) == 1 { - return SelectEnvironment(names[0]).Apply(ctx, b) - } - - // Multiple environments means we look for the `default` flag. - var defaults []string - for name, env := range b.Config.Environments { - if env != nil && env.Default { - defaults = append(defaults, name) - } - } - - // It is invalid to have multiple environments with the `default` flag set. - if len(defaults) > 1 { - return fmt.Errorf("multiple environments are marked as default (%s)", strings.Join(defaults, ", ")) - } - - // If no environment has the `default` flag set, ask the user to specify one. - if len(defaults) == 0 { - return fmt.Errorf("please specify environment") - } - - // One default remaining. 
- return SelectEnvironment(defaults[0]).Apply(ctx, b) -} diff --git a/bundle/config/mutator/select_default_environment_test.go b/bundle/config/mutator/select_default_environment_test.go deleted file mode 100644 index cc8f9c01..00000000 --- a/bundle/config/mutator/select_default_environment_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package mutator_test - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/mutator" - "github.com/stretchr/testify/assert" -) - -func TestSelectDefaultEnvironmentNoEnvironments(t *testing.T) { - bundle := &bundle.Bundle{ - Config: config.Root{ - Environments: map[string]*config.Environment{}, - }, - } - err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle) - assert.ErrorContains(t, err, "no environments defined") -} - -func TestSelectDefaultEnvironmentSingleEnvironments(t *testing.T) { - bundle := &bundle.Bundle{ - Config: config.Root{ - Environments: map[string]*config.Environment{ - "foo": {}, - }, - }, - } - err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle) - assert.NoError(t, err) - assert.Equal(t, "foo", bundle.Config.Bundle.Environment) -} - -func TestSelectDefaultEnvironmentNoDefaults(t *testing.T) { - bundle := &bundle.Bundle{ - Config: config.Root{ - Environments: map[string]*config.Environment{ - "foo": {}, - "bar": {}, - "qux": {}, - }, - }, - } - err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle) - assert.ErrorContains(t, err, "please specify environment") -} - -func TestSelectDefaultEnvironmentNoDefaultsWithNil(t *testing.T) { - bundle := &bundle.Bundle{ - Config: config.Root{ - Environments: map[string]*config.Environment{ - "foo": nil, - "bar": nil, - }, - }, - } - err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle) - assert.ErrorContains(t, err, "please specify environment") -} - -func TestSelectDefaultEnvironmentMultipleDefaults(t *testing.T) { - bundle := &bundle.Bundle{ - Config: config.Root{ - Environments: map[string]*config.Environment{ - "foo": {Default: true}, - "bar": {Default: true}, - "qux": {Default: true}, - }, - }, - } - err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle) - assert.ErrorContains(t, err, "multiple environments are marked as default") -} - -func TestSelectDefaultEnvironmentSingleDefault(t *testing.T) { - bundle := &bundle.Bundle{ - Config: config.Root{ - Environments: map[string]*config.Environment{ - "foo": {}, - "bar": {Default: true}, - "qux": {}, - }, - }, - } - err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle) - assert.NoError(t, err) - assert.Equal(t, "bar", bundle.Config.Bundle.Environment) -} diff --git a/bundle/config/mutator/select_default_target.go b/bundle/config/mutator/select_default_target.go new file mode 100644 index 00000000..8abcfe4f --- /dev/null +++ b/bundle/config/mutator/select_default_target.go @@ -0,0 +1,54 @@ +package mutator + +import ( + "context" + "fmt" + "strings" + + "github.com/databricks/cli/bundle" + "golang.org/x/exp/maps" +) + +type selectDefaultTarget struct{} + +// SelectDefaultTarget merges the default target into the root configuration. 
+func SelectDefaultTarget() bundle.Mutator { + return &selectDefaultTarget{} +} + +func (m *selectDefaultTarget) Name() string { + return "SelectDefaultTarget" +} + +func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error { + if len(b.Config.Targets) == 0 { + return fmt.Errorf("no targets defined") + } + + // One target means there's only one default. + names := maps.Keys(b.Config.Targets) + if len(names) == 1 { + return SelectTarget(names[0]).Apply(ctx, b) + } + + // Multiple targets means we look for the `default` flag. + var defaults []string + for name, env := range b.Config.Targets { + if env != nil && env.Default { + defaults = append(defaults, name) + } + } + + // It is invalid to have multiple targets with the `default` flag set. + if len(defaults) > 1 { + return fmt.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", ")) + } + + // If no target has the `default` flag set, ask the user to specify one. + if len(defaults) == 0 { + return fmt.Errorf("please specify target") + } + + // One default remaining. + return SelectTarget(defaults[0]).Apply(ctx, b) +} diff --git a/bundle/config/mutator/select_default_target_test.go b/bundle/config/mutator/select_default_target_test.go new file mode 100644 index 00000000..5d7b93b2 --- /dev/null +++ b/bundle/config/mutator/select_default_target_test.go @@ -0,0 +1,90 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/assert" +) + +func TestSelectDefaultTargetNoTargets(t *testing.T) { + bundle := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{}, + }, + } + err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + assert.ErrorContains(t, err, "no targets defined") +} + +func TestSelectDefaultTargetSingleTargets(t *testing.T) { + bundle := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{ + "foo": {}, + }, + }, + } + err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + assert.NoError(t, err) + assert.Equal(t, "foo", bundle.Config.Bundle.Target) +} + +func TestSelectDefaultTargetNoDefaults(t *testing.T) { + bundle := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{ + "foo": {}, + "bar": {}, + "qux": {}, + }, + }, + } + err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + assert.ErrorContains(t, err, "please specify target") +} + +func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) { + bundle := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{ + "foo": nil, + "bar": nil, + }, + }, + } + err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + assert.ErrorContains(t, err, "please specify target") +} + +func TestSelectDefaultTargetMultipleDefaults(t *testing.T) { + bundle := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{ + "foo": {Default: true}, + "bar": {Default: true}, + "qux": {Default: true}, + }, + }, + } + err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + assert.ErrorContains(t, err, "multiple targets are marked as default") +} + +func TestSelectDefaultTargetSingleDefault(t *testing.T) { + bundle := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{ + "foo": {}, + "bar": {Default: true}, + "qux": {}, + }, + }, + } + err := 
mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + assert.NoError(t, err) + assert.Equal(t, "bar", bundle.Config.Bundle.Target) +} diff --git a/bundle/config/mutator/select_environment.go b/bundle/config/mutator/select_environment.go deleted file mode 100644 index 6ced66e8..00000000 --- a/bundle/config/mutator/select_environment.go +++ /dev/null @@ -1,48 +0,0 @@ -package mutator - -import ( - "context" - "fmt" - - "github.com/databricks/cli/bundle" -) - -type selectEnvironment struct { - name string -} - -// SelectEnvironment merges the specified environment into the root configuration. -func SelectEnvironment(name string) bundle.Mutator { - return &selectEnvironment{ - name: name, - } -} - -func (m *selectEnvironment) Name() string { - return fmt.Sprintf("SelectEnvironment(%s)", m.name) -} - -func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) error { - if b.Config.Environments == nil { - return fmt.Errorf("no environments defined") - } - - // Get specified environment - env, ok := b.Config.Environments[m.name] - if !ok { - return fmt.Errorf("%s: no such environment", m.name) - } - - // Merge specified environment into root configuration structure. - err := b.Config.MergeEnvironment(env) - if err != nil { - return err - } - - // Store specified environment in configuration for reference. - b.Config.Bundle.Environment = m.name - - // Clear environments after loading. - b.Config.Environments = nil - return nil -} diff --git a/bundle/config/mutator/select_target.go b/bundle/config/mutator/select_target.go new file mode 100644 index 00000000..3be1f2e1 --- /dev/null +++ b/bundle/config/mutator/select_target.go @@ -0,0 +1,54 @@ +package mutator + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" +) + +type selectTarget struct { + name string +} + +// SelectTarget merges the specified target into the root configuration. +func SelectTarget(name string) bundle.Mutator { + return &selectTarget{ + name: name, + } +} + +func (m *selectTarget) Name() string { + return fmt.Sprintf("SelectTarget(%s)", m.name) +} + +func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) error { + if b.Config.Targets == nil { + return fmt.Errorf("no targets defined") + } + + // Get specified target + target, ok := b.Config.Targets[m.name] + if !ok { + return fmt.Errorf("%s: no such target", m.name) + } + + // Merge specified target into root configuration structure. + err := b.Config.MergeTargetOverrides(target) + if err != nil { + return err + } + + // Store specified target in configuration for reference. + b.Config.Bundle.Target = m.name + + // We do this for backward compatibility. + // TODO: remove when Environments section is not supported anymore. + b.Config.Bundle.Environment = b.Config.Bundle.Target + + // Clear targets after loading. 
+ b.Config.Targets = nil + b.Config.Environments = nil + + return nil +} diff --git a/bundle/config/mutator/select_environment_test.go b/bundle/config/mutator/select_target_test.go similarity index 62% rename from bundle/config/mutator/select_environment_test.go rename to bundle/config/mutator/select_target_test.go index 73b3a789..dfcd8cb0 100644 --- a/bundle/config/mutator/select_environment_test.go +++ b/bundle/config/mutator/select_target_test.go @@ -11,13 +11,13 @@ import ( "github.com/stretchr/testify/require" ) -func TestSelectEnvironment(t *testing.T) { +func TestSelectTarget(t *testing.T) { bundle := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ Host: "foo", }, - Environments: map[string]*config.Environment{ + Targets: map[string]*config.Target{ "default": { Workspace: &config.Workspace{ Host: "bar", @@ -26,19 +26,19 @@ func TestSelectEnvironment(t *testing.T) { }, }, } - err := mutator.SelectEnvironment("default").Apply(context.Background(), bundle) + err := mutator.SelectTarget("default").Apply(context.Background(), bundle) require.NoError(t, err) assert.Equal(t, "bar", bundle.Config.Workspace.Host) } -func TestSelectEnvironmentNotFound(t *testing.T) { +func TestSelectTargetNotFound(t *testing.T) { bundle := &bundle.Bundle{ Config: config.Root{ - Environments: map[string]*config.Environment{ + Targets: map[string]*config.Target{ "default": {}, }, }, } - err := mutator.SelectEnvironment("doesnt-exist").Apply(context.Background(), bundle) - require.Error(t, err, "no environments defined") + err := mutator.SelectTarget("doesnt-exist").Apply(context.Background(), bundle) + require.Error(t, err, "no targets defined") } diff --git a/bundle/config/resources.go b/bundle/config/resources.go index b15158b4..5d47b918 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -115,7 +115,7 @@ func (r *Resources) SetConfigFilePath(path string) { } // MergeJobClusters iterates over all jobs and merges their job clusters. -// This is called after applying the environment overrides. +// This is called after applying the target overrides. func (r *Resources) MergeJobClusters() error { for _, job := range r.Jobs { if err := job.MergeJobClusters(); err != nil { diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 327d7e13..6200062a 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -22,7 +22,7 @@ func (j *Job) MergeJobClusters() error { keys := make(map[string]*jobs.JobCluster) output := make([]jobs.JobCluster, 0, len(j.JobClusters)) - // Environment overrides are always appended, so we can iterate in natural order to + // Target overrides are always appended, so we can iterate in natural order to // first find the base definition, and merge instances we encounter later. for i := range j.JobClusters { key := j.JobClusters[i].JobClusterKey diff --git a/bundle/config/root.go b/bundle/config/root.go index b6d1efc9..24426dd8 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -69,11 +69,14 @@ type Root struct { // to deploy in this bundle (e.g. jobs, pipelines, etc.). Resources Resources `json:"resources,omitempty"` - // Environments can be used to differentiate settings and resources between - // bundle deployment environments (e.g. development, staging, production). + // Targets can be used to differentiate settings and resources between + // bundle deployment targets (e.g. development, staging, production). 
// If not specified, the code below initializes this field with a - // single default-initialized environment called "default". - Environments map[string]*Environment `json:"environments,omitempty"` + // single default-initialized target called "default". + Targets map[string]*Target `json:"targets,omitempty"` + + // DEPRECATED. Left for backward compatibility with Targets + Environments map[string]*Target `json:"environments,omitempty"` } func Load(path string) (*Root, error) { @@ -103,8 +106,8 @@ func Load(path string) (*Root, error) { // was loaded from in configuration leafs that require it. func (r *Root) SetConfigFilePath(path string) { r.Resources.SetConfigFilePath(path) - if r.Environments != nil { - for _, env := range r.Environments { + if r.Targets != nil { + for _, env := range r.Targets { if env == nil { continue } @@ -148,6 +151,15 @@ func (r *Root) Load(path string) error { return fmt.Errorf("failed to load %s: %w", path, err) } + if r.Environments != nil && r.Targets != nil { + return fmt.Errorf("both 'environments' and 'targets' are specified, only 'targets' should be used: %s", path) + } + + if r.Environments != nil { + //TODO: add a command line notice that this is a deprecated option. + r.Targets = r.Environments + } + r.Path = filepath.Dir(path) r.SetConfigFilePath(path) @@ -169,37 +181,37 @@ func (r *Root) Merge(other *Root) error { return mergo.Merge(r, other, mergo.WithOverride) } -func (r *Root) MergeEnvironment(env *Environment) error { +func (r *Root) MergeTargetOverrides(target *Target) error { var err error - // Environment may be nil if it's empty. - if env == nil { + // Target may be nil if it's empty. + if target == nil { return nil } - if env.Bundle != nil { - err = mergo.Merge(&r.Bundle, env.Bundle, mergo.WithOverride) + if target.Bundle != nil { + err = mergo.Merge(&r.Bundle, target.Bundle, mergo.WithOverride) if err != nil { return err } } - if env.Workspace != nil { - err = mergo.Merge(&r.Workspace, env.Workspace, mergo.WithOverride) + if target.Workspace != nil { + err = mergo.Merge(&r.Workspace, target.Workspace, mergo.WithOverride) if err != nil { return err } } - if env.Artifacts != nil { - err = mergo.Merge(&r.Artifacts, env.Artifacts, mergo.WithOverride, mergo.WithAppendSlice) + if target.Artifacts != nil { + err = mergo.Merge(&r.Artifacts, target.Artifacts, mergo.WithOverride, mergo.WithAppendSlice) if err != nil { return err } } - if env.Resources != nil { - err = mergo.Merge(&r.Resources, env.Resources, mergo.WithOverride, mergo.WithAppendSlice) + if target.Resources != nil { + err = mergo.Merge(&r.Resources, target.Resources, mergo.WithOverride, mergo.WithAppendSlice) if err != nil { return err } @@ -210,8 +222,8 @@ func (r *Root) MergeEnvironment(env *Environment) error { } } - if env.Variables != nil { - for k, v := range env.Variables { + if target.Variables != nil { + for k, v := range target.Variables { variable, ok := r.Variables[k] if !ok { return fmt.Errorf("variable %s is not defined but is assigned a value", k) @@ -222,24 +234,24 @@ func (r *Root) MergeEnvironment(env *Environment) error { } } - if env.Mode != "" { - r.Bundle.Mode = env.Mode + if target.Mode != "" { + r.Bundle.Mode = target.Mode } - if env.ComputeID != "" { - r.Bundle.ComputeID = env.ComputeID + if target.ComputeID != "" { + r.Bundle.ComputeID = target.ComputeID } git := &r.Bundle.Git - if env.Git.Branch != "" { - git.Branch = env.Git.Branch + if target.Git.Branch != "" { + git.Branch = target.Git.Branch git.Inferred = false } - if env.Git.Commit != "" { - git.Commit 
= env.Git.Commit + if target.Git.Commit != "" { + git.Commit = target.Git.Commit } - if env.Git.OriginURL != "" { - git.OriginURL = env.Git.OriginURL + if target.Git.OriginURL != "" { + git.OriginURL = target.Git.OriginURL } return nil diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index 531ffcec..6e263667 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -57,7 +57,7 @@ func TestRootMergeStruct(t *testing.T) { func TestRootMergeMap(t *testing.T) { root := &Root{ Path: "path", - Environments: map[string]*Environment{ + Targets: map[string]*Target{ "development": { Workspace: &Workspace{ Host: "foo", @@ -68,7 +68,7 @@ } other := &Root{ Path: "path", - Environments: map[string]*Environment{ + Targets: map[string]*Target{ "development": { Workspace: &Workspace{ Host: "bar", @@ -77,7 +77,7 @@ }, } assert.NoError(t, root.Merge(other)) - assert.Equal(t, &Workspace{Host: "bar", Profile: "profile"}, root.Environments["development"].Workspace) + assert.Equal(t, &Workspace{Host: "bar", Profile: "profile"}, root.Targets["development"].Workspace) } func TestDuplicateIdOnLoadReturnsError(t *testing.T) { @@ -159,12 +159,12 @@ func TestInitializeVariablesUndefinedVariables(t *testing.T) { assert.ErrorContains(t, err, "variable bar has not been defined") } -func TestRootMergeEnvironmentWithMode(t *testing.T) { +func TestRootMergeTargetOverridesWithMode(t *testing.T) { root := &Root{ Bundle: Bundle{}, } - env := &Environment{Mode: Development} - require.NoError(t, root.MergeEnvironment(env)) + env := &Target{Mode: Development} + require.NoError(t, root.MergeTargetOverrides(env)) assert.Equal(t, Development, root.Bundle.Mode) } diff --git a/bundle/config/environment.go b/bundle/config/target.go similarity index 80% rename from bundle/config/environment.go rename to bundle/config/target.go index 7152f791..10775049 100644 --- a/bundle/config/environment.go +++ b/bundle/config/target.go @@ -2,14 +2,14 @@ package config type Mode string -// Environment defines overrides for a single environment. +// Target defines overrides for a single target. // This structure is recursively merged into the root configuration. -type Environment struct { - // Default marks that this environment must be used if one isn't specified - // by the user (through environment variable or command line argument). +type Target struct { + // Default marks that this target must be used if one isn't specified + // by the user (through environment variable or command line argument). Default bool `json:"default,omitempty"` - // Determines the mode of the environment. + // Determines the mode of the target. // For example, 'mode: development' can be used for deployments for // development purposes. Mode Mode `json:"mode,omitempty"` @@ -27,7 +27,7 @@ // Override default values for defined variables // Does not permit defining new variables or redefining existing ones - // in the scope of an environment + // in the scope of a target Variables map[string]string `json:"variables,omitempty"` Git Git `json:"git,omitempty"` diff --git a/bundle/config/variable/variable.go b/bundle/config/variable/variable.go index 132920bb..73925d43 100644 --- a/bundle/config/variable/variable.go +++ b/bundle/config/variable/variable.go @@ -18,7 +18,7 @@ type Variable struct { // resolved in the following priority order (from highest to lowest) // // 1. Command line flag. For example: `--var="foo=bar"` - // 2. 
Environment variable. eg: BUNDLE_VAR_foo=bar + // 2. Environment variable. eg: BUNDLE_VAR_foo=bar + // 3. Default value as defined in the applicable environments block + // 4. Default value defined in variable definition + // 5. Throw error, since if no default value is defined, then the variable diff --git a/bundle/config/workspace.go b/bundle/config/workspace.go index bd116a9c..90cd59c6 100644 --- a/bundle/config/workspace.go +++ b/bundle/config/workspace.go @@ -45,7 +45,7 @@ type Workspace struct { CurrentUser *User `json:"current_user,omitempty" bundle:"readonly"` // Remote workspace base path for deployment state, for artifacts, as synchronization target. - // This defaults to "~/.bundle/${bundle.name}/${bundle.environment}" where "~" expands to + // This defaults to "~/.bundle/${bundle.name}/${bundle.target}" where "~" expands to // the current user's home directory in the workspace (e.g. `/Users/jane@doe.com`). RootPath string `json:"root_path,omitempty"` diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 79e18170..5bb5929e 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -31,7 +31,7 @@ func TestInitEnvironmentVariables(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", Terraform: &config.Terraform{ ExecPath: "terraform", }, @@ -58,7 +58,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", }, }, } @@ -86,7 +86,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", }, }, } @@ -112,7 +112,7 @@ func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", }, }, } @@ -142,7 +142,7 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", }, }, } @@ -172,7 +172,7 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileSet(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", }, }, } @@ -202,7 +202,7 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", }, }, } @@ -230,7 +230,7 @@ func TestSetProxyEnvVars(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", }, }, } diff --git a/bundle/deploy/terraform/load_test.go b/bundle/deploy/terraform/load_test.go index c235c08e..1937ca8a 100644 --- a/bundle/deploy/terraform/load_test.go +++ b/bundle/deploy/terraform/load_test.go @@ -20,7 +20,7 @@ func TestLoadWithNoState(t *testing.T) { Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ - Environment: "whatever", + Target: "whatever", Terraform: &config.Terraform{ ExecPath: "terraform", }, diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index fc5056f6..219ec26c 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -26,7 +26,7 @@ func Initialize() bundle.Mutator { 
interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), ), mutator.OverrideCompute(), - mutator.ProcessEnvironmentMode(), + mutator.ProcessTargetMode(), mutator.TranslatePaths(), terraform.Initialize(), }, diff --git a/bundle/schema/README.md b/bundle/schema/README.md index 4df43cf2..fe6b149c 100644 --- a/bundle/schema/README.md +++ b/bundle/schema/README.md @@ -3,7 +3,7 @@ `docs/bundle_descriptions.json` contains both autogenerated as well as manually written descriptions for the json schema. Specifically 1. `resources` : almost all descriptions are autogenerated from the OpenAPI spec -2. `environments` : almost all descriptions are copied over from root level entities (eg: `bundle`, `artifacts`) +2. `targets` : almost all descriptions are copied over from root level entities (eg: `bundle`, `artifacts`) 3. `bundle` : manually editted 4. `include` : manually editted 5. `workspace` : manually editted @@ -17,7 +17,7 @@ These descriptions are rendered in the inline documentation in an IDE `databricks bundle schema --only-docs > ~/databricks/bundle/schema/docs/bundle_descriptions.json` 2. Manually edit bundle_descriptions.json to add your descriptions 3. Build again to embed the new `bundle_descriptions.json` into the binary (`go build`) -4. Again run `databricks bundle schema --only-docs > ~/databricks/bundle/schema/docs/bundle_descriptions.json` to copy over any applicable descriptions to `environments` +4. Again run `databricks bundle schema --only-docs > ~/databricks/bundle/schema/docs/bundle_descriptions.json` to copy over any applicable descriptions to `targets` 5. push to repo diff --git a/bundle/schema/docs.go b/bundle/schema/docs.go index 5fcef4ed..4b2fd36a 100644 --- a/bundle/schema/docs.go +++ b/bundle/schema/docs.go @@ -52,20 +52,20 @@ func BundleDocs(openapiSpecPath string) (*Docs, error) { } docs.Properties["resources"] = schemaToDocs(resourceSchema) } - docs.refreshEnvironmentsDocs() + docs.refreshTargetsDocs() return docs, nil } -func (docs *Docs) refreshEnvironmentsDocs() error { - environmentsDocs, ok := docs.Properties["environments"] - if !ok || environmentsDocs.AdditionalProperties == nil || - environmentsDocs.AdditionalProperties.Properties == nil { - return fmt.Errorf("invalid environments descriptions") +func (docs *Docs) refreshTargetsDocs() error { + targetsDocs, ok := docs.Properties["targets"] + if !ok || targetsDocs.AdditionalProperties == nil || + targetsDocs.AdditionalProperties.Properties == nil { + return fmt.Errorf("invalid targets descriptions") } - environmentProperties := environmentsDocs.AdditionalProperties.Properties + targetProperties := targetsDocs.AdditionalProperties.Properties propertiesToCopy := []string{"artifacts", "bundle", "resources", "workspace"} for _, p := range propertiesToCopy { - environmentProperties[p] = docs.Properties[p] + targetProperties[p] = docs.Properties[p] } return nil } diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 2adb11f2..84f0492f 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -36,7 +36,7 @@ } } }, - "environments": { + "targets": { "description": "", "additionalproperties": { "description": "", @@ -1827,7 +1827,7 @@ "description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg." }, "root_path": { - "description": "The base location for synchronizing files, artifacts and state. 
Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.environment}`" + "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`" }, "state_path": { "description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`" @@ -3591,7 +3591,7 @@ "description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg." }, "root_path": { - "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.environment}`" + "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`" }, "state_path": { "description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`" diff --git a/bundle/tests/autoload_git/databricks.yml b/bundle/tests/autoload_git/databricks.yml index ba4785ae..92ab8d66 100644 --- a/bundle/tests/autoload_git/databricks.yml +++ b/bundle/tests/autoload_git/databricks.yml @@ -1,7 +1,7 @@ bundle: name: autoload git config test -environments: +targets: development: default: true diff --git a/bundle/tests/environment_empty/databricks.yml b/bundle/tests/environment_empty/databricks.yml deleted file mode 100644 index 17c03c8d..00000000 --- a/bundle/tests/environment_empty/databricks.yml +++ /dev/null @@ -1,5 +0,0 @@ -bundle: - name: environment_empty - -environments: - development: diff --git a/bundle/tests/environment_empty_test.go b/bundle/tests/environment_empty_test.go deleted file mode 100644 index fb2e3341..00000000 --- a/bundle/tests/environment_empty_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEnvironmentEmpty(t *testing.T) { - b := loadEnvironment(t, "./environment_empty", "development") - assert.Equal(t, "development", b.Config.Bundle.Environment) -} diff --git a/bundle/tests/environment_git_test.go b/bundle/tests/environment_git_test.go new file mode 100644 index 00000000..bb10825e --- /dev/null +++ b/bundle/tests/environment_git_test.go @@ -0,0 +1,20 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGitAutoLoadWithEnvironment(t *testing.T) { + b := load(t, "./environments_autoload_git") + assert.True(t, b.Config.Bundle.Git.Inferred) + assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") +} + +func TestGitManuallySetBranchWithEnvironment(t *testing.T) { + b := loadTarget(t, "./environments_autoload_git", "production") + assert.False(t, b.Config.Bundle.Git.Inferred) + assert.Equal(t, "main", b.Config.Bundle.Git.Branch) + assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") +} diff --git a/bundle/tests/environment_overrides_test.go b/bundle/tests/environment_overrides_test.go index 0a3f9fcd..91dc2c81 100644 --- a/bundle/tests/environment_overrides_test.go +++ b/bundle/tests/environment_overrides_test.go @@ -7,17 +7,17 @@ import ( ) func TestEnvironmentOverridesWorkspaceDev(t *testing.T) { - b := loadEnvironment(t, "./environment_overrides/workspace", "development") + b := loadTarget(t, "./environment_overrides/workspace", "development") assert.Equal(t, "https://development.acme.cloud.databricks.com/", b.Config.Workspace.Host) } func TestEnvironmentOverridesWorkspaceStaging(t *testing.T) { - b := loadEnvironment(t, 
"./environment_overrides/workspace", "staging") + b := loadTarget(t, "./environment_overrides/workspace", "staging") assert.Equal(t, "https://staging.acme.cloud.databricks.com/", b.Config.Workspace.Host) } func TestEnvironmentOverridesResourcesDev(t *testing.T) { - b := loadEnvironment(t, "./environment_overrides/resources", "development") + b := loadTarget(t, "./environment_overrides/resources", "development") assert.Equal(t, "base job", b.Config.Resources.Jobs["job1"].Name) // Base values are preserved in the development environment. @@ -26,7 +26,7 @@ func TestEnvironmentOverridesResourcesDev(t *testing.T) { } func TestEnvironmentOverridesResourcesStaging(t *testing.T) { - b := loadEnvironment(t, "./environment_overrides/resources", "staging") + b := loadTarget(t, "./environment_overrides/resources", "staging") assert.Equal(t, "staging job", b.Config.Resources.Jobs["job1"].Name) // Overrides are only applied if they are not zero-valued. diff --git a/bundle/tests/environments_autoload_git/databricks.yml b/bundle/tests/environments_autoload_git/databricks.yml new file mode 100644 index 00000000..ba4785ae --- /dev/null +++ b/bundle/tests/environments_autoload_git/databricks.yml @@ -0,0 +1,11 @@ +bundle: + name: autoload git config test + +environments: + development: + default: true + + production: + # production can only be deployed from the 'main' branch + git: + branch: main diff --git a/bundle/tests/environments_job_and_pipeline/databricks.yml b/bundle/tests/environments_job_and_pipeline/databricks.yml new file mode 100644 index 00000000..e29fa034 --- /dev/null +++ b/bundle/tests/environments_job_and_pipeline/databricks.yml @@ -0,0 +1,44 @@ +resources: + pipelines: + nyc_taxi_pipeline: + name: "nyc taxi loader" + libraries: + - notebook: + path: ./dlt/nyc_taxi_loader + +environments: + development: + mode: development + resources: + pipelines: + nyc_taxi_pipeline: + target: nyc_taxi_development + development: true + + staging: + resources: + pipelines: + nyc_taxi_pipeline: + target: nyc_taxi_staging + development: false + + production: + mode: production + resources: + pipelines: + nyc_taxi_pipeline: + target: nyc_taxi_production + development: false + photon: true + + jobs: + pipeline_schedule: + name: Daily refresh of production pipeline + + schedule: + quartz_cron_expression: 6 6 11 * * ? 
+ timezone_id: UTC + + tasks: + - pipeline_task: + pipeline_id: "to be interpolated" diff --git a/bundle/tests/environments_job_and_pipeline_test.go b/bundle/tests/environments_job_and_pipeline_test.go new file mode 100644 index 00000000..a18daf90 --- /dev/null +++ b/bundle/tests/environments_job_and_pipeline_test.go @@ -0,0 +1,56 @@ +package config_tests + +import ( + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestJobAndPipelineDevelopmentWithEnvironment(t *testing.T) { + b := loadTarget(t, "./environments_job_and_pipeline", "development") + assert.Len(t, b.Config.Resources.Jobs, 0) + assert.Len(t, b.Config.Resources.Pipelines, 1) + + p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] + assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) + assert.Equal(t, b.Config.Bundle.Mode, config.Development) + assert.True(t, p.Development) + require.Len(t, p.Libraries, 1) + assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) + assert.Equal(t, "nyc_taxi_development", p.Target) +} + +func TestJobAndPipelineStagingWithEnvironment(t *testing.T) { + b := loadTarget(t, "./environments_job_and_pipeline", "staging") + assert.Len(t, b.Config.Resources.Jobs, 0) + assert.Len(t, b.Config.Resources.Pipelines, 1) + + p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] + assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) + assert.False(t, p.Development) + require.Len(t, p.Libraries, 1) + assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) + assert.Equal(t, "nyc_taxi_staging", p.Target) +} + +func TestJobAndPipelineProductionWithEnvironment(t *testing.T) { + b := loadTarget(t, "./environments_job_and_pipeline", "production") + assert.Len(t, b.Config.Resources.Jobs, 1) + assert.Len(t, b.Config.Resources.Pipelines, 1) + + p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] + assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) + assert.False(t, p.Development) + require.Len(t, p.Libraries, 1) + assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) + assert.Equal(t, "nyc_taxi_production", p.Target) + + j := b.Config.Resources.Jobs["pipeline_schedule"] + assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(j.ConfigFilePath)) + assert.Equal(t, "Daily refresh of production pipeline", j.Name) + require.Len(t, j.Tasks, 1) + assert.NotEmpty(t, j.Tasks[0].PipelineTask.PipelineId) +} diff --git a/bundle/tests/environments_override_job_cluster/databricks.yml b/bundle/tests/environments_override_job_cluster/databricks.yml new file mode 100644 index 00000000..33061b2e --- /dev/null +++ b/bundle/tests/environments_override_job_cluster/databricks.yml @@ -0,0 +1,35 @@ +bundle: + name: override_job_cluster + +workspace: + host: https://acme.cloud.databricks.com/ + +resources: + jobs: + foo: + name: job + job_clusters: + - job_cluster_key: key + new_cluster: + spark_version: 13.3.x-scala2.12 + +environments: + development: + resources: + jobs: + foo: + job_clusters: + - job_cluster_key: key + new_cluster: + node_type_id: i3.xlarge + num_workers: 1 + + staging: + resources: + jobs: + foo: + job_clusters: + - job_cluster_key: key + new_cluster: + node_type_id: i3.2xlarge + num_workers: 4 diff --git a/bundle/tests/environments_override_job_cluster_test.go 
b/bundle/tests/environments_override_job_cluster_test.go new file mode 100644 index 00000000..b3ec7445 --- /dev/null +++ b/bundle/tests/environments_override_job_cluster_test.go @@ -0,0 +1,29 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOverrideJobClusterDevWithEnvironment(t *testing.T) { + b := loadTarget(t, "./environments_override_job_cluster", "development") + assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) + assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) + + c := b.Config.Resources.Jobs["foo"].JobClusters[0] + assert.Equal(t, "13.3.x-scala2.12", c.NewCluster.SparkVersion) + assert.Equal(t, "i3.xlarge", c.NewCluster.NodeTypeId) + assert.Equal(t, 1, c.NewCluster.NumWorkers) +} + +func TestOverrideJobClusterStagingWithEnvironment(t *testing.T) { + b := loadTarget(t, "./environments_override_job_cluster", "staging") + assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) + assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) + + c := b.Config.Resources.Jobs["foo"].JobClusters[0] + assert.Equal(t, "13.3.x-scala2.12", c.NewCluster.SparkVersion) + assert.Equal(t, "i3.2xlarge", c.NewCluster.NodeTypeId) + assert.Equal(t, 4, c.NewCluster.NumWorkers) +} diff --git a/bundle/tests/git_test.go b/bundle/tests/git_test.go index daab4d30..c5ae83a2 100644 --- a/bundle/tests/git_test.go +++ b/bundle/tests/git_test.go @@ -17,7 +17,7 @@ func TestGitAutoLoad(t *testing.T) { } func TestGitManuallySetBranch(t *testing.T) { - b := loadEnvironment(t, "./autoload_git", "production") + b := loadTarget(t, "./autoload_git", "production") assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") diff --git a/bundle/tests/interpolation_target/databricks.yml b/bundle/tests/interpolation_target/databricks.yml new file mode 100644 index 00000000..ad4ebe19 --- /dev/null +++ b/bundle/tests/interpolation_target/databricks.yml @@ -0,0 +1,14 @@ +bundle: + name: foo ${workspace.profile} + +workspace: + profile: bar + +targets: + development: + default: true + +resources: + jobs: + my_job: + name: "${bundle.name} | ${workspace.profile} | ${bundle.environment} | ${bundle.target}" diff --git a/bundle/tests/interpolation_test.go b/bundle/tests/interpolation_test.go index 47b0c775..837891a0 100644 --- a/bundle/tests/interpolation_test.go +++ b/bundle/tests/interpolation_test.go @@ -20,3 +20,15 @@ func TestInterpolation(t *testing.T) { assert.Equal(t, "foo bar", b.Config.Bundle.Name) assert.Equal(t, "foo bar | bar", b.Config.Resources.Jobs["my_job"].Name) } + +func TestInterpolationWithTarget(t *testing.T) { + b := loadTarget(t, "./interpolation_target", "development") + err := bundle.Apply(context.Background(), b, interpolation.Interpolate( + interpolation.IncludeLookupsInPath("bundle"), + interpolation.IncludeLookupsInPath("workspace"), + )) + require.NoError(t, err) + assert.Equal(t, "foo bar", b.Config.Bundle.Name) + assert.Equal(t, "foo bar | bar | development | development", b.Config.Resources.Jobs["my_job"].Name) + +} diff --git a/bundle/tests/job_and_pipeline/databricks.yml b/bundle/tests/job_and_pipeline/databricks.yml index e29fa034..67d306ff 100644 --- a/bundle/tests/job_and_pipeline/databricks.yml +++ b/bundle/tests/job_and_pipeline/databricks.yml @@ -6,7 +6,7 @@ resources: - notebook: path: ./dlt/nyc_taxi_loader -environments: +targets: development: mode: development resources: diff --git 
a/bundle/tests/job_and_pipeline_test.go b/bundle/tests/job_and_pipeline_test.go index d92eabd3..5e8febc3 100644 --- a/bundle/tests/job_and_pipeline_test.go +++ b/bundle/tests/job_and_pipeline_test.go @@ -10,7 +10,7 @@ import ( ) func TestJobAndPipelineDevelopment(t *testing.T) { - b := loadEnvironment(t, "./job_and_pipeline", "development") + b := loadTarget(t, "./job_and_pipeline", "development") assert.Len(t, b.Config.Resources.Jobs, 0) assert.Len(t, b.Config.Resources.Pipelines, 1) @@ -24,7 +24,7 @@ func TestJobAndPipelineDevelopment(t *testing.T) { } func TestJobAndPipelineStaging(t *testing.T) { - b := loadEnvironment(t, "./job_and_pipeline", "staging") + b := loadTarget(t, "./job_and_pipeline", "staging") assert.Len(t, b.Config.Resources.Jobs, 0) assert.Len(t, b.Config.Resources.Pipelines, 1) @@ -37,7 +37,7 @@ func TestJobAndPipelineStaging(t *testing.T) { } func TestJobAndPipelineProduction(t *testing.T) { - b := loadEnvironment(t, "./job_and_pipeline", "production") + b := loadTarget(t, "./job_and_pipeline", "production") assert.Len(t, b.Config.Resources.Jobs, 1) assert.Len(t, b.Config.Resources.Pipelines, 1) diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index 056a82d9..f23b1076 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -18,9 +18,9 @@ func load(t *testing.T, path string) *bundle.Bundle { return b } -func loadEnvironment(t *testing.T, path, env string) *bundle.Bundle { +func loadTarget(t *testing.T, path, env string) *bundle.Bundle { b := load(t, path) - err := bundle.Apply(context.Background(), b, mutator.SelectEnvironment(env)) + err := bundle.Apply(context.Background(), b, mutator.SelectTarget(env)) require.NoError(t, err) return b } diff --git a/bundle/tests/override_job_cluster/databricks.yml b/bundle/tests/override_job_cluster/databricks.yml index 33061b2e..a85b3b71 100644 --- a/bundle/tests/override_job_cluster/databricks.yml +++ b/bundle/tests/override_job_cluster/databricks.yml @@ -13,7 +13,7 @@ resources: new_cluster: spark_version: 13.3.x-scala2.12 -environments: +targets: development: resources: jobs: diff --git a/bundle/tests/override_job_cluster_test.go b/bundle/tests/override_job_cluster_test.go index 97f7c04e..1393e03e 100644 --- a/bundle/tests/override_job_cluster_test.go +++ b/bundle/tests/override_job_cluster_test.go @@ -7,7 +7,7 @@ import ( ) func TestOverrideJobClusterDev(t *testing.T) { - b := loadEnvironment(t, "./override_job_cluster", "development") + b := loadTarget(t, "./override_job_cluster", "development") assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) @@ -18,7 +18,7 @@ func TestOverrideJobClusterDev(t *testing.T) { } func TestOverrideJobClusterStaging(t *testing.T) { - b := loadEnvironment(t, "./override_job_cluster", "staging") + b := loadTarget(t, "./override_job_cluster", "staging") assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) diff --git a/bundle/tests/target_empty/databricks.yml b/bundle/tests/target_empty/databricks.yml new file mode 100644 index 00000000..cd415377 --- /dev/null +++ b/bundle/tests/target_empty/databricks.yml @@ -0,0 +1,5 @@ +bundle: + name: target_empty + +targets: + development: diff --git a/bundle/tests/target_empty_test.go b/bundle/tests/target_empty_test.go new file mode 100644 index 00000000..88705d8b --- /dev/null +++ b/bundle/tests/target_empty_test.go @@ -0,0 +1,12 @@ +package config_tests + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" +) + +func TestTargetEmpty(t *testing.T) { + b := loadTarget(t, "./target_empty", "development") + assert.Equal(t, "development", b.Config.Bundle.Target) +} diff --git a/bundle/tests/target_overrides/resources/databricks.yml b/bundle/tests/target_overrides/resources/databricks.yml new file mode 100644 index 00000000..f6e2a7ed --- /dev/null +++ b/bundle/tests/target_overrides/resources/databricks.yml @@ -0,0 +1,20 @@ +bundle: + name: environment_overrides + +workspace: + host: https://acme.cloud.databricks.com/ + +resources: + jobs: + job1: + name: "base job" + +targets: + development: + default: true + + staging: + resources: + jobs: + job1: + name: "staging job" diff --git a/bundle/tests/target_overrides/workspace/databricks.yml b/bundle/tests/target_overrides/workspace/databricks.yml new file mode 100644 index 00000000..8c4f9487 --- /dev/null +++ b/bundle/tests/target_overrides/workspace/databricks.yml @@ -0,0 +1,14 @@ +bundle: + name: environment_overrides + +workspace: + host: https://acme.cloud.databricks.com/ + +targets: + development: + workspace: + host: https://development.acme.cloud.databricks.com/ + + staging: + workspace: + host: https://staging.acme.cloud.databricks.com/ diff --git a/bundle/tests/target_overrides_test.go b/bundle/tests/target_overrides_test.go new file mode 100644 index 00000000..2516ce2a --- /dev/null +++ b/bundle/tests/target_overrides_test.go @@ -0,0 +1,27 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTargetOverridesWorkspaceDev(t *testing.T) { + b := loadTarget(t, "./target_overrides/workspace", "development") + assert.Equal(t, "https://development.acme.cloud.databricks.com/", b.Config.Workspace.Host) +} + +func TestTargetOverridesWorkspaceStaging(t *testing.T) { + b := loadTarget(t, "./target_overrides/workspace", "staging") + assert.Equal(t, "https://staging.acme.cloud.databricks.com/", b.Config.Workspace.Host) +} + +func TestTargetOverridesResourcesDev(t *testing.T) { + b := loadTarget(t, "./target_overrides/resources", "development") + assert.Equal(t, "base job", b.Config.Resources.Jobs["job1"].Name) +} + +func TestTargetOverridesResourcesStaging(t *testing.T) { + b := loadTarget(t, "./target_overrides/resources", "staging") + assert.Equal(t, "staging job", b.Config.Resources.Jobs["job1"].Name) +} diff --git a/bundle/tests/variables/env_overrides/databricks.yml b/bundle/tests/variables/env_overrides/databricks.yml index 1fec1073..2157596c 100644 --- a/bundle/tests/variables/env_overrides/databricks.yml +++ b/bundle/tests/variables/env_overrides/databricks.yml @@ -12,7 +12,7 @@ bundle: workspace: profile: ${var.a} ${var.b} -environments: +targets: env-with-single-variable-override: variables: b: dev-b diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 365ffbd4..93c82250 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -34,10 +34,10 @@ func TestVariablesLoadingFailsWhenRequiredVariableIsNotSpecified(t *testing.T) { assert.ErrorContains(t, err, "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") } -func TestVariablesEnvironmentsBlockOverride(t *testing.T) { +func TestVariablesTargetsBlockOverride(t *testing.T) { b := load(t, "./variables/env_overrides") err := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectEnvironment("env-with-single-variable-override"), + mutator.SelectTarget("env-with-single-variable-override"), mutator.SetVariables(), interpolation.Interpolate( interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), @@ -46,10 +46,10 @@ func TestVariablesEnvironmentsBlockOverride(t *testing.T) { assert.Equal(t, "default-a dev-b", b.Config.Workspace.Profile) } -func TestVariablesEnvironmentsBlockOverrideForMultipleVariables(t *testing.T) { +func TestVariablesTargetsBlockOverrideForMultipleVariables(t *testing.T) { b := load(t, "./variables/env_overrides") err := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectEnvironment("env-with-two-variable-overrides"), + mutator.SelectTarget("env-with-two-variable-overrides"), mutator.SetVariables(), interpolation.Interpolate( interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), @@ -58,11 +58,11 @@ func TestVariablesEnvironmentsBlockOverrideForMultipleVariables(t *testing.T) { assert.Equal(t, "prod-a prod-b", b.Config.Workspace.Profile) } -func TestVariablesEnvironmentsBlockOverrideWithProcessEnvVars(t *testing.T) { +func TestVariablesTargetsBlockOverrideWithProcessEnvVars(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "env-var-b") b := load(t, "./variables/env_overrides") err := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectEnvironment("env-with-two-variable-overrides"), + mutator.SelectTarget("env-with-two-variable-overrides"), mutator.SetVariables(), interpolation.Interpolate( interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), @@ -71,10 +71,10 @@ func TestVariablesEnvironmentsBlockOverrideWithProcessEnvVars(t *testing.T) { assert.Equal(t, "prod-a env-var-b", b.Config.Workspace.Profile) } -func TestVariablesEnvironmentsBlockOverrideWithMissingVariables(t *testing.T) { +func TestVariablesTargetsBlockOverrideWithMissingVariables(t *testing.T) { b := load(t, "./variables/env_overrides") err := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectEnvironment("env-missing-a-required-variable-assignment"), + mutator.SelectTarget("env-missing-a-required-variable-assignment"), mutator.SetVariables(), interpolation.Interpolate( interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), @@ -82,10 +82,10 @@ func TestVariablesEnvironmentsBlockOverrideWithMissingVariables(t *testing.T) { assert.ErrorContains(t, err, "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") } -func TestVariablesEnvironmentsBlockOverrideWithUndefinedVariables(t *testing.T) { +func TestVariablesTargetsBlockOverrideWithUndefinedVariables(t *testing.T) { b := load(t, "./variables/env_overrides") err := bundle.Apply(context.Background(), b, bundle.Seq( - mutator.SelectEnvironment("env-using-an-undefined-variable"), + mutator.SelectTarget("env-using-an-undefined-variable"), mutator.SetVariables(), interpolation.Interpolate( interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), diff --git a/cmd/bundle/variables.go b/cmd/bundle/variables.go index 33f557cc..c3e4af64 100644 --- a/cmd/bundle/variables.go +++ b/cmd/bundle/variables.go @@ -7,7 +7,7 @@ import ( ) func ConfigureBundleWithVariables(cmd *cobra.Command, args []string) error { - // Load bundle config and apply environment + // Load bundle config and apply target err := root.MustConfigureBundle(cmd, args) if err != nil { return err } diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index c51fd830..0c1e4052 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -131,7 +131,7 @@ func newConfigureCommand() *cobra.Command { // Include token flag for compatibility with the legacy CLI. // It doesn't actually do anything because we always use PATs. - cmd.Flags().BoolP("token", "t", true, "Configure using Databricks Personal Access Token") + cmd.Flags().Bool("token", true, "Configure using Databricks Personal Access Token") cmd.Flags().MarkHidden("token") cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index f691bbfc..e1c12336 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -11,11 +11,12 @@ import ( ) const envName = "DATABRICKS_BUNDLE_ENV" +const targetName = "DATABRICKS_BUNDLE_TARGET" -// getEnvironment returns the name of the environment to operate in. -func getEnvironment(cmd *cobra.Command) (value string) { +// getTarget returns the name of the target to operate in. +func getTarget(cmd *cobra.Command) (value string) { // The command line flag takes precedence. - flag := cmd.Flag("environment") + flag := cmd.Flag("target") if flag != nil { value = flag.Value.String() if value != "" { @@ -23,8 +24,23 @@ } + oldFlag := cmd.Flag("environment") + if oldFlag != nil { + value = oldFlag.Value.String() + if value != "" { + return + } + } + // If it's not set, use the environment variable. - return os.Getenv(envName) + target := os.Getenv(targetName) + // If the target is not set via the new variable, fall back to the old variable name. + // TODO: remove when environments section is not supported anymore + if target == "" { + target = os.Getenv(envName) + } + + return target } func getProfile(cmd *cobra.Command) (value string) { @@ -80,11 +96,11 @@ func configureBundle(cmd *cobra.Command, args []string, load func(ctx context.Co } var m bundle.Mutator - env := getEnvironment(cmd) + env := getTarget(cmd) if env == "" { - m = mutator.SelectDefaultEnvironment() + m = mutator.SelectDefaultTarget() } else { - m = mutator.SelectEnvironment(env) + m = mutator.SelectTarget(env) } ctx := cmd.Context() @@ -108,19 +124,27 @@ func TryConfigureBundle(cmd *cobra.Command, args []string) error { return configureBundle(cmd, args, bundle.TryLoad) } -// environmentCompletion executes to autocomplete the argument to the environment flag. 
-func environmentCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +// targetCompletion executes to autocomplete the argument to the target flag. +func targetCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { b, err := loadBundle(cmd, args, bundle.MustLoad) if err != nil { cobra.CompErrorln(err.Error()) return nil, cobra.ShellCompDirectiveError } - return maps.Keys(b.Config.Environments), cobra.ShellCompDirectiveDefault + return maps.Keys(b.Config.Targets), cobra.ShellCompDirectiveDefault } +func initTargetFlag(cmd *cobra.Command) { + // To operate in the context of a bundle, all commands must take a "target" parameter. + cmd.PersistentFlags().StringP("target", "t", "", "bundle target to use (if applicable)") + cmd.RegisterFlagCompletionFunc("target", targetCompletion) +} + +// DEPRECATED flag func initEnvironmentFlag(cmd *cobra.Command) { // To operate in the context of a bundle, all commands must take an "environment" parameter. - cmd.PersistentFlags().StringP("environment", "e", "", "bundle environment to use (if applicable)") - cmd.RegisterFlagCompletionFunc("environment", environmentCompletion) + cmd.PersistentFlags().StringP("environment", "e", "", "bundle target to use (if applicable)") + cmd.PersistentFlags().MarkDeprecated("environment", "use --target flag instead") + cmd.RegisterFlagCompletionFunc("environment", targetCompletion) } diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 4382cf22..8aff9018 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -128,3 +128,27 @@ func TestBundleConfigureWithProfileFlagAndEnvVariable(t *testing.T) { b.WorkspaceClient() }) } + +func TestTargetFlagFull(t *testing.T) { + cmd := emptyCommand(t) + initTargetFlag(cmd) + cmd.SetArgs([]string{"version", "--target", "development"}) + + ctx := context.Background() + err := cmd.ExecuteContext(ctx) + assert.NoError(t, err) + + assert.Equal(t, cmd.Flag("target").Value.String(), "development") +} + +func TestTargetFlagShort(t *testing.T) { + cmd := emptyCommand(t) + initTargetFlag(cmd) + cmd.SetArgs([]string{"version", "-t", "production"}) + + ctx := context.Background() + err := cmd.ExecuteContext(ctx) + assert.NoError(t, err) + + assert.Equal(t, cmd.Flag("target").Value.String(), "production") +} diff --git a/cmd/root/root.go b/cmd/root/root.go index 48868b41..c71cf9ea 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -36,6 +36,7 @@ func New() *cobra.Command { outputFlag := initOutputFlag(cmd) initProfileFlag(cmd) initEnvironmentFlag(cmd) + initTargetFlag(cmd) cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index a6eedbe6..06e97540 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -18,7 +18,7 @@ func TestSyncOptionsFromBundle(t *testing.T) { Path: tempDir, Bundle: config.Bundle{ - Environment: "default", + Target: "default", }, Workspace: config.Workspace{ From 042fbaa61414f818ed2a9cb3bf60afd49e348c10 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 17 Aug 2023 22:32:30 +0200 Subject: [PATCH 028/310] Rename init project-dir flag to output-dir (#676) ## Changes This PR: 1. Renames the project-dir flag to output-dir 2. Makes the project dir flag optional. When unspecified we default to the current working directory. 
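For example, a hypothetical invocation after this change (`my-template` is a placeholder for a local template directory or a repository URL, not a template shipped with the CLI):

```
# Initialize a template into an explicit directory:
databricks bundle init my-template --output-dir ./my-project

# With the flag omitted, the template is initialized in the current working directory:
databricks bundle init my-template
```
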
## Tests Manually --------- Co-authored-by: Pieter Noordhuis --- cmd/bundle/init.go | 9 ++++----- libs/template/materialize.go | 6 +++--- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index e3d76ecf..14c57582 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -42,10 +42,9 @@ func newInitCommand() *cobra.Command { } var configFile string - var projectDir string + var outputDir string cmd.Flags().StringVar(&configFile, "config-file", "", "File containing input parameters for template initialization.") - cmd.Flags().StringVar(&projectDir, "project-dir", "", "The project will be initialized in this directory.") - cmd.MarkFlagRequired("project-dir") + cmd.Flags().StringVar(&outputDir, "output-dir", "", "Directory to write the initialized template to.") cmd.RunE = func(cmd *cobra.Command, args []string) error { templatePath := args[0] @@ -54,7 +53,7 @@ func newInitCommand() *cobra.Command { if !isRepoUrl(templatePath) { // skip downloading the repo because input arg is not a URL. We assume // it's a path on the local file system in that case - return template.Materialize(ctx, configFile, templatePath, projectDir) + return template.Materialize(ctx, configFile, templatePath, outputDir) } // Download the template in a temporary directory @@ -72,7 +71,7 @@ func newInitCommand() *cobra.Command { } defer os.RemoveAll(templateDir) - return template.Materialize(ctx, configFile, templateDir, projectDir) + return template.Materialize(ctx, configFile, templateDir, outputDir) } return cmd diff --git a/libs/template/materialize.go b/libs/template/materialize.go index bbc9e8da..426646c3 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -16,8 +16,8 @@ const schemaFileName = "databricks_template_schema.json" // ctx: context containing a cmdio object. This is used to prompt the user // configFilePath: file path containing user defined config values // templateRoot: root of the template definition -// projectDir: root of directory where to initialize the project -func Materialize(ctx context.Context, configFilePath, templateRoot, projectDir string) error { +// outputDir: root of directory where to initialize the template +func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir string) error { templatePath := filepath.Join(templateRoot, templateDirName) libraryPath := filepath.Join(templateRoot, libraryDirName) schemaPath := filepath.Join(templateRoot, schemaFileName) @@ -48,7 +48,7 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, projectDir s } // Walk and render the template, since input configuration is complete - r, err := newRenderer(ctx, config.values, templatePath, libraryPath, projectDir) + r, err := newRenderer(ctx, config.values, templatePath, libraryPath, outputDir) if err != nil { return err } From e3e9bc6def4e55603a316d0f155664d6bacdb11d Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 18 Aug 2023 10:07:25 +0200 Subject: [PATCH 029/310] Added support for sync.include and sync.exclude sections (#671) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes Added support for `sync.include` and `sync.exclude` sections ## Tests Added `sample-java` folder to gitignore ``` bundle: name: wheel-task sync: include: - "./sample-java/*.kts" ``` Kotlin files were correctly synced. ``` [DEBUG] Test execution command: /opt/homebrew/opt/go@1.21/bin/go test ./... -json -timeout 1h -coverpkg=./... 
-coverprofile=coverage.txt -run ^TestAcc [DEBUG] Test execution directory: /Users/andrew.nester/cli 2023/08/17 17:12:10 [INFO] ✅ TestAccAlertsCreateErrWhenNoArguments (2.320s) 2023/08/17 17:12:10 [INFO] ✅ TestAccApiGet (0.650s) 2023/08/17 17:12:12 [INFO] ✅ TestAccClustersList (1.060s) 2023/08/17 17:12:12 [INFO] ✅ TestAccClustersGet (0.760s) 2023/08/17 17:12:26 [INFO] ✅ TestAccFilerWorkspaceFilesReadWrite (13.270s) 2023/08/17 17:12:32 [INFO] ✅ TestAccFilerWorkspaceFilesReadDir (6.860s) 2023/08/17 17:12:46 [INFO] ✅ TestAccFilerDbfsReadWrite (13.380s) 2023/08/17 17:12:53 [INFO] ✅ TestAccFilerDbfsReadDir (7.460s) 2023/08/17 17:13:01 [INFO] ✅ TestAccFilerWorkspaceNotebookConflict (7.920s) 2023/08/17 17:13:10 [INFO] ✅ TestAccFilerWorkspaceNotebookWithOverwriteFlag (9.290s) 2023/08/17 17:13:10 [INFO] ✅ TestAccFilerLocalReadWrite (0.010s) 2023/08/17 17:13:11 [INFO] ✅ TestAccFilerLocalReadDir (0.010s) 2023/08/17 17:13:14 [INFO] ✅ TestAccFsCatForDbfs (3.180s) 2023/08/17 17:13:15 [INFO] ✅ TestAccFsCatForDbfsOnNonExistentFile (0.940s) 2023/08/17 17:13:15 [INFO] ✅ TestAccFsCatForDbfsInvalidScheme (0.560s) 2023/08/17 17:13:18 [INFO] ✅ TestAccFsCatDoesNotSupportOutputModeJson (2.910s) 2023/08/17 17:13:51 [INFO] ✅ TestAccFsCpDir (32.730s) 2023/08/17 17:14:06 [INFO] ✅ TestAccFsCpFileToFile (14.740s) 2023/08/17 17:14:20 [INFO] ✅ TestAccFsCpFileToDir (14.340s) 2023/08/17 17:14:53 [INFO] ✅ TestAccFsCpDirToDirFileNotOverwritten (32.710s) 2023/08/17 17:15:12 [INFO] ✅ TestAccFsCpFileToDirFileNotOverwritten (19.590s) 2023/08/17 17:15:32 [INFO] ✅ TestAccFsCpFileToFileFileNotOverwritten (19.950s) 2023/08/17 17:16:11 [INFO] ✅ TestAccFsCpDirToDirWithOverwriteFlag (38.970s) 2023/08/17 17:16:32 [INFO] ✅ TestAccFsCpFileToFileWithOverwriteFlag (21.040s) 2023/08/17 17:16:52 [INFO] ✅ TestAccFsCpFileToDirWithOverwriteFlag (19.670s) 2023/08/17 17:16:54 [INFO] ✅ TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag (1.890s) 2023/08/17 17:16:54 [INFO] ✅ TestAccFsCpErrorsOnInvalidScheme (0.690s) 2023/08/17 17:17:10 [INFO] ✅ TestAccFsCpSourceIsDirectoryButTargetIsFile (15.810s) 2023/08/17 17:17:14 [INFO] ✅ TestAccFsLsForDbfs (4.000s) 2023/08/17 17:17:18 [INFO] ✅ TestAccFsLsForDbfsWithAbsolutePaths (4.000s) 2023/08/17 17:17:21 [INFO] ✅ TestAccFsLsForDbfsOnFile (3.140s) 2023/08/17 17:17:23 [INFO] ✅ TestAccFsLsForDbfsOnEmptyDir (2.030s) 2023/08/17 17:17:24 [INFO] ✅ TestAccFsLsForDbfsForNonexistingDir (0.840s) 2023/08/17 17:17:25 [INFO] ✅ TestAccFsLsWithoutScheme (0.590s) 2023/08/17 17:17:27 [INFO] ✅ TestAccFsMkdirCreatesDirectory (2.310s) 2023/08/17 17:17:30 [INFO] ✅ TestAccFsMkdirCreatesMultipleDirectories (2.800s) 2023/08/17 17:17:33 [INFO] ✅ TestAccFsMkdirWhenDirectoryAlreadyExists (2.700s) 2023/08/17 17:17:35 [INFO] ✅ TestAccFsMkdirWhenFileExistsAtPath (2.870s) 2023/08/17 17:17:40 [INFO] ✅ TestAccFsRmForFile (4.030s) 2023/08/17 17:17:43 [INFO] ✅ TestAccFsRmForEmptyDirectory (3.470s) 2023/08/17 17:17:46 [INFO] ✅ TestAccFsRmForNonEmptyDirectory (3.350s) 2023/08/17 17:17:47 [INFO] ✅ TestAccFsRmForNonExistentFile (0.940s) 2023/08/17 17:17:51 [INFO] ✅ TestAccFsRmForNonEmptyDirectoryWithRecursiveFlag (3.570s) 2023/08/17 17:17:52 [INFO] ✅ TestAccGitClone (0.890s) 2023/08/17 17:17:52 [INFO] ✅ TestAccGitCloneWithOnlyRepoNameOnAlternateBranch (0.730s) 2023/08/17 17:17:53 [INFO] ✅ TestAccGitCloneErrorsWhenRepositoryDoesNotExist (0.540s) 2023/08/17 17:18:02 [INFO] ✅ TestAccLock (8.800s) 2023/08/17 17:18:06 [INFO] ✅ TestAccLockUnlockWithoutAllowsLockFileNotExist (3.930s) 2023/08/17 17:18:09 [INFO] ✅ 
TestAccLockUnlockWithAllowsLockFileNotExist (3.320s)
2023/08/17 17:18:20 [INFO] ✅ TestAccSyncFullFileSync (10.570s)
2023/08/17 17:18:31 [INFO] ✅ TestAccSyncIncrementalFileSync (11.460s)
2023/08/17 17:18:42 [INFO] ✅ TestAccSyncNestedFolderSync (10.850s)
2023/08/17 17:18:53 [INFO] ✅ TestAccSyncNestedFolderDoesntFailOnNonEmptyDirectory (10.650s)
2023/08/17 17:19:04 [INFO] ✅ TestAccSyncNestedSpacePlusAndHashAreEscapedSync (10.930s)
2023/08/17 17:19:11 [INFO] ✅ TestAccSyncIncrementalFileOverwritesFolder (7.010s)
2023/08/17 17:19:18 [INFO] ✅ TestAccSyncIncrementalSyncPythonNotebookToFile (7.380s)
2023/08/17 17:19:24 [INFO] ✅ TestAccSyncIncrementalSyncFileToPythonNotebook (6.220s)
2023/08/17 17:19:30 [INFO] ✅ TestAccSyncIncrementalSyncPythonNotebookDelete (5.530s)
2023/08/17 17:19:32 [INFO] ✅ TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist (2.620s)
2023/08/17 17:19:38 [INFO] ✅ TestAccSyncEnsureRemotePathIsUsableIfRepoExists (5.460s)
2023/08/17 17:19:40 [INFO] ✅ TestAccSyncEnsureRemotePathIsUsableInWorkspace (1.850s)
2023/08/17 17:19:40 [INFO] ✅ TestAccWorkspaceList (0.780s)
2023/08/17 17:19:51 [INFO] ✅ TestAccExportDir (10.350s)
2023/08/17 17:19:54 [INFO] ✅ TestAccExportDirDoesNotOverwrite (3.330s)
2023/08/17 17:19:58 [INFO] ✅ TestAccExportDirWithOverwriteFlag (3.770s)
2023/08/17 17:20:07 [INFO] ✅ TestAccImportDir (9.320s)
2023/08/17 17:20:24 [INFO] ✅ TestAccImportDirDoesNotOverwrite (16.950s)
2023/08/17 17:20:35 [INFO] ✅ TestAccImportDirWithOverwriteFlag (10.620s)
2023/08/17 17:20:35 [INFO] ✅ 68/68 passed, 0 failed, 3 skipped
```
---
 bundle/bundle.go            |  34 +++++++++++
 bundle/config/root.go       |   3 +
 bundle/config/sync.go       |  13 ++++
 bundle/deploy/files/sync.go |  12 +++-
 cmd/bundle/sync.go          |   7 +++
 cmd/sync/sync.go            |   7 +++
 libs/fileset/glob.go        |  49 +++++++++++++++
 libs/fileset/glob_test.go   |  65 ++++++++++++++++++++
 libs/set/set.go             |  75 +++++++++++++++++++++++
 libs/set/set_test.go        | 111 +++++++++++++++++++++++++++++++++
 libs/sync/sync.go           |  72 ++++++++++++++++++----
 libs/sync/sync_test.go      | 119 ++++++++++++++++++++++++++++++++++++
 12 files changed, 554 insertions(+), 13 deletions(-)
 create mode 100644 bundle/config/sync.go
 create mode 100644 libs/fileset/glob.go
 create mode 100644 libs/fileset/glob_test.go
 create mode 100644 libs/set/set.go
 create mode 100644 libs/set/set_test.go
 create mode 100644 libs/sync/sync_test.go

diff --git a/bundle/bundle.go b/bundle/bundle.go
index a5eaa289..d69d5815 100644
--- a/bundle/bundle.go
+++ b/bundle/bundle.go
@@ -24,6 +24,8 @@ import (
 	"github.com/hashicorp/terraform-exec/tfexec"
 )
 
+const internalFolder = ".internal"
+
 type Bundle struct {
 	Config config.Root
 
@@ -155,6 +157,38 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) {
 	return dir, nil
 }
 
+// This directory is used to store and automatically sync internal bundle files,
+// such as notebook trampoline files for Python wheels.
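+// It lives under the bundle cache directory and is created on first use.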
+func (b *Bundle) InternalDir() (string, error) {
+	cacheDir, err := b.CacheDir()
+	if err != nil {
+		return "", err
+	}
+
+	dir := filepath.Join(cacheDir, internalFolder)
+	err = os.MkdirAll(dir, 0700)
+	if err != nil {
+		return dir, err
+	}
+
+	return dir, nil
+}
+
+// GetSyncIncludePatterns returns the list of user-defined includes.
+// It also adds the InternalDir folder to the include list for the sync command,
+// so that this folder is always synced.
+func (b *Bundle) GetSyncIncludePatterns() ([]string, error) {
+	internalDir, err := b.InternalDir()
+	if err != nil {
+		return nil, err
+	}
+	internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
+	if err != nil {
+		return nil, err
+	}
+	return append(b.Config.Sync.Include, filepath.ToSlash(filepath.Join(internalDirRel, "*.*"))), nil
+}
+
 func (b *Bundle) GitRepository() (*git.Repository, error) {
 	rootPath, err := folders.FindDirWithLeaf(b.Config.Path, ".git")
 	if err != nil {
diff --git a/bundle/config/root.go b/bundle/config/root.go
index 24426dd8..e0d20425 100644
--- a/bundle/config/root.go
+++ b/bundle/config/root.go
@@ -77,6 +77,9 @@ type Root struct {
 
 	// DEPRECATED. Left for backward compatibility with Targets
 	Environments map[string]*Target `json:"environments,omitempty"`
+
+	// Sync section specifies options for file synchronization
+	Sync Sync `json:"sync"`
 }
 
 func Load(path string) (*Root, error) {
diff --git a/bundle/config/sync.go b/bundle/config/sync.go
new file mode 100644
index 00000000..0580e4c4
--- /dev/null
+++ b/bundle/config/sync.go
@@ -0,0 +1,13 @@
+package config
+
+type Sync struct {
+	// Include contains a list of globs evaluated relative to the bundle root path
+	// to explicitly include files that were excluded by the user's gitignore.
+	Include []string `json:"include,omitempty"`
+
+	// Exclude contains a list of globs evaluated relative to the bundle root path
+	// to explicitly exclude files that were included by
+	// 1) the default that observes the user's gitignore, or
+	// 2) the `Include` field above.
+ Exclude []string `json:"exclude,omitempty"` +} diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index 84d79dc8..2dccd20a 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -14,9 +14,17 @@ func getSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) { return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) } + includes, err := b.GetSyncIncludePatterns() + if err != nil { + return nil, fmt.Errorf("cannot get list of sync includes: %w", err) + } + opts := sync.SyncOptions{ - LocalPath: b.Config.Path, - RemotePath: b.Config.Workspace.FilesPath, + LocalPath: b.Config.Path, + RemotePath: b.Config.Workspace.FilesPath, + Include: includes, + Exclude: b.Config.Sync.Exclude, + Full: false, CurrentUser: b.Config.Workspace.CurrentUser.User, diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index 2fff7baf..be45626a 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -23,9 +23,16 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) } + includes, err := b.GetSyncIncludePatterns() + if err != nil { + return nil, fmt.Errorf("cannot get list of sync includes: %w", err) + } + opts := sync.SyncOptions{ LocalPath: b.Config.Path, RemotePath: b.Config.Workspace.FilesPath, + Include: includes, + Exclude: b.Config.Sync.Exclude, Full: f.full, PollInterval: f.interval, diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index d2aad0c3..4a62123b 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -35,9 +35,16 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b * return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) } + includes, err := b.GetSyncIncludePatterns() + if err != nil { + return nil, fmt.Errorf("cannot get list of sync includes: %w", err) + } + opts := sync.SyncOptions{ LocalPath: b.Config.Path, RemotePath: b.Config.Workspace.FilesPath, + Include: includes, + Exclude: b.Config.Sync.Exclude, Full: f.full, PollInterval: f.interval, diff --git a/libs/fileset/glob.go b/libs/fileset/glob.go new file mode 100644 index 00000000..7a9f130b --- /dev/null +++ b/libs/fileset/glob.go @@ -0,0 +1,49 @@ +package fileset + +import ( + "io/fs" + "os" + "path/filepath" +) + +type GlobSet struct { + root string + patterns []string +} + +func NewGlobSet(root string, includes []string) (*GlobSet, error) { + absRoot, err := filepath.Abs(root) + if err != nil { + return nil, err + } + for k := range includes { + includes[k] = filepath.Join(absRoot, filepath.FromSlash(includes[k])) + } + return &GlobSet{absRoot, includes}, nil +} + +// Return all files which matches defined glob patterns +func (s *GlobSet) All() ([]File, error) { + files := make([]File, 0) + for _, pattern := range s.patterns { + matches, err := filepath.Glob(pattern) + if err != nil { + return files, err + } + + for _, match := range matches { + matchRel, err := filepath.Rel(s.root, match) + if err != nil { + return files, err + } + + stat, err := os.Stat(match) + if err != nil { + return files, err + } + files = append(files, File{fs.FileInfoToDirEntry(stat), match, matchRel}) + } + } + + return files, nil +} diff --git a/libs/fileset/glob_test.go b/libs/fileset/glob_test.go new file mode 100644 index 00000000..f6ac7e19 --- /dev/null +++ b/libs/fileset/glob_test.go @@ -0,0 +1,65 @@ +package fileset + +import ( + "io/fs" + "os" + "path/filepath" + "slices" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGlobFileset(t 
*testing.T) {
+	cwd, err := os.Getwd()
+	require.NoError(t, err)
+	root := filepath.Join(cwd, "..", "filer")
+
+	entries, err := os.ReadDir(root)
+	require.NoError(t, err)
+
+	g, err := NewGlobSet(root, []string{
+		"./*.go",
+	})
+	require.NoError(t, err)
+
+	files, err := g.All()
+	require.NoError(t, err)
+
+	require.Equal(t, len(files), len(entries))
+	for _, f := range files {
+		exists := slices.ContainsFunc(entries, func(de fs.DirEntry) bool {
+			return de.Name() == f.Name()
+		})
+		require.True(t, exists)
+	}
+
+	g, err = NewGlobSet(root, []string{
+		"./*.js",
+	})
+	require.NoError(t, err)
+
+	files, err = g.All()
+	require.NoError(t, err)
+	require.Equal(t, len(files), 0)
+}
+
+func TestGlobFilesetWithRelativeRoot(t *testing.T) {
+	root := filepath.Join("..", "filer")
+
+	entries, err := os.ReadDir(root)
+	require.NoError(t, err)
+
+	g, err := NewGlobSet(root, []string{
+		"./*.go",
+	})
+	require.NoError(t, err)
+
+	files, err := g.All()
+	require.NoError(t, err)
+
+	require.Equal(t, len(files), len(entries))
+	for _, f := range files {
+		require.True(t, filepath.IsAbs(f.Absolute))
+	}
+}
diff --git a/libs/set/set.go b/libs/set/set.go
new file mode 100644
index 00000000..4798ed09
--- /dev/null
+++ b/libs/set/set.go
@@ -0,0 +1,75 @@
+package set
+
+import (
+	"fmt"
+
+	"golang.org/x/exp/maps"
+)
+
+type hashFunc[T any] func(a T) string
+
+// Set represents a set data structure
+type Set[T any] struct {
+	key  hashFunc[T]
+	data map[string]T
+}
+
+// NewSetFromF initialises a new set with initial values and a hash function
+// that defines the uniqueness of a value
+func NewSetFromF[T any](values []T, f hashFunc[T]) *Set[T] {
+	s := &Set[T]{
+		key:  f,
+		data: make(map[string]T),
+	}
+
+	for _, v := range values {
+		s.Add(v)
+	}
+
+	return s
+}
+
+// NewSetF initialises a new empty set with a hash function
+// that defines the uniqueness of a value
+func NewSetF[T any](f hashFunc[T]) *Set[T] {
+	return NewSetFromF([]T{}, f)
+}
+
+// NewSetFrom initialises a new set from initial values that are comparable
+func NewSetFrom[T comparable](values []T) *Set[T] {
+	return NewSetFromF(values, func(item T) string {
+		return fmt.Sprintf("%v", item)
+	})
+}
+
+// NewSet initialises a new empty set for comparable values
+func NewSet[T comparable]() *Set[T] {
+	return NewSetFrom([]T{})
+}
+
+func (s *Set[T]) addOne(item T) {
+	s.data[s.key(item)] = item
+}
+
+// Add one or more items to the set
+func (s *Set[T]) Add(items ...T) {
+	for _, i := range items {
+		s.addOne(i)
+	}
+}
+
+// Remove an item from the set.
No-op if the item does not exist +func (s *Set[T]) Remove(item T) { + delete(s.data, s.key(item)) +} + +// Indicates if the item exists in the set +func (s *Set[T]) Has(item T) bool { + _, ok := s.data[s.key(item)] + return ok +} + +// Returns an iterable slice of values from set +func (s *Set[T]) Iter() []T { + return maps.Values(s.data) +} diff --git a/libs/set/set_test.go b/libs/set/set_test.go new file mode 100644 index 00000000..c2b6e25c --- /dev/null +++ b/libs/set/set_test.go @@ -0,0 +1,111 @@ +package set + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSet(t *testing.T) { + s := NewSetFrom([]string{}) + require.ElementsMatch(t, []string{}, s.Iter()) + + s = NewSetFrom([]string{"a", "a", "a", "b", "b", "c", "d", "e"}) + require.ElementsMatch(t, []string{"a", "b", "c", "d", "e"}, s.Iter()) + + i := NewSetFrom([]int{1, 1, 2, 3, 4, 5, 7, 7, 7, 10, 11}) + require.ElementsMatch(t, []int{1, 2, 3, 4, 5, 7, 10, 11}, i.Iter()) + + f := NewSetFrom([]float32{1.1, 1.1, 2.0, 3.1, 4.5, 5.1, 7.1, 7.2, 7.1, 10.1, 11.0}) + require.ElementsMatch(t, []float32{1.1, 2.0, 3.1, 4.5, 5.1, 7.1, 7.2, 10.1, 11.0}, f.Iter()) +} + +type testStruct struct { + key string + value int +} + +func TestSetCustomKey(t *testing.T) { + s := NewSetF(func(item *testStruct) string { + return fmt.Sprintf("%s:%d", item.key, item.value) + }) + s.Add(&testStruct{"a", 1}) + s.Add(&testStruct{"b", 2}) + s.Add(&testStruct{"c", 1}) + s.Add(&testStruct{"a", 1}) + s.Add(&testStruct{"a", 1}) + s.Add(&testStruct{"a", 1}) + s.Add(&testStruct{"c", 1}) + s.Add(&testStruct{"c", 3}) + + require.ElementsMatch(t, []*testStruct{ + {"a", 1}, + {"b", 2}, + {"c", 1}, + {"c", 3}, + }, s.Iter()) +} + +func TestSetAdd(t *testing.T) { + s := NewSet[string]() + s.Add("a") + s.Add("a") + s.Add("a") + s.Add("b") + s.Add("c") + s.Add("c") + s.Add("d") + s.Add("d") + + require.ElementsMatch(t, []string{"a", "b", "c", "d"}, s.Iter()) +} + +func TestSetRemove(t *testing.T) { + s := NewSet[string]() + s.Add("a") + s.Add("a") + s.Add("a") + s.Add("b") + s.Add("c") + s.Add("c") + s.Add("d") + s.Add("d") + + s.Remove("d") + s.Remove("d") + s.Remove("a") + + require.ElementsMatch(t, []string{"b", "c"}, s.Iter()) +} + +func TestSetHas(t *testing.T) { + s := NewSet[string]() + require.False(t, s.Has("a")) + + s.Add("a") + require.True(t, s.Has("a")) + + s.Add("a") + s.Add("a") + require.True(t, s.Has("a")) + + s.Add("b") + s.Add("c") + s.Add("c") + s.Add("d") + s.Add("d") + + require.True(t, s.Has("a")) + require.True(t, s.Has("b")) + require.True(t, s.Has("c")) + require.True(t, s.Has("d")) + + s.Remove("d") + s.Remove("a") + + require.False(t, s.Has("a")) + require.True(t, s.Has("b")) + require.True(t, s.Has("c")) + require.False(t, s.Has("d")) +} diff --git a/libs/sync/sync.go b/libs/sync/sync.go index a299214d..8be478fc 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -6,8 +6,10 @@ import ( "time" "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/set" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/iam" ) @@ -15,6 +17,8 @@ import ( type SyncOptions struct { LocalPath string RemotePath string + Include []string + Exclude []string Full bool @@ -32,7 +36,10 @@ type SyncOptions struct { type Sync struct { *SyncOptions - fileSet *git.FileSet + fileSet *git.FileSet + includeFileSet *fileset.GlobSet + excludeFileSet *fileset.GlobSet + 
snapshot *Snapshot filer filer.Filer @@ -52,6 +59,16 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { return nil, err } + includeFileSet, err := fileset.NewGlobSet(opts.LocalPath, opts.Include) + if err != nil { + return nil, err + } + + excludeFileSet, err := fileset.NewGlobSet(opts.LocalPath, opts.Exclude) + if err != nil { + return nil, err + } + // Verify that the remote path we're about to synchronize to is valid and allowed. err = EnsureRemotePathIsUsable(ctx, opts.WorkspaceClient, opts.RemotePath, opts.CurrentUser) if err != nil { @@ -88,11 +105,13 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { return &Sync{ SyncOptions: &opts, - fileSet: fileSet, - snapshot: snapshot, - filer: filer, - notifier: &NopNotifier{}, - seq: 0, + fileSet: fileSet, + includeFileSet: includeFileSet, + excludeFileSet: excludeFileSet, + snapshot: snapshot, + filer: filer, + notifier: &NopNotifier{}, + seq: 0, }, nil } @@ -132,15 +151,12 @@ func (s *Sync) notifyComplete(ctx context.Context, d diff) { } func (s *Sync) RunOnce(ctx context.Context) error { - // tradeoff: doing portable monitoring only due to macOS max descriptor manual ulimit setting requirement - // https://github.com/gorakhargosh/watchdog/blob/master/src/watchdog/observers/kqueue.py#L394-L418 - all, err := s.fileSet.All() + files, err := getFileList(ctx, s) if err != nil { - log.Errorf(ctx, "cannot list files: %s", err) return err } - change, err := s.snapshot.diff(ctx, all) + change, err := s.snapshot.diff(ctx, files) if err != nil { return err } @@ -166,6 +182,40 @@ func (s *Sync) RunOnce(ctx context.Context) error { return nil } +func getFileList(ctx context.Context, s *Sync) ([]fileset.File, error) { + // tradeoff: doing portable monitoring only due to macOS max descriptor manual ulimit setting requirement + // https://github.com/gorakhargosh/watchdog/blob/master/src/watchdog/observers/kqueue.py#L394-L418 + all := set.NewSetF(func(f fileset.File) string { + return f.Absolute + }) + gitFiles, err := s.fileSet.All() + if err != nil { + log.Errorf(ctx, "cannot list files: %s", err) + return nil, err + } + all.Add(gitFiles...) + + include, err := s.includeFileSet.All() + if err != nil { + log.Errorf(ctx, "cannot list include files: %s", err) + return nil, err + } + + all.Add(include...) 
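+
+	// Exclusion takes precedence below: any file matching an exclude pattern
+	// is dropped, even if git tracking or an include pattern added it above.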
+ + exclude, err := s.excludeFileSet.All() + if err != nil { + log.Errorf(ctx, "cannot list exclude files: %s", err) + return nil, err + } + + for _, f := range exclude { + all.Remove(f) + } + + return all.Iter(), nil +} + func (s *Sync) DestroySnapshot(ctx context.Context) error { return s.snapshot.Destroy(ctx) } diff --git a/libs/sync/sync_test.go b/libs/sync/sync_test.go new file mode 100644 index 00000000..99c7e04b --- /dev/null +++ b/libs/sync/sync_test.go @@ -0,0 +1,119 @@ +package sync + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/git" + "github.com/stretchr/testify/require" +) + +func createFile(dir string, name string) error { + f, err := os.Create(filepath.Join(dir, name)) + if err != nil { + return err + } + + return f.Close() +} + +func setupFiles(t *testing.T) string { + dir := t.TempDir() + + err := createFile(dir, "a.go") + require.NoError(t, err) + + err = createFile(dir, "b.go") + require.NoError(t, err) + + err = createFile(dir, "ab.go") + require.NoError(t, err) + + err = createFile(dir, "abc.go") + require.NoError(t, err) + + err = createFile(dir, "c.go") + require.NoError(t, err) + + err = createFile(dir, "d.go") + require.NoError(t, err) + + dbDir := filepath.Join(dir, ".databricks") + err = os.Mkdir(dbDir, 0755) + require.NoError(t, err) + + err = createFile(dbDir, "e.go") + require.NoError(t, err) + + return dir + +} + +func TestGetFileSet(t *testing.T) { + ctx := context.Background() + + dir := setupFiles(t) + fileSet, err := git.NewFileSet(dir) + require.NoError(t, err) + + err = fileSet.EnsureValidGitIgnoreExists() + require.NoError(t, err) + + inc, err := fileset.NewGlobSet(dir, []string{}) + require.NoError(t, err) + + excl, err := fileset.NewGlobSet(dir, []string{}) + require.NoError(t, err) + + s := &Sync{ + SyncOptions: &SyncOptions{}, + + fileSet: fileSet, + includeFileSet: inc, + excludeFileSet: excl, + } + + fileList, err := getFileList(ctx, s) + require.NoError(t, err) + require.Equal(t, len(fileList), 7) + + inc, err = fileset.NewGlobSet(dir, []string{}) + require.NoError(t, err) + + excl, err = fileset.NewGlobSet(dir, []string{"*.go"}) + require.NoError(t, err) + + s = &Sync{ + SyncOptions: &SyncOptions{}, + + fileSet: fileSet, + includeFileSet: inc, + excludeFileSet: excl, + } + + fileList, err = getFileList(ctx, s) + require.NoError(t, err) + require.Equal(t, len(fileList), 1) + + inc, err = fileset.NewGlobSet(dir, []string{".databricks/*.*"}) + require.NoError(t, err) + + excl, err = fileset.NewGlobSet(dir, []string{}) + require.NoError(t, err) + + s = &Sync{ + SyncOptions: &SyncOptions{}, + + fileSet: fileSet, + includeFileSet: inc, + excludeFileSet: excl, + } + + fileList, err = getFileList(ctx, s) + require.NoError(t, err) + require.Equal(t, len(fileList), 8) + +} From ffc78b4b8b7cab8999472ae90e78f8a5db812abd Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 18 Aug 2023 11:29:48 +0200 Subject: [PATCH 030/310] Add template directory flag for bundle templates (#675) ## Changes This flag allows users to initialize a template from a subdirectory in the repo root. Also enables multi template repositories. 
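For example (the repository URL and directory names below are placeholders):

```
databricks bundle init https://github.com/org/template-repo --template-dir templates/my-template
```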
## Tests

Manually

---
 cmd/bundle/init.go | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go
index 14c57582..9ba7e190 100644
--- a/cmd/bundle/init.go
+++ b/cmd/bundle/init.go
@@ -43,7 +43,9 @@ func newInitCommand() *cobra.Command {
 
 	var configFile string
 	var outputDir string
+	var templateDir string
 	cmd.Flags().StringVar(&configFile, "config-file", "", "File containing input parameters for template initialization.")
+	cmd.Flags().StringVar(&templateDir, "template-dir", "", "Directory within repository that holds the template specification.")
 	cmd.Flags().StringVar(&outputDir, "output-dir", "", "Directory to write the initialized template to.")
 
 	cmd.RunE = func(cmd *cobra.Command, args []string) error {
@@ -59,19 +61,18 @@
 		// Download the template in a temporary directory
 		tmpDir := os.TempDir()
 		templateURL := templatePath
-		templateDir := filepath.Join(tmpDir, repoName(templateURL))
-		err := os.MkdirAll(templateDir, 0755)
+		repoDir := filepath.Join(tmpDir, repoName(templateURL))
+		err := os.MkdirAll(repoDir, 0755)
 		if err != nil {
 			return err
 		}
 
 		// TODO: Add automated test that the downloaded git repo is cleaned up.
-		err = git.Clone(ctx, templateURL, "", templateDir)
+		err = git.Clone(ctx, templateURL, "", repoDir)
 		if err != nil {
 			return err
 		}
-		defer os.RemoveAll(templateDir)
-
-		return template.Materialize(ctx, configFile, templateDir, outputDir)
+		defer os.RemoveAll(repoDir)
+		return template.Materialize(ctx, configFile, filepath.Join(repoDir, templateDir), outputDir)
 	}
 
 	return cmd
From c25bc041b113802cf55c56a5c428445947d8f01f Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Mon, 21 Aug 2023 09:35:02 +0200
Subject: [PATCH 031/310] Never ignore root directory when enumerating files in
 a repository (#683)

## Changes

The pattern `.*` in a `.gitignore` file can match `.` when walking all files
in a repository. If it does, then the walker immediately aborts and no files
are returned. The root directory (an unnamed directory) must never be ignored.

Reported in https://github.com/databricks/databricks-vscode/issues/837.

## Tests

New tests pass.
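To illustrate the underlying behavior, here is a minimal standalone sketch (not part of this change): the walker visits the root itself first, and relative to the root that entry is named `.`, which a pattern like `.*` would otherwise match.

```
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// The first entry passed to the callback is the root itself, as ".".
	filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		fmt.Println(path) // prints "." on the first invocation
		return fs.SkipDir // stop after the root entry
	})
}
```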
--- libs/fileset/fileset.go | 6 +++--- libs/git/fileset.go | 7 +------ libs/git/fileset_test.go | 31 ++++++++++++++++++------------- libs/git/repository.go | 5 +++++ libs/git/testdata/.gitignore | 3 +++ libs/git/view_test.go | 7 +++++++ 6 files changed, 37 insertions(+), 22 deletions(-) diff --git a/libs/fileset/fileset.go b/libs/fileset/fileset.go index 07494fe8..81b85525 100644 --- a/libs/fileset/fileset.go +++ b/libs/fileset/fileset.go @@ -39,14 +39,14 @@ func (w *FileSet) Root() string { // Return all tracked files for Repo func (w *FileSet) All() ([]File, error) { - return w.RecursiveListFiles(w.root) + return w.recursiveListFiles() } // Recursively traverses dir in a depth first manner and returns a list of all files // that are being tracked in the FileSet (ie not being ignored for matching one of the // patterns in w.ignore) -func (w *FileSet) RecursiveListFiles(dir string) (fileList []File, err error) { - err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { +func (w *FileSet) recursiveListFiles() (fileList []File, err error) { + err = filepath.WalkDir(w.root, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } diff --git a/libs/git/fileset.go b/libs/git/fileset.go index 885a19b9..c604ac7f 100644 --- a/libs/git/fileset.go +++ b/libs/git/fileset.go @@ -6,7 +6,7 @@ import ( // FileSet is Git repository aware implementation of [fileset.FileSet]. // It forces checking if gitignore files have been modified every -// time a call to [FileSet.All] or [FileSet.RecursiveListFiles] is made. +// time a call to [FileSet.All] is made. type FileSet struct { fileset *fileset.FileSet view *View @@ -43,11 +43,6 @@ func (f *FileSet) All() ([]fileset.File, error) { return f.fileset.All() } -func (f *FileSet) RecursiveListFiles(dir string) ([]fileset.File, error) { - f.view.repo.taintIgnoreRules() - return f.fileset.RecursiveListFiles(dir) -} - func (f *FileSet) EnsureValidGitIgnoreExists() error { return f.view.EnsureValidGitIgnoreExists() } diff --git a/libs/git/fileset_test.go b/libs/git/fileset_test.go index 4fa2ca4b..74133f52 100644 --- a/libs/git/fileset_test.go +++ b/libs/git/fileset_test.go @@ -10,18 +10,23 @@ import ( "github.com/stretchr/testify/require" ) -func TestFileSetRecursiveListFiles(t *testing.T) { - fileSet, err := NewFileSet("./testdata") +func testFileSetAll(t *testing.T, path string) { + fileSet, err := NewFileSet(path) require.NoError(t, err) - files, err := fileSet.RecursiveListFiles("./testdata") + files, err := fileSet.All() require.NoError(t, err) - require.Len(t, files, 6) - assert.Equal(t, filepath.Join(".gitignore"), files[0].Relative) - assert.Equal(t, filepath.Join("a", ".gitignore"), files[1].Relative) - assert.Equal(t, filepath.Join("a", "b", ".gitignore"), files[2].Relative) - assert.Equal(t, filepath.Join("a", "b", "world.txt"), files[3].Relative) - assert.Equal(t, filepath.Join("a", "hello.txt"), files[4].Relative) - assert.Equal(t, filepath.Join("databricks.yml"), files[5].Relative) + require.Len(t, files, 3) + assert.Equal(t, filepath.Join("a", "b", "world.txt"), files[0].Relative) + assert.Equal(t, filepath.Join("a", "hello.txt"), files[1].Relative) + assert.Equal(t, filepath.Join("databricks.yml"), files[2].Relative) +} + +func TestFileSetListAllInRepo(t *testing.T) { + testFileSetAll(t, "./testdata") +} + +func TestFileSetListAllInTempDir(t *testing.T) { + testFileSetAll(t, copyTestdata(t, "./testdata")) } func TestFileSetNonCleanRoot(t *testing.T) { @@ -32,10 +37,10 @@ func TestFileSetNonCleanRoot(t 
*testing.T) { require.NoError(t, err) files, err := fileSet.All() require.NoError(t, err) - assert.Len(t, files, 6) + assert.Len(t, files, 3) } -func TestFilesetAddsCacheDirToGitIgnore(t *testing.T) { +func TestFileSetAddsCacheDirToGitIgnore(t *testing.T) { projectDir := t.TempDir() fileSet, err := NewFileSet(projectDir) require.NoError(t, err) @@ -48,7 +53,7 @@ func TestFilesetAddsCacheDirToGitIgnore(t *testing.T) { assert.Contains(t, string(fileBytes), ".databricks") } -func TestFilesetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) { +func TestFileSetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) { projectDir := t.TempDir() gitIgnorePath := filepath.Join(projectDir, ".gitignore") diff --git a/libs/git/repository.go b/libs/git/repository.go index 2f19cff9..9c847687 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -160,6 +160,11 @@ func (r *Repository) Ignore(relPath string) (bool, error) { trailingSlash = "/" } + // Never ignore the root path (an unnamed path) + if len(parts) == 1 && parts[0] == "." { + return false, nil + } + // Walk over path prefixes to check applicable gitignore files. for i := range parts { prefix := path.Clean(strings.Join(parts[:i], "/")) diff --git a/libs/git/testdata/.gitignore b/libs/git/testdata/.gitignore index 3d68fc1c..5bfc9c1e 100644 --- a/libs/git/testdata/.gitignore +++ b/libs/git/testdata/.gitignore @@ -7,3 +7,6 @@ root.* # Directory pattern. ignoredirectory/ + +# Ignore dotfiles +.* diff --git a/libs/git/view_test.go b/libs/git/view_test.go index 795e7b6e..3ecd301b 100644 --- a/libs/git/view_test.go +++ b/libs/git/view_test.go @@ -68,8 +68,15 @@ func testViewAtRoot(t *testing.T, tv testView) { assert.True(t, tv.Ignore("root/foo")) assert.True(t, tv.Ignore("root_double")) assert.False(t, tv.Ignore("newfile")) + assert.True(t, tv.Ignore(".gitignore")) + assert.False(t, tv.Ignore("newfile.py")) assert.True(t, tv.Ignore("ignoredirectory/")) + // Never ignore the root directory. + // This is the only path that may be checked as `.`, + // and would match the `.*` ignore pattern if specified. + assert.False(t, tv.Ignore(".")) + // Nested .gitignores should not affect root. assert.False(t, tv.Ignore("a.sh")) From e1ca24d5d5416e974de17da4045e25b6a3235418 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 21 Aug 2023 11:09:08 +0200 Subject: [PATCH 032/310] Improve 'mode' error message (#681) ## Changes `unsupported value specified for 'mode': %s` was not a helpful error message. 
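For example, a configuration with `mode: staging` now produces: `unsupported value 'staging' specified for 'mode': must be either 'development' or 'production'`.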
--- bundle/config/mutator/process_target_mode.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index b5dc2559..fca4e4b0 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -182,7 +182,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error { case "": // No action default: - return fmt.Errorf("unsupported value specified for 'mode': %s", b.Config.Bundle.Mode) + return fmt.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode) } return nil From 5ed635a24091872d00d2a20ee9cefcf829a556ac Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Mon, 21 Aug 2023 18:17:02 +0200 Subject: [PATCH 033/310] Added `databricks account o-auth-enrollment enable` command (#687) This command takes the user through the interactive flow to set up OAuth for a fresh account, where only Basic authentication works. --------- Co-authored-by: Andrew Nester --- cmd/account/o-auth-enrollment/overrides.go | 107 +++++++++++++++++++++ libs/cmdio/io.go | 29 ++++++ 2 files changed, 136 insertions(+) create mode 100644 cmd/account/o-auth-enrollment/overrides.go diff --git a/cmd/account/o-auth-enrollment/overrides.go b/cmd/account/o-auth-enrollment/overrides.go new file mode 100644 index 00000000..1fc3aacc --- /dev/null +++ b/cmd/account/o-auth-enrollment/overrides.go @@ -0,0 +1,107 @@ +package o_auth_enrollment + +import ( + "context" + "fmt" + "time" + + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/retries" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/spf13/cobra" +) + +func promptForBasicAccountConfig(ctx context.Context) (*databricks.Config, error) { + if !cmdio.IsInTTY(ctx) { + return nil, fmt.Errorf("this command requires a TTY") + } + // OAuth Enrollment only works on AWS + host, err := cmdio.DefaultPrompt(ctx, "Host", "https://accounts.cloud.databricks.com") + if err != nil { + return nil, fmt.Errorf("host: %w", err) + } + accountID, err := cmdio.SimplePrompt(ctx, "Account ID") + if err != nil { + return nil, fmt.Errorf("account: %w", err) + } + username, err := cmdio.SimplePrompt(ctx, "Username") + if err != nil { + return nil, fmt.Errorf("username: %w", err) + } + password, err := cmdio.Secret(ctx, "Password") + if err != nil { + return nil, fmt.Errorf("password: %w", err) + } + return &databricks.Config{ + Host: host, + AccountID: accountID, + Username: username, + Password: password, + }, nil +} + +func enableOAuthForAccount(ctx context.Context, cfg *databricks.Config) error { + ac, err := databricks.NewAccountClient(cfg) + if err != nil { + return fmt.Errorf("failed to instantiate account client: %w", err) + } + // The enrollment is executed asynchronously, so the API returns HTTP 204 immediately + err = ac.OAuthEnrollment.Create(ctx, oauth2.CreateOAuthEnrollment{ + EnableAllPublishedApps: true, + }) + if err != nil { + return fmt.Errorf("failed to create oauth enrollment: %w", err) + } + enableSpinner := cmdio.Spinner(ctx) + // The actual enrollment take a few minutes + err = retries.Wait(ctx, 10*time.Minute, func() *retries.Err { + status, err := ac.OAuthEnrollment.Get(ctx) + if err != nil { + return retries.Halt(err) + } + if !status.IsEnabled { + msg := "Enabling OAuth..." 
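+			// Surface progress on the spinner while polling continues.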
+			enableSpinner <- msg
+			return retries.Continues(msg)
+		}
+		enableSpinner <- "OAuth is enabled"
+		close(enableSpinner)
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("wait for enrollment: %w", err)
+	}
+	// Enable the Databricks CLI, so that `databricks auth login` works
+	_, err = ac.PublishedAppIntegration.Create(ctx, oauth2.CreatePublishedAppIntegration{
+		AppId: "databricks-cli",
+	})
+	if err != nil {
+		return fmt.Errorf("failed to enable databricks CLI: %w", err)
+	}
+	return nil
+}
+
+func newEnable() *cobra.Command {
+	return &cobra.Command{
+		Use:   "enable",
+		Short: "Enable Databricks CLI, Tableau Desktop, and PowerBI for this account.",
+		Long: `Before you can do 'databricks auth login', you have to enable OAuth for this account.
+
+This command prompts you for Account ID, username, and password and waits until OAuth is enabled.`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cfg, err := promptForBasicAccountConfig(ctx)
+			if err != nil {
+				return fmt.Errorf("account config: %w", err)
+			}
+			return enableOAuthForAccount(ctx, cfg)
+		},
+	}
+}
+
+func init() {
+	cmdOverrides = append(cmdOverrides, func(c *cobra.Command) {
+		c.AddCommand(newEnable())
+	})
+}
diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go
index bc5a5f30..9d712e35 100644
--- a/libs/cmdio/io.go
+++ b/libs/cmdio/io.go
@@ -205,6 +205,35 @@ func Prompt(ctx context.Context) *promptui.Prompt {
 	}
 }
 
+func (c *cmdIO) simplePrompt(label string) *promptui.Prompt {
+	return &promptui.Prompt{
+		Label:  label,
+		Stdin:  io.NopCloser(c.in),
+		Stdout: nopWriteCloser{c.out},
+	}
+}
+
+func (c *cmdIO) SimplePrompt(label string) (value string, err error) {
+	return c.simplePrompt(label).Run()
+}
+
+func SimplePrompt(ctx context.Context, label string) (value string, err error) {
+	c := fromContext(ctx)
+	return c.SimplePrompt(label)
+}
+
+func (c *cmdIO) DefaultPrompt(label, defaultValue string) (value string, err error) {
+	prompt := c.simplePrompt(label)
+	prompt.Default = defaultValue
+	prompt.AllowEdit = true
+	return prompt.Run()
+}
+
+func DefaultPrompt(ctx context.Context, label, defaultValue string) (value string, err error) {
+	c := fromContext(ctx)
+	return c.DefaultPrompt(label, defaultValue)
+}
+
 func (c *cmdIO) Spinner(ctx context.Context) chan string {
 	var sp *spinner.Spinner
 	if c.interactive {
From 4ee926b8858bf1583fcd8bbe9a5222b1594e72ec Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Wed, 23 Aug 2023 18:47:07 +0200
Subject: [PATCH 034/310] Added run_as section for bundle configuration (#692)

## Changes

Added a run_as section for bundle configuration. This section allows defining
a user name or service principal that will be applied as the execution
identity for jobs and DLT pipelines. In the case of DLT, the identity defined
in `run_as` is assigned the `IS_OWNER` permission on the pipeline.

## Tests

Added unit tests for configuration.
Also ran deploy for the following bundle configuration

```
bundle:
  name: "run_as"

run_as:
  # service_principal_name: "f7263fcc-56d0-4981-8baf-c2a45296690b"
  user_name: "lennart.kats@databricks.com"

resources:
  pipelines:
    andrew_pipeline:
      name: "Andrew Nester pipeline"
      libraries:
        - notebook:
            path: ./test.py

  jobs:
    job_one:
      name: Job One
      tasks:
        - task_key: "task"
          new_cluster:
            num_workers: 1
            spark_version: 13.2.x-snapshot-scala2.12
            node_type_id: i3.xlarge
            runtime_engine: PHOTON
          notebook_task:
            notebook_path: "./test.py"
```
---
 bundle/config/mutator/run_as.go    | 65 +++++++++++++++++++++
 bundle/config/root.go              |  8 +++
 bundle/config/target.go            |  4 ++
 bundle/phases/initialize.go        |  1 +
 bundle/tests/run_as/databricks.yml | 42 +++++++++++++++
 bundle/tests/run_as_test.go        | 82 ++++++++++++++++++++++++++++++
 6 files changed, 202 insertions(+)
 create mode 100644 bundle/config/mutator/run_as.go
 create mode 100644 bundle/tests/run_as/databricks.yml
 create mode 100644 bundle/tests/run_as_test.go

diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go
new file mode 100644
index 00000000..7d1a4917
--- /dev/null
+++ b/bundle/config/mutator/run_as.go
@@ -0,0 +1,65 @@
+package mutator
+
+import (
+	"context"
+	"slices"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+)
+
+type setRunAs struct {
+}
+
+// SetRunAs mutator is used to go over defined resources such as jobs and DLT pipelines
+// and set the correct execution identity ("run_as" for a job or the "IS_OWNER" permission for DLT)
+// if a top-level "run_as" section is defined in the configuration.
+func SetRunAs() bundle.Mutator {
+	return &setRunAs{}
+}
+
+func (m *setRunAs) Name() string {
+	return "SetRunAs"
+}
+
+func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error {
+	runAs := b.Config.RunAs
+	if runAs == nil {
+		return nil
+	}
+
+	for i := range b.Config.Resources.Jobs {
+		job := b.Config.Resources.Jobs[i]
+		if job.RunAs != nil {
+			continue
+		}
+		job.RunAs = &jobs.JobRunAs{
+			ServicePrincipalName: runAs.ServicePrincipalName,
+			UserName:             runAs.UserName,
+		}
+	}
+
+	me := b.Config.Workspace.CurrentUser.UserName
+	// If the user deploying the bundle and the one defined in run_as are the same,
+	// do not add the IS_OWNER permission; the current user is implied to be an owner in this case.
+	// Otherwise, it will fail due to this bug: https://github.com/databricks/terraform-provider-databricks/issues/2407
+	if runAs.UserName == me || runAs.ServicePrincipalName == me {
+		return nil
+	}
+
+	for i := range b.Config.Resources.Pipelines {
+		pipeline := b.Config.Resources.Pipelines[i]
+		pipeline.Permissions = slices.DeleteFunc(pipeline.Permissions, func(p resources.Permission) bool {
+			return (runAs.ServicePrincipalName != "" && p.ServicePrincipalName == runAs.ServicePrincipalName) ||
+				(runAs.UserName != "" && p.UserName == runAs.UserName)
+		})
+		pipeline.Permissions = append(pipeline.Permissions, resources.Permission{
+			Level:                "IS_OWNER",
+			ServicePrincipalName: runAs.ServicePrincipalName,
+			UserName:             runAs.UserName,
+		})
+	}
+
+	return nil
+}
diff --git a/bundle/config/root.go b/bundle/config/root.go
index e0d20425..1275dab4 100644
--- a/bundle/config/root.go
+++ b/bundle/config/root.go
@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/ghodss/yaml"
 	"github.com/imdario/mergo"
 )
@@ -80,6 +81,9 @@ type Root struct {
 
 	// Sync section specifies options for file synchronization
 	Sync Sync `json:"sync"`
+
+	// RunAs section allows defining an execution identity for job and pipeline runs
+	RunAs *jobs.JobRunAs `json:"run_as,omitempty"`
 }
 
 func Load(path string) (*Root, error) {
@@ -237,6 +241,10 @@ func (r *Root) MergeTargetOverrides(target *Target) error {
 		}
 	}
 
+	if target.RunAs != nil {
+		r.RunAs = target.RunAs
+	}
+
 	if target.Mode != "" {
 		r.Bundle.Mode = target.Mode
 	}
diff --git a/bundle/config/target.go b/bundle/config/target.go
index 10775049..6a45fdb8 100644
--- a/bundle/config/target.go
+++ b/bundle/config/target.go
@@ -1,5 +1,7 @@
 package config
 
+import "github.com/databricks/databricks-sdk-go/service/jobs"
+
 type Mode string
 
 // Target defines overrides for a single target.
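+// Overrides are merged into the root configuration by Root.MergeTargetOverrides.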
@@ -31,6 +33,8 @@ type Target struct { Variables map[string]string `json:"variables,omitempty"` Git Git `json:"git,omitempty"` + + RunAs *jobs.JobRunAs `json:"run_as,omitempty"` } const ( diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 219ec26c..546a8478 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -16,6 +16,7 @@ func Initialize() bundle.Mutator { "initialize", []bundle.Mutator{ mutator.PopulateCurrentUser(), + mutator.SetRunAs(), mutator.DefineDefaultWorkspaceRoot(), mutator.ExpandWorkspaceRoot(), mutator.DefineDefaultWorkspacePaths(), diff --git a/bundle/tests/run_as/databricks.yml b/bundle/tests/run_as/databricks.yml new file mode 100644 index 00000000..18ea5573 --- /dev/null +++ b/bundle/tests/run_as/databricks.yml @@ -0,0 +1,42 @@ +bundle: + name: "run_as" + +run_as: + service_principal_name: "my_service_principal" + +targets: + development: + mode: development + run_as: + user_name: "my_user_name" + +resources: + pipelines: + nyc_taxi_pipeline: + permissions: + - level: CAN_VIEW + service_principal_name: my_service_principal + - level: CAN_VIEW + user_name: my_user_name + name: "nyc taxi loader" + libraries: + - notebook: + path: ./dlt/nyc_taxi_loader + jobs: + job_one: + name: Job One + tasks: + - task: + notebook_path: "./test.py" + job_two: + name: Job Two + tasks: + - task: + notebook_path: "./test.py" + job_three: + name: Job Three + run_as: + service_principal_name: "my_service_principal_for_job" + tasks: + - task: + notebook_path: "./test.py" diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go new file mode 100644 index 00000000..44c06816 --- /dev/null +++ b/bundle/tests/run_as_test.go @@ -0,0 +1,82 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" +) + +func TestRunAsDefault(t *testing.T) { + b := load(t, "./run_as") + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + ctx := context.Background() + err := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, err) + + assert.Len(t, b.Config.Resources.Jobs, 3) + jobs := b.Config.Resources.Jobs + + assert.NotNil(t, jobs["job_one"].RunAs) + assert.Equal(t, "my_service_principal", jobs["job_one"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_one"].RunAs.UserName) + + assert.NotNil(t, jobs["job_two"].RunAs) + assert.Equal(t, "my_service_principal", jobs["job_two"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_two"].RunAs.UserName) + + assert.NotNil(t, jobs["job_three"].RunAs) + assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_three"].RunAs.UserName) + + pipelines := b.Config.Resources.Pipelines + assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) + assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].Level, "CAN_VIEW") + assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].UserName, "my_user_name") + + assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].Level, "IS_OWNER") + assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].ServicePrincipalName, "my_service_principal") +} + +func TestRunAsDevelopment(t *testing.T) { + b := loadTarget(t, "./run_as", "development") + b.Config.Workspace.CurrentUser = 
&config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + ctx := context.Background() + err := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, err) + + assert.Len(t, b.Config.Resources.Jobs, 3) + jobs := b.Config.Resources.Jobs + + assert.NotNil(t, jobs["job_one"].RunAs) + assert.Equal(t, "", jobs["job_one"].RunAs.ServicePrincipalName) + assert.Equal(t, "my_user_name", jobs["job_one"].RunAs.UserName) + + assert.NotNil(t, jobs["job_two"].RunAs) + assert.Equal(t, "", jobs["job_two"].RunAs.ServicePrincipalName) + assert.Equal(t, "my_user_name", jobs["job_two"].RunAs.UserName) + + assert.NotNil(t, jobs["job_three"].RunAs) + assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_three"].RunAs.UserName) + + pipelines := b.Config.Resources.Pipelines + assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) + assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].Level, "CAN_VIEW") + assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].ServicePrincipalName, "my_service_principal") + + assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].Level, "IS_OWNER") + assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].UserName, "my_user_name") +} From c5cd20de23fe6ca01d31de0a68bdfcf821804d21 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 24 Aug 2023 13:04:20 +0200 Subject: [PATCH 035/310] Release v0.203.2 (#694) CLI: * Added `databricks account o-auth-enrollment enable` command ([#687](https://github.com/databricks/cli/pull/687)). Bundles: * Do not try auto detect Python package if no Python wheel tasks defined ([#674](https://github.com/databricks/cli/pull/674)). * Renamed `environments` to `targets` in bundle configuration ([#670](https://github.com/databricks/cli/pull/670)). * Rename init project-dir flag to output-dir ([#676](https://github.com/databricks/cli/pull/676)). * Added support for sync.include and sync.exclude sections ([#671](https://github.com/databricks/cli/pull/671)). * Add template directory flag for bundle templates ([#675](https://github.com/databricks/cli/pull/675)). * Never ignore root directory when enumerating files in a repository ([#683](https://github.com/databricks/cli/pull/683)). * Improve 'mode' error message ([#681](https://github.com/databricks/cli/pull/681)). * Added run_as section for bundle configuration ([#692](https://github.com/databricks/cli/pull/692)). --- CHANGELOG.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0b6bc0e..fa0dec13 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Version changelog +## 0.203.2 + +CLI: + * Added `databricks account o-auth-enrollment enable` command ([#687](https://github.com/databricks/cli/pull/687)). + +Bundles: + * Do not try auto detect Python package if no Python wheel tasks defined ([#674](https://github.com/databricks/cli/pull/674)). + * Renamed `environments` to `targets` in bundle configuration ([#670](https://github.com/databricks/cli/pull/670)). + * Rename init project-dir flag to output-dir ([#676](https://github.com/databricks/cli/pull/676)). + * Added support for sync.include and sync.exclude sections ([#671](https://github.com/databricks/cli/pull/671)). + * Add template directory flag for bundle templates ([#675](https://github.com/databricks/cli/pull/675)). + * Never ignore root directory when enumerating files in a repository ([#683](https://github.com/databricks/cli/pull/683)). 
+ * Improve 'mode' error message ([#681](https://github.com/databricks/cli/pull/681)). + * Added run_as section for bundle configuration ([#692](https://github.com/databricks/cli/pull/692)). + ## 0.203.1 CLI: From a5b86093ecc15989bf8473699e94a2518017488a Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Fri, 25 Aug 2023 11:03:42 +0200 Subject: [PATCH 036/310] Add a foundation for built-in templates (#685) ## Changes This pull request extends the templating support in preparation of a new, default template (WIP, https://github.com/databricks/cli/pull/686): * builtin templates that can be initialized using e.g. `databricks bundle init default-python` * builtin templates are embedded into the executable using go's `embed` functionality, making sure they're co-versioned with the CLI * new helpers to get the workspace name, current user name, etc. help craft a complete template * (not enabled yet) when the user types `databricks bundle init` they can interactively select the `default-python` template And makes two tangentially related changes: * IsServicePrincipal now uses the "users" API rather than the "principals" API, since the latter is too slow for our purposes. * mode: prod no longer requires the 'target.prod.git' setting. It's hard to set that from a template. (Pieter is planning an overhaul of warnings support; this would be one of the first warnings we show.) The actual `default-python` template is maintained in a separate PR: https://github.com/databricks/cli/pull/686 ## Tests Unit tests, manual testing --- .gitignore | 3 + .vscode/__builtins__.pyi | 3 + .vscode/settings.json | 3 +- bundle/config/mutator/process_target_mode.go | 25 +--- .../mutator/process_target_mode_test.go | 11 -- cmd/bundle/init.go | 25 +++- cmd/root/auth.go | 4 + cmd/root/bundle.go | 4 +- libs/auth/service_principal.go | 16 +++ libs/template/helpers.go | 120 +++++++++++++----- libs/template/helpers_test.go | 68 +++++++++- libs/template/materialize.go | 64 +++++++++- libs/template/renderer.go | 6 +- libs/template/renderer_test.go | 46 +++++-- .../databricks_template_schema.json | 9 ++ .../templates/default-python/defaults.json | 3 + .../template/{{.project_name}}/README.md | 3 + .../workspace-host/template/file.tmpl | 2 + 18 files changed, 326 insertions(+), 89 deletions(-) create mode 100644 .vscode/__builtins__.pyi create mode 100644 libs/auth/service_principal.go create mode 100644 libs/template/templates/default-python/databricks_template_schema.json create mode 100644 libs/template/templates/default-python/defaults.json create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/README.md create mode 100644 libs/template/testdata/workspace-host/template/file.tmpl diff --git a/.gitignore b/.gitignore index 5f00a82b..edd1409a 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,6 @@ __pycache__ .terraform.lock.hcl .vscode/launch.json +.vscode/tasks.json + +.databricks diff --git a/.vscode/__builtins__.pyi b/.vscode/__builtins__.pyi new file mode 100644 index 00000000..81f9a49e --- /dev/null +++ b/.vscode/__builtins__.pyi @@ -0,0 +1,3 @@ +# Typings for Pylance in VS Code +# see https://github.com/microsoft/pyright/blob/main/docs/builtins.md +from databricks.sdk.runtime import * diff --git a/.vscode/settings.json b/.vscode/settings.json index 76be94af..687e0fc0 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -7,5 +7,6 @@ "files.insertFinalNewline": true, "files.trimFinalNewlines": true, "python.envFile": "${workspaceFolder}/.databricks/.databricks.env", 
- "databricks.python.envFile": "${workspaceFolder}/.env" + "databricks.python.envFile": "${workspaceFolder}/.env", + "python.analysis.stubPath": ".vscode" } diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index fca4e4b0..3a00d42f 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -8,7 +8,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -111,7 +112,7 @@ func findIncorrectPath(b *bundle.Bundle, mode config.Mode) string { func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) error { if b.Config.Bundle.Git.Inferred { env := b.Config.Bundle.Target - return fmt.Errorf("target with 'mode: production' must specify an explicit 'targets.%s.git' configuration", env) + log.Warnf(ctx, "target with 'mode: production' should specify an explicit 'targets.%s.git' configuration", env) } r := b.Config.Resources @@ -138,21 +139,6 @@ func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUs return nil } -// Determines whether a service principal identity is used to run the CLI. -func isServicePrincipalUsed(ctx context.Context, b *bundle.Bundle) (bool, error) { - ws := b.WorkspaceClient() - - // Check if a principal with the current user's ID exists. - // We need to use the ListAll method since Get is only usable by admins. - matches, err := ws.ServicePrincipals.ListAll(ctx, iam.ListServicePrincipalsRequest{ - Filter: "id eq " + b.Config.Workspace.CurrentUser.Id, - }) - if err != nil { - return false, err - } - return len(matches) > 0, nil -} - // Determines whether run_as is explicitly set for all resources. // We do this in a best-effort fashion rather than check the top-level // 'run_as' field because the latter is not required to be set. 
@@ -174,10 +160,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error { } return transformDevelopmentMode(b) case config.Production: - isPrincipal, err := isServicePrincipalUsed(ctx, b) - if err != nil { - return err - } + isPrincipal := auth.IsServicePrincipal(ctx, b.WorkspaceClient(), b.Config.Workspace.CurrentUser.Id) return validateProductionMode(ctx, b, isPrincipal) case "": // No action diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 76db64de..489632e1 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -118,17 +118,6 @@ func TestProcessTargetModeProduction(t *testing.T) { assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) } -func TestProcessTargetModeProductionGit(t *testing.T) { - bundle := mockBundle(config.Production) - - // Pretend the user didn't set Git configuration explicitly - bundle.Config.Bundle.Git.Inferred = true - - err := validateProductionMode(context.Background(), bundle, false) - require.ErrorContains(t, err, "git") - bundle.Config.Bundle.Git.Inferred = false -} - func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { bundle := mockBundle(config.Production) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 9ba7e190..2127a7bc 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -1,10 +1,12 @@ package bundle import ( + "errors" "os" "path/filepath" "strings" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/template" "github.com/spf13/cobra" @@ -36,9 +38,9 @@ func repoName(url string) string { func newInitCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "init TEMPLATE_PATH", + Use: "init [TEMPLATE_PATH]", Short: "Initialize Template", - Args: cobra.ExactArgs(1), + Args: cobra.MaximumNArgs(1), } var configFile string @@ -48,9 +50,26 @@ func newInitCommand() *cobra.Command { cmd.Flags().StringVar(&templateDir, "template-dir", "", "Directory within repository that holds the template specification.") cmd.Flags().StringVar(&outputDir, "output-dir", "", "Directory to write the initialized template to.") + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) error { - templatePath := args[0] ctx := cmd.Context() + var templatePath string + if len(args) > 0 { + templatePath = args[0] + } else { + return errors.New("please specify a template") + + /* TODO: propose to use default-python (once template is ready) + var err error + if !cmdio.IsOutTTY(ctx) || !cmdio.IsInTTY(ctx) { + return errors.New("please specify a template") + } + templatePath, err = cmdio.Ask(ctx, "Template to use", "default-python") + if err != nil { + return err + } + */ + } if !isRepoUrl(templatePath) { // skip downloading the repo because input arg is not a URL. 
We assume diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 2f32d260..e56074ef 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -113,6 +113,10 @@ TRY_AUTH: // or try picking a config profile dynamically return nil } +func SetWorkspaceClient(ctx context.Context, w *databricks.WorkspaceClient) context.Context { + return context.WithValue(ctx, &workspaceClient, w) +} + func transformLoadError(path string, err error) error { if os.IsNotExist(err) { return fmt.Errorf("no configuration file found at %s; please create one first", path) diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index e1c12336..ba7a5dfd 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -43,7 +43,7 @@ func getTarget(cmd *cobra.Command) (value string) { return target } -func getProfile(cmd *cobra.Command) (value string) { +func GetProfile(cmd *cobra.Command) (value string) { // The command line flag takes precedence. flag := cmd.Flag("profile") if flag != nil { @@ -70,7 +70,7 @@ func loadBundle(cmd *cobra.Command, args []string, load func(ctx context.Context return nil, nil } - profile := getProfile(cmd) + profile := GetProfile(cmd) if profile != "" { b.Config.Workspace.Profile = profile } diff --git a/libs/auth/service_principal.go b/libs/auth/service_principal.go new file mode 100644 index 00000000..58fcc6a7 --- /dev/null +++ b/libs/auth/service_principal.go @@ -0,0 +1,16 @@ +package auth + +import ( + "context" + + "github.com/databricks/databricks-sdk-go" +) + +// Determines whether a given user id is a service principal. +// This function uses a heuristic: if no user exists with this id, we assume +// it's a service principal. Unfortunately, the standard service principal API is too +// slow for our purposes. +func IsServicePrincipal(ctx context.Context, ws *databricks.WorkspaceClient, userId string) bool { + _, err := ws.Users.GetById(ctx, userId) + return err != nil +} diff --git a/libs/template/helpers.go b/libs/template/helpers.go index ac846658..b8f2fe45 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -1,10 +1,16 @@ package template import ( + "context" + "errors" "fmt" "net/url" "regexp" "text/template" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/auth" + "github.com/databricks/databricks-sdk-go/service/iam" ) type ErrFail struct { @@ -20,35 +26,87 @@ type pair struct { v any } -var helperFuncs = template.FuncMap{ - "fail": func(format string, args ...any) (any, error) { - return nil, ErrFail{fmt.Sprintf(format, args...)} - }, - // Alias for https://pkg.go.dev/net/url#Parse. Allows usage of all methods of url.URL - "url": func(rawUrl string) (*url.URL, error) { - return url.Parse(rawUrl) - }, - // Alias for https://pkg.go.dev/regexp#Compile. Allows usage of all methods of regexp.Regexp - "regexp": func(expr string) (*regexp.Regexp, error) { - return regexp.Compile(expr) - }, - // A key value pair. This is used with the map function to generate maps - // to use inside a template - "pair": func(k string, v any) pair { - return pair{k, v} - }, - // map converts a list of pairs to a map object. This is useful to pass multiple - // objects to templates defined in the library directory. Go text template - // syntax for invoking a template only allows specifying a single argument, - // this function can be used to workaround that limitation. 
- // - // For example: {{template "my_template" (map (pair "foo" $arg1) (pair "bar" $arg2))}} - // $arg1 and $arg2 can be referred from inside "my_template" as ".foo" and ".bar" - "map": func(pairs ...pair) map[string]any { - result := make(map[string]any, 0) - for _, p := range pairs { - result[p.k] = p.v - } - return result - }, +func loadHelpers(ctx context.Context) template.FuncMap { + var user *iam.User + var is_service_principal *bool + w := root.WorkspaceClient(ctx) + return template.FuncMap{ + "fail": func(format string, args ...any) (any, error) { + return nil, ErrFail{fmt.Sprintf(format, args...)} + }, + // Alias for https://pkg.go.dev/net/url#Parse. Allows usage of all methods of url.URL + "url": func(rawUrl string) (*url.URL, error) { + return url.Parse(rawUrl) + }, + // Alias for https://pkg.go.dev/regexp#Compile. Allows usage of all methods of regexp.Regexp + "regexp": func(expr string) (*regexp.Regexp, error) { + return regexp.Compile(expr) + }, + // A key value pair. This is used with the map function to generate maps + // to use inside a template + "pair": func(k string, v any) pair { + return pair{k, v} + }, + // map converts a list of pairs to a map object. This is useful to pass multiple + // objects to templates defined in the library directory. Go text template + // syntax for invoking a template only allows specifying a single argument, + // this function can be used to workaround that limitation. + // + // For example: {{template "my_template" (map (pair "foo" $arg1) (pair "bar" $arg2))}} + // $arg1 and $arg2 can be referred from inside "my_template" as ".foo" and ".bar" + "map": func(pairs ...pair) map[string]any { + result := make(map[string]any, 0) + for _, p := range pairs { + result[p.k] = p.v + } + return result + }, + // Get smallest node type (follows Terraform's GetSmallestNodeType) + "smallest_node_type": func() (string, error) { + if w.Config.Host == "" { + return "", errors.New("cannot determine target workspace, please first setup a configuration profile using 'databricks auth login'") + } + if w.Config.IsAzure() { + return "Standard_D3_v2", nil + } else if w.Config.IsGcp() { + return "n1-standard-4", nil + } + return "i3.xlarge", nil + }, + "workspace_host": func() (string, error) { + if w.Config.Host == "" { + return "", errors.New("cannot determine target workspace, please first setup a configuration profile using 'databricks auth login'") + } + return w.Config.Host, nil + }, + "user_name": func() (string, error) { + if user == nil { + var err error + user, err = w.CurrentUser.Me(ctx) + if err != nil { + return "", err + } + } + result := user.UserName + if result == "" { + result = user.Id + } + return result, nil + }, + "is_service_principal": func() (bool, error) { + if is_service_principal != nil { + return *is_service_principal, nil + } + if user == nil { + var err error + user, err = w.CurrentUser.Me(ctx) + if err != nil { + return false, err + } + } + result := auth.IsServicePrincipal(ctx, w, user.Id) + is_service_principal = &result + return result, nil + }, + } } diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 023eed29..d495ae89 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -2,9 +2,15 @@ package template import ( "context" + "os" "strings" "testing" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go" + workspaceConfig 
"github.com/databricks/databricks-sdk-go/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -13,7 +19,9 @@ func TestTemplatePrintStringWithoutProcessing(t *testing.T) { ctx := context.Background() tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/print-without-processing/template", "./testdata/print-without-processing/library", tmpDir) + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/print-without-processing/template", "./testdata/print-without-processing/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -28,7 +36,9 @@ func TestTemplateRegexpCompileFunction(t *testing.T) { ctx := context.Background() tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/regexp-compile/template", "./testdata/regexp-compile/library", tmpDir) + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/regexp-compile/template", "./testdata/regexp-compile/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -44,7 +54,9 @@ func TestTemplateUrlFunction(t *testing.T) { ctx := context.Background() tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/urlparse-function/template", "./testdata/urlparse-function/library", tmpDir) + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/urlparse-function/template", "./testdata/urlparse-function/library", tmpDir) require.NoError(t, err) @@ -59,7 +71,9 @@ func TestTemplateMapPairFunction(t *testing.T) { ctx := context.Background() tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/map-pair/template", "./testdata/map-pair/library", tmpDir) + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/map-pair/template", "./testdata/map-pair/library", tmpDir) require.NoError(t, err) @@ -69,3 +83,49 @@ func TestTemplateMapPairFunction(t *testing.T) { assert.Len(t, r.files, 1) assert.Equal(t, "false 123 hello 12.3", string(r.files[0].(*inMemoryFile).content)) } + +func TestWorkspaceHost(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + w := &databricks.WorkspaceClient{ + Config: &workspaceConfig.Config{ + Host: "https://myhost.com", + }, + } + ctx = root.SetWorkspaceClient(ctx, w) + + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/workspace-host/template", "./testdata/map-pair/library", tmpDir) + + require.NoError(t, err) + + err = r.walk() + assert.NoError(t, err) + + assert.Len(t, r.files, 1) + assert.Contains(t, string(r.files[0].(*inMemoryFile).content), "https://myhost.com") + assert.Contains(t, string(r.files[0].(*inMemoryFile).content), "i3.xlarge") + +} + +func TestWorkspaceHostNotConfigured(t *testing.T) { + ctx := context.Background() + cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "template") + ctx = cmdio.InContext(ctx, cmd) + tmpDir := t.TempDir() + + w := &databricks.WorkspaceClient{ + Config: &workspaceConfig.Config{}, + } + ctx = root.SetWorkspaceClient(ctx, w) + + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/workspace-host/template", "./testdata/map-pair/library", tmpDir) + + assert.NoError(t, err) + + err = r.walk() + require.ErrorContains(t, err, "cannot determine target workspace") + +} diff --git a/libs/template/materialize.go 
b/libs/template/materialize.go
index 426646c3..5422160d 100644
--- a/libs/template/materialize.go
+++ b/libs/template/materialize.go
@@ -2,6 +2,10 @@ package template

 import (
 	"context"
+	"embed"
+	"io/fs"
+	"os"
+	"path"
 	"path/filepath"
 )

@@ -9,6 +13,9 @@ const libraryDirName = "library"
 const templateDirName = "template"
 const schemaFileName = "databricks_template_schema.json"

+//go:embed all:templates
+var builtinTemplates embed.FS
+
 // This function materializes the input templates as a project, using user-defined
 // configurations.
 // Parameters:
@@ -18,9 +25,21 @@ const schemaFileName = "databricks_template_schema.json"
 //	templateRoot: root of the template definition
 //	outputDir: root of directory where to initialize the template
 func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir string) error {
+	// Use a temporary directory in case any built-in templates like default-python are used
+	tempDir, err := os.MkdirTemp("", "templates")
+	defer os.RemoveAll(tempDir)
+	if err != nil {
+		return err
+	}
+	templateRoot, err = prepareBuiltinTemplates(templateRoot, tempDir)
+	if err != nil {
+		return err
+	}
+
 	templatePath := filepath.Join(templateRoot, templateDirName)
 	libraryPath := filepath.Join(templateRoot, libraryDirName)
 	schemaPath := filepath.Join(templateRoot, schemaFileName)
+	helpers := loadHelpers(ctx)

 	config, err := newConfig(ctx, schemaPath)
 	if err != nil {
@@ -48,7 +67,7 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st
 	}

 	// Walk and render the template, since input configuration is complete
-	r, err := newRenderer(ctx, config.values, templatePath, libraryPath, outputDir)
+	r, err := newRenderer(ctx, config.values, helpers, templatePath, libraryPath, outputDir)
 	if err != nil {
 		return err
 	}
@@ -56,5 +75,46 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st
 	if err != nil {
 		return err
 	}
-	return r.persistToDisk()
+
+	err = r.persistToDisk()
+	if err != nil {
+		return err
+	}
+	println("✨ Successfully initialized template")
+	return nil
+}
+
+// If the given templateRoot matches the name of a built-in template, copy that
+// template to a temporary directory and return the path of the copy.
+func prepareBuiltinTemplates(templateRoot string, tempDir string) (string, error) {
+	_, err := fs.Stat(builtinTemplates, path.Join("templates", templateRoot))
+	if err != nil {
+		// The given path doesn't appear to be using our built-in templates
+		return templateRoot, nil
+	}
+
+	// We have a built-in template with the same name as templateRoot!
+	// Now we need to make a full copy of the built-in templates to a real file system
+	// since template.Parse() doesn't support embed.FS.
+ err = fs.WalkDir(builtinTemplates, "templates", func(path string, entry fs.DirEntry, err error) error { + if err != nil { + return err + } + + targetPath := filepath.Join(tempDir, path) + if entry.IsDir() { + return os.Mkdir(targetPath, 0755) + } else { + content, err := fs.ReadFile(builtinTemplates, path) + if err != nil { + return err + } + return os.WriteFile(targetPath, content, 0644) + } + }) + + if err != nil { + return "", err + } + + return filepath.Join(tempDir, "templates", templateRoot), nil } diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 9be1b58e..f4bd99d2 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -57,9 +57,9 @@ type renderer struct { instanceRoot string } -func newRenderer(ctx context.Context, config map[string]any, templateRoot, libraryRoot, instanceRoot string) (*renderer, error) { +func newRenderer(ctx context.Context, config map[string]any, helpers template.FuncMap, templateRoot, libraryRoot, instanceRoot string) (*renderer, error) { // Initialize new template, with helper functions loaded - tmpl := template.New("").Funcs(helperFuncs) + tmpl := template.New("").Funcs(helpers) // Load user defined associated templates from the library root libraryGlob := filepath.Join(libraryRoot, "*") @@ -104,7 +104,7 @@ func (r *renderer) executeTemplate(templateDefinition string) (string, error) { // Parse the template text tmpl, err = tmpl.Parse(templateDefinition) if err != nil { - return "", err + return "", fmt.Errorf("error in %s: %w", templateDefinition, err) } // Execute template and get result diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index f3f7f234..a2e5675e 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -12,6 +12,7 @@ import ( "testing" "text/template" + "github.com/databricks/cli/cmd/root" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -31,7 +32,10 @@ func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { tmpDir := t.TempDir() - r, err := newRenderer(context.Background(), nil, "./testdata/email/template", "./testdata/email/library", tmpDir) + ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/email/template", "./testdata/email/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -202,9 +206,11 @@ func TestRendererPersistToDisk(t *testing.T) { func TestRendererWalk(t *testing.T) { ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/walk/template", "./testdata/walk/library", tmpDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/walk/template", "./testdata/walk/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -241,9 +247,11 @@ func TestRendererWalk(t *testing.T) { func TestRendererFailFunction(t *testing.T) { ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/fail/template", "./testdata/fail/library", tmpDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/fail/template", "./testdata/fail/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -252,9 +260,11 @@ func TestRendererFailFunction(t *testing.T) { func TestRendererSkipsDirsEagerly(t *testing.T) { ctx := 
context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/skip-dir-eagerly/template", "./testdata/skip-dir-eagerly/library", tmpDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/skip-dir-eagerly/template", "./testdata/skip-dir-eagerly/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -267,9 +277,11 @@ func TestRendererSkipsDirsEagerly(t *testing.T) { func TestRendererSkipAllFilesInCurrentDirectory(t *testing.T) { ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/skip-all-files-in-cwd/template", "./testdata/skip-all-files-in-cwd/library", tmpDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/skip-all-files-in-cwd/template", "./testdata/skip-all-files-in-cwd/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -288,9 +300,11 @@ func TestRendererSkipAllFilesInCurrentDirectory(t *testing.T) { func TestRendererSkipPatternsAreRelativeToFileDirectory(t *testing.T) { ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/skip-is-relative/template", "./testdata/skip-is-relative/library", tmpDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/skip-is-relative/template", "./testdata/skip-is-relative/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -304,9 +318,11 @@ func TestRendererSkipPatternsAreRelativeToFileDirectory(t *testing.T) { func TestRendererSkip(t *testing.T) { ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/skip/template", "./testdata/skip/library", tmpDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/skip/template", "./testdata/skip/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -335,8 +351,10 @@ func TestRendererReadsPermissionsBits(t *testing.T) { } tmpDir := t.TempDir() ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) - r, err := newRenderer(ctx, nil, "./testdata/executable-bit-read/template", "./testdata/executable-bit-read/library", tmpDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/executable-bit-read/template", "./testdata/executable-bit-read/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -422,9 +440,11 @@ func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { func TestRendererNonTemplatesAreCreatedAsCopyFiles(t *testing.T) { ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, "./testdata/copy-file-walk/template", "./testdata/copy-file-walk/library", tmpDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/copy-file-walk/template", "./testdata/copy-file-walk/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -437,12 +457,14 @@ func TestRendererNonTemplatesAreCreatedAsCopyFiles(t *testing.T) { func TestRendererFileTreeRendering(t *testing.T) { ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) tmpDir := t.TempDir() + helpers := loadHelpers(ctx) r, err := newRenderer(ctx, map[string]any{ "dir_name": "my_directory", "file_name": "my_file", - }, "./testdata/file-tree-rendering/template", "./testdata/file-tree-rendering/library", tmpDir) + 
}, helpers, "./testdata/file-tree-rendering/template", "./testdata/file-tree-rendering/library", tmpDir)
 	require.NoError(t, err)

 	err = r.walk()
@@ -462,9 +484,11 @@ func TestRendererFileTreeRendering(t *testing.T) {

 func TestRendererSubTemplateInPath(t *testing.T) {
 	ctx := context.Background()
+	ctx = root.SetWorkspaceClient(ctx, nil)
 	tmpDir := t.TempDir()

-	r, err := newRenderer(ctx, nil, "./testdata/template-in-path/template", "./testdata/template-in-path/library", tmpDir)
+	helpers := loadHelpers(ctx)
+	r, err := newRenderer(ctx, nil, helpers, "./testdata/template-in-path/template", "./testdata/template-in-path/library", tmpDir)
 	require.NoError(t, err)

 	err = r.walk()

diff --git a/libs/template/templates/default-python/databricks_template_schema.json b/libs/template/templates/default-python/databricks_template_schema.json
new file mode 100644
index 00000000..b680c5fb
--- /dev/null
+++ b/libs/template/templates/default-python/databricks_template_schema.json
@@ -0,0 +1,9 @@
+{
+    "properties": {
+        "project_name": {
+            "type": "string",
+            "default": "my_project",
+            "description": "Name of the directory"
+        }
+    }
+}

diff --git a/libs/template/templates/default-python/defaults.json b/libs/template/templates/default-python/defaults.json
new file mode 100644
index 00000000..99ecd36d
--- /dev/null
+++ b/libs/template/templates/default-python/defaults.json
@@ -0,0 +1,3 @@
+{
+    "project_name": "my_project"
+}

diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md b/libs/template/templates/default-python/template/{{.project_name}}/README.md
new file mode 100644
index 00000000..3187b9ed
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/README.md
@@ -0,0 +1,3 @@
+# {{.project_name}}
+
+The '{{.project_name}}' bundle was generated using the default-python template.

diff --git a/libs/template/testdata/workspace-host/template/file.tmpl b/libs/template/testdata/workspace-host/template/file.tmpl
new file mode 100644
index 00000000..2098e41b
--- /dev/null
+++ b/libs/template/testdata/workspace-host/template/file.tmpl
@@ -0,0 +1,2 @@
+{{workspace_host}}
+{{smallest_node_type}}

From 861f33d37696413f07fea6c6bb72fa1f8486fda6 Mon Sep 17 00:00:00 2001
From: "Lennart Kats (databricks)"
Date: Mon, 28 Aug 2023 00:51:35 -0700
Subject: [PATCH 037/310] Support cluster overrides with cluster_key and compute_key (#696)

## Changes

Support `databricks bundle deploy --compute-id my_all_purpose_cluster` in the two cases that were previously missed: tasks that use `job_cluster_key` and tasks that use `compute_key` (see the sketch below).
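For illustration only (this example is not part of the original patch; the task keys and cluster ID below are made up), a configuration like the following is now covered by the override:

```yaml
resources:
  jobs:
    my_job:
      tasks:
        # Previously only new_cluster / existing_cluster_id were overridden.
        - task_key: uses_job_cluster
          job_cluster_key: my_job_cluster
        - task_key: uses_compute
          compute_key: my_compute
```

With `databricks bundle deploy --compute-id my_all_purpose_cluster`, both tasks should end up pinned to `existing_cluster_id: my_all_purpose_cluster`, with `job_cluster_key` and `compute_key` cleared, per the mutator change below.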
---
 bundle/config/mutator/override_compute.go   |  6 +--
 .../config/mutator/override_compute_test.go | 37 +++++++++++++++++++
 2 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go
index 12439249..ee2e2a82 100644
--- a/bundle/config/mutator/override_compute.go
+++ b/bundle/config/mutator/override_compute.go
@@ -23,10 +23,10 @@ func (m *overrideCompute) Name() string {
 func overrideJobCompute(j *resources.Job, compute string) {
 	for i := range j.Tasks {
 		task := &j.Tasks[i]
-		if task.NewCluster != nil {
+		if task.NewCluster != nil || task.ExistingClusterId != "" || task.ComputeKey != "" || task.JobClusterKey != "" {
 			task.NewCluster = nil
-			task.ExistingClusterId = compute
-		} else if task.ExistingClusterId != "" {
+			task.JobClusterKey = ""
+			task.ComputeKey = ""
 			task.ExistingClusterId = compute
 		}
 	}

diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go
index 9eb99edb..f04c91c4 100644
--- a/bundle/config/mutator/override_compute_test.go
+++ b/bundle/config/mutator/override_compute_test.go
@@ -34,6 +34,12 @@ func TestOverrideDevelopment(t *testing.T) {
 					{
 						ExistingClusterId: "cluster2",
 					},
+					{
+						ComputeKey: "compute_key",
+					},
+					{
+						JobClusterKey: "cluster_key",
+					},
 				},
 			}},
 		},
@@ -47,6 +53,12 @@ func TestOverrideDevelopment(t *testing.T) {
 	assert.Nil(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
 	assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
 	assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
+	assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[2].ExistingClusterId)
+	assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[3].ExistingClusterId)
+
+	assert.Nil(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
+	assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[2].ComputeKey)
+	assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey)
 }

 func TestOverrideDevelopmentEnv(t *testing.T) {
@@ -77,6 +89,31 @@ func TestOverrideDevelopmentEnv(t *testing.T) {
 	assert.Equal(t, "cluster2", bundle.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
 }

+func TestOverridePipelineTask(t *testing.T) {
+	os.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
+	bundle := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {JobSettings: &jobs.JobSettings{
+						Name: "job1",
+						Tasks: []jobs.Task{
+							{
+								PipelineTask: &jobs.PipelineTask{},
+							},
+						},
+					}},
+				},
+			},
+		},
+	}
+
+	m := mutator.OverrideCompute()
+	err := m.Apply(context.Background(), bundle)
+	require.NoError(t, err)
+	assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
+}
+
 func TestOverrideProduction(t *testing.T) {
 	bundle := &bundle.Bundle{
 		Config: config.Root{

From 5f6289e3a71928f8d4f50908db8e88e485b673be Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Mon, 28 Aug 2023 18:29:04 +0200
Subject: [PATCH 038/310] Allow referencing local Python wheels without artifacts section defined (#703)

## Changes

Now, if the user references local Python wheel files and does not specify an "artifacts" section, these files will be automatically uploaded by the CLI; the effect is roughly as if the artifacts section had been written by hand (see the sketch below).
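A sketch of the effect (not part of the original patch; the exact YAML keys for the artifact entry are an assumption, since the mutator fills in the in-memory `config.Artifact` struct directly):

```yaml
resources:
  jobs:
    test_job:
      tasks:
        - task_key: TestTask
          python_wheel_task:
            package_name: "my_test_code"
            entry_point: "run"
          libraries:
            - whl: ./package/*.whl

# ...is now handled roughly as if the following had been written explicitly:
artifacts:
  my_test_code-0.0.1-py3-none-any.whl:
    type: whl
    files:
      - source: ./package/my_test_code-0.0.1-py3-none-any.whl
```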
Fixes #693

## Tests

Added unit tests. Ran bundle deploy for this configuration:
```
resources:
  jobs:
    some_other_job:
      name: "[${bundle.environment}] My Wheel Job"
      tasks:
        - task_key: TestTask
          existing_cluster_id: ${var.job_existing_cluster}
          python_wheel_task:
            package_name: "my_test_code"
            entry_point: "run"
          libraries:
            - whl: ./dist/*.whl
```

Result:
```
andrew.nester@HFW9Y94129 wheel % databricks bundle deploy
artifacts.whl.AutoDetect: Detecting Python wheel project...
artifacts.whl.AutoDetect: No Python wheel project found at bundle root folder
Starting upload of bundle files
Uploaded bundle files at /Users/andrew.nester@databricks.com/.bundle/wheel-task/default/files!
artifacts.Upload(my_test_code-0.0.1-py3-none-any.whl): Uploading...
artifacts.Upload(my_test_code-0.0.1-py3-none-any.whl): Upload succeeded
```

---
 bundle/artifacts/autodetect.go           |  1 +
 bundle/artifacts/infer.go                |  6 +-
 bundle/artifacts/whl/from_libraries.go   | 56 ++++++++++++++++++
 .../.gitignore                           |  3 +
 .../bundle.yml                           | 22 +++++++
 .../my_test_code-0.0.1-py3-none-any.whl  | Bin 0 -> 1909 bytes
 bundle/tests/bundle/wheel_test.go        | 26 ++++++++
 7 files changed, 113 insertions(+), 1 deletion(-)
 create mode 100644 bundle/artifacts/whl/from_libraries.go
 create mode 100644 bundle/tests/bundle/python_wheel_no_artifact_no_setup/.gitignore
 create mode 100644 bundle/tests/bundle/python_wheel_no_artifact_no_setup/bundle.yml
 create mode 100644 bundle/tests/bundle/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl

diff --git a/bundle/artifacts/autodetect.go b/bundle/artifacts/autodetect.go
index fa8126f9..6e80ef0b 100644
--- a/bundle/artifacts/autodetect.go
+++ b/bundle/artifacts/autodetect.go
@@ -28,5 +28,6 @@ func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) error {
 	return bundle.Apply(ctx, b, bundle.Seq(
 		whl.DetectPackage(),
+		whl.DefineArtifactsFromLibraries(),
 	))
 }

diff --git a/bundle/artifacts/infer.go b/bundle/artifacts/infer.go
index 233fbda8..ade5def5 100644
--- a/bundle/artifacts/infer.go
+++ b/bundle/artifacts/infer.go
@@ -47,7 +47,11 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
 		return fmt.Errorf("artifact doesn't exist: %s", m.name)
 	}

-	if artifact.BuildCommand != "" {
+	// Only try to infer a build command if one is not already defined
+	// and no files are explicitly defined, which means the package is
+	// built outside of the bundle cycle, manually by the customer.
+	if artifact.BuildCommand != "" || len(artifact.Files) > 0 {
 		return nil
 	}

diff --git a/bundle/artifacts/whl/from_libraries.go b/bundle/artifacts/whl/from_libraries.go
new file mode 100644
index 00000000..855e5b94
--- /dev/null
+++ b/bundle/artifacts/whl/from_libraries.go
@@ -0,0 +1,56 @@
+package whl
+
+import (
+	"context"
+	"path/filepath"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/log"
+)
+
+type fromLibraries struct{}
+
+func DefineArtifactsFromLibraries() bundle.Mutator {
+	return &fromLibraries{}
+}
+
+func (m *fromLibraries) Name() string {
+	return "artifacts.whl.DefineArtifactsFromLibraries"
+}
+
+func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
+	if len(b.Config.Artifacts) != 0 {
+		log.Debugf(ctx, "Skipping defining artifacts from libraries because the artifacts section is explicitly defined")
+		return nil
+	}
+
+	tasks := libraries.FindAllWheelTasks(b)
+	for _, task := range tasks {
+		for _, lib := range task.Libraries {
+			matches, err := filepath.Glob(filepath.Join(b.Config.Path, lib.Whl))
+			// The file referenced from the libraries section does not exist
+			// or the glob pattern is malformed; skip this library.
+			if err != nil {
+				continue
+			}
+
+			for _, match := range matches {
+				name := filepath.Base(match)
+				if b.Config.Artifacts == nil {
+					b.Config.Artifacts = make(map[string]*config.Artifact)
+				}
+
+				log.Debugf(ctx, "Adding an artifact block for %s", match)
+				b.Config.Artifacts[name] = &config.Artifact{
+					Files: []config.ArtifactFile{
+						{Source: match},
+					},
+					Type: config.ArtifactPythonWheel,
+				}
+			}
+		}
+	}
+
+	return nil
+}

diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/.gitignore b/bundle/tests/bundle/python_wheel_no_artifact_no_setup/.gitignore
new file mode 100644
index 00000000..f03e23bc
--- /dev/null
+++ b/bundle/tests/bundle/python_wheel_no_artifact_no_setup/.gitignore
@@ -0,0 +1,3 @@
+build/
+*.egg-info
+.databricks

diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/bundle.yml b/bundle/tests/bundle/python_wheel_no_artifact_no_setup/bundle.yml
new file mode 100644
index 00000000..1bac4eba
--- /dev/null
+++ b/bundle/tests/bundle/python_wheel_no_artifact_no_setup/bundle.yml
@@ -0,0 +1,22 @@
+bundle:
+  name: python-wheel-local
+
+resources:
+  jobs:
+    test_job:
+      name: "[${bundle.environment}] My Wheel Job"
+      tasks:
+        - task_key: TestTask
+          existing_cluster_id: "0717-aaaaa-bbbbbb"
+          python_wheel_task:
+            package_name: "my_test_code"
+            entry_point: "run"
+          libraries:
+            - whl: ./package/*.whl
+        - task_key: TestTask2
+          existing_cluster_id: "0717-aaaaa-bbbbbb"
+          python_wheel_task:
+            package_name: "my_test_code"
+            entry_point: "run"
+          libraries:
+            - whl: ./non-existing/*.whl

diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl b/bundle/tests/bundle/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..14702281d87f6180c03799b19ec1ab6a10f95509
GIT binary patch
literal 1909
zcmWIWW@Zs#U|`^2*jp?fe!}|sR6QWi0Ei`kIJYvsB(=CCJ~=-nRX;vHGcU6wK3=b&
zvb$g3TUYDcne*NK;Tu8&j4l{oFb;Xt)$gyPd-8;S6UQ!scPxs+J%e^0VsBe
?D2WA7-;q<>~`cPX6AwI2<_X=cSu2`t$f}ie!eZ;{1=vN?&DMITQaeELIL+u(>;5)
$X+r2xBYYBxtn(dG^WH%T*S*7xY*=nSa&BQzw;A`e9N@juAK?%jFWvPw$0CZ@vCv+
UC#34^olIq*6pQyn)$`I!%h`$?Gjn`C#t5}?EPHZ3!U1Q;tPwvd=RbT^uPH3*Yt>e
M<1Tg)_I<`wpMwQR8ZT=$x|fUIW*1fG>RX#;XTV4(hyusmi%+F9@wUD;JW4nu!?^$dy
ymf_ll8nmFA_hkt!=|h^YuC#bJFA^;`|kbeX>&dRJ69
$LIY8Z@pKnw6_ymom8!NL@#1rQkVUv_Vm}n6HeJ~oXB#=tVhD5W4+QnRF8HC7_Eu{
zdJu$_2zfNz!`0P?`@Dzm*^9i8G_yI#;EM5sBLB1Ax?Vab^*051c%9VI>(pmo8F<0q
zg0b0k;|ou?PMz1k_*Lf`uZFJI*^@q-f;2R>e4lc8`fLxF(do6c`hOW@qmTJp+EAUA$0x@kV?}enEUrYFTOy$imY|
zRy;-bO4j6ipW_%A7#x@w7?cTlCCJs;Kggx^!s((W1BTXzwewju6GS3P6a`)N*1GB}
zo6R$|BhW*GGw|1!x5sAg%8Y;T`}~}=1Cuy9zwc4$mk4%KJyvC%@hBo<^2^E7%>*5v
zoVe}L_+0g#4p+3F{uw5}TWgyqthzT(JRh{-dqHsOvazjxvS@7*ijT2DGQG1vS2$uXOsx>kEV}tyyM^QLgJ<_EQLtFYDR9(i4JT8T(!p
zvsMQSZum7N^~wA&w+~NeY*}qm{HI*u>qBp=>s#9M1H2iTM3`}xmcTdygC&h13ON-Z
zm!0UQpqGabGZ+|_G^Svh0xunrO~F?9AWX3ZCLv_E;4F&JjYQAp2qV>iDH5-d;7pHh
z9(vY9n0JVfdFUAx-Dvb2h%nlWnSfJK@*}#r=vfeE?iR*gLgqp;WPmp-8%Ps75E=u0
Jsly850RXT?oKXM(

literal 0
HcmV?d00001

diff --git a/bundle/tests/bundle/wheel_test.go b/bundle/tests/bundle/wheel_test.go
index ee745773..f7f0e75e 100644
--- a/bundle/tests/bundle/wheel_test.go
+++ b/bundle/tests/bundle/wheel_test.go
@@ -60,3 +60,29 @@ func TestBundlePythonWheelWithDBFSLib(t *testing.T) {
 	err = match.Apply(ctx, b)
 	require.NoError(t, err)
 }
+
+func TestBundlePythonWheelBuildNoBuildJustUpload(t *testing.T) {
+	ctx :=
context.Background() + b, err := bundle.Load(ctx, "./python_wheel_no_artifact_no_setup") + require.NoError(t, err) + + m := phases.Build() + err = m.Apply(ctx, b) + require.NoError(t, err) + + match := libraries.MatchWithArtifacts() + err = match.Apply(ctx, b) + require.ErrorContains(t, err, "./non-existing/*.whl") + + require.NotZero(t, len(b.Config.Artifacts)) + + artifact := b.Config.Artifacts["my_test_code-0.0.1-py3-none-any.whl"] + require.NotNil(t, artifact) + require.Empty(t, artifact.BuildCommand) + require.Contains(t, artifact.Files[0].Source, filepath.Join( + b.Config.Path, + "package", + "my_test_code-0.0.1-py3-none-any.whl", + )) + require.True(t, artifact.Files[0].NeedsUpload()) +} From 5477afe4f43f27e4880bde87d2fe066e1362f7dd Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 28 Aug 2023 19:05:55 +0200 Subject: [PATCH 039/310] Fixed --environment flag (#705) ## Changes Fixed --environment flag Fixes https://github.com/databricks/setup-cli/issues/35 ## Tests Added regression test --- cmd/root/bundle.go | 2 +- cmd/root/bundle_test.go | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index ba7a5dfd..fe97fbf2 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -26,7 +26,7 @@ func getTarget(cmd *cobra.Command) (value string) { oldFlag := cmd.Flag("environment") if oldFlag != nil { - value = flag.Value.String() + value = oldFlag.Value.String() if value != "" { return } diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 8aff9018..09b33d58 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -138,7 +138,7 @@ func TestTargetFlagFull(t *testing.T) { err := cmd.ExecuteContext(ctx) assert.NoError(t, err) - assert.Equal(t, cmd.Flag("target").Value.String(), "development") + assert.Equal(t, getTarget(cmd), "development") } func TestTargetFlagShort(t *testing.T) { @@ -150,5 +150,19 @@ func TestTargetFlagShort(t *testing.T) { err := cmd.ExecuteContext(ctx) assert.NoError(t, err) - assert.Equal(t, cmd.Flag("target").Value.String(), "production") + assert.Equal(t, getTarget(cmd), "production") +} + +// TODO: remove when environment flag is fully deprecated +func TestTargetEnvironmentFlag(t *testing.T) { + cmd := emptyCommand(t) + initTargetFlag(cmd) + initEnvironmentFlag(cmd) + cmd.SetArgs([]string{"version", "--environment", "development"}) + + ctx := context.Background() + err := cmd.ExecuteContext(ctx) + assert.NoError(t, err) + + assert.Equal(t, getTarget(cmd), "development") } From 842cd8b7aea55352aed3a60103e352d5332e905b Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 29 Aug 2023 10:26:09 +0200 Subject: [PATCH 040/310] Correctly identify local paths in libraries section (#702) ## Changes Fixes #699 ## Tests Added unit test --- bundle/libraries/libraries.go | 36 +++++++++++++++++++++++++----- bundle/libraries/libraries_test.go | 30 +++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 6 deletions(-) create mode 100644 bundle/libraries/libraries_test.go diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 29848236..d26768f9 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -3,6 +3,7 @@ package libraries import ( "context" "fmt" + "net/url" "path/filepath" "strings" @@ -92,13 +93,13 @@ func findArtifactsAndMarkForUpload(ctx context.Context, lib *compute.Library, b } if len(matches) == 0 && isLocalLibrary(lib) { - return fmt.Errorf("no library found for %s", libPath(lib)) + return 
fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libPath(lib)) } for _, match := range matches { af, err := findArtifactFileByLocalPath(match, b) if err != nil { - cmdio.LogString(ctx, fmt.Sprintf("%s. Skipping %s. In order to use the library upload it manually", err.Error(), match)) + cmdio.LogString(ctx, fmt.Sprintf("%s. Skipping uploading. In order to use the define 'artifacts' section", err.Error())) } else { af.Libraries = append(af.Libraries, lib) } @@ -116,7 +117,7 @@ func findArtifactFileByLocalPath(path string, b *bundle.Bundle) (*config.Artifac } } - return nil, fmt.Errorf("artifact file is not found for path %s", path) + return nil, fmt.Errorf("artifact section is not defined for file at %s", path) } func libPath(library *compute.Library) string { @@ -139,11 +140,34 @@ func isLocalLibrary(library *compute.Library) bool { return false } - return !isDbfsPath(path) && !isWorkspacePath(path) + if isExplicitFileScheme(path) { + return true + } + + if isRemoteStorageScheme(path) { + return false + } + + return !isWorkspacePath(path) } -func isDbfsPath(path string) bool { - return strings.HasPrefix(path, "dbfs:/") +func isExplicitFileScheme(path string) bool { + return strings.HasPrefix(path, "file://") +} + +func isRemoteStorageScheme(path string) bool { + url, err := url.Parse(path) + if err != nil { + return false + } + + if url.Scheme == "" { + return false + } + + // If the path starts with scheme:// format, it's a correct remote storage scheme + return strings.HasPrefix(path, url.Scheme+"://") + } func isWorkspacePath(path string) bool { diff --git a/bundle/libraries/libraries_test.go b/bundle/libraries/libraries_test.go new file mode 100644 index 00000000..050efe74 --- /dev/null +++ b/bundle/libraries/libraries_test.go @@ -0,0 +1,30 @@ +package libraries + +import ( + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/require" +) + +var testCases map[string]bool = map[string]bool{ + "./some/local/path": true, + "/some/full/path": true, + "/Workspace/path/to/package": false, + "/Users/path/to/package": false, + "file://path/to/package": true, + "C:\\path\\to\\package": true, + "dbfs://path/to/package": false, + "s3://path/to/package": false, + "abfss://path/to/package": false, +} + +func TestIsLocalLbrary(t *testing.T) { + for p, result := range testCases { + lib := compute.Library{ + Whl: p, + } + require.Equal(t, result, isLocalLibrary(&lib), fmt.Sprintf("isLocalLibrary must return %t for path %s ", result, p)) + } +} From 3f2cf3c6b73de97df75e11e37bf1f759c6ab8006 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 29 Aug 2023 10:26:26 +0200 Subject: [PATCH 041/310] Fixed path joining in FindFilesWithSuffixInPath (#704) ## Changes Fixes #693 ## Tests Newly added tests failed before the fix: https://github.com/databricks/cli/actions/runs/6000754026/job/16273507998?pr=704 --- python/utils.go | 8 ++++---- python/utils_test.go | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) create mode 100644 python/utils_test.go diff --git a/python/utils.go b/python/utils.go index 10654edc..a8408fae 100644 --- a/python/utils.go +++ b/python/utils.go @@ -5,7 +5,7 @@ package python import ( "context" "os" - "path" + "path/filepath" "strings" "github.com/databricks/cli/libs/log" @@ -13,8 +13,8 @@ import ( func CleanupWheelFolder(dir string) { // there or not there - we don't care - os.RemoveAll(path.Join(dir, "__pycache__")) - os.RemoveAll(path.Join(dir, 
"build")) + os.RemoveAll(filepath.Join(dir, "__pycache__")) + os.RemoveAll(filepath.Join(dir, "build")) eggInfo := FindFilesWithSuffixInPath(dir, ".egg-info") if len(eggInfo) == 0 { return @@ -42,7 +42,7 @@ func FindFilesWithSuffixInPath(dir, suffix string) []string { if !strings.HasSuffix(child.Name(), suffix) { continue } - files = append(files, path.Join(dir, child.Name())) + files = append(files, filepath.Join(dir, child.Name())) } return files } diff --git a/python/utils_test.go b/python/utils_test.go new file mode 100644 index 00000000..1656d1ec --- /dev/null +++ b/python/utils_test.go @@ -0,0 +1,21 @@ +package python + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFindFilesWithSuffixInPath(t *testing.T) { + dir, err := os.Getwd() + require.NoError(t, err) + + files := FindFilesWithSuffixInPath(dir, "test.go") + + matches, err := filepath.Glob(filepath.Join(dir, "*test.go")) + require.NoError(t, err) + + require.ElementsMatch(t, files, matches) +} From 12368e3382f59cfdba3bbc775423181f458c62cb Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 30 Aug 2023 14:21:39 +0200 Subject: [PATCH 042/310] Added transformation mutator for Python wheel task for them to work on DBR <13.1 (#635) ## Changes ***Note: this PR relies on sync.include functionality from here: https://github.com/databricks/cli/pull/671*** Added transformation mutator for Python wheel task for them to work on DBR <13.1 Using wheels upload to Workspace file system as cluster libraries is not supported in DBR < 13.1 In order to make Python wheel work correctly on DBR < 13.1 we do the following: 1. Build and upload python wheel as usual 2. Transform python wheel task into special notebook task which does the following a. Installs all necessary wheels with %pip magic b. Executes defined entry point with all provided parameters 3. Upload this notebook file to workspace file system 4. 
This is also beneficial for executing on existing clusters, because this notebook always reinstalls the wheels, so if there are any changes to the wheel package, they are correctly picked up.

## Tests

bundle.yml:
```yaml
bundle:
  name: wheel-task

workspace:
  host: ****

resources:
  jobs:
    test_job:
      name: "[${bundle.environment}] My Wheel Job"
      tasks:
        - task_key: TestTask
          existing_cluster_id: "***"
          python_wheel_task:
            package_name: "my_test_code"
            entry_point: "run"
            parameters: ["first argument","first value","second argument","second value"]
          libraries:
            - whl: ./dist/*.whl
```

Output:
```
andrew.nester@HFW9Y94129 wheel % databricks bundle run test_job
Run URL: ***

2023-08-03 15:58:04 "[default] My Wheel Job" TERMINATED SUCCESS
Output:
=======
Task TestTask:
Hello from my func
Got arguments v1:
['python', 'first argument', 'first value', 'second argument', 'second value']
```

---
 bundle/config/mutator/trampoline.go      | 100 +++++++++++++++++++++
 bundle/config/mutator/trampoline_test.go |  97 ++++++++++++++++++++
 bundle/phases/deploy.go                  |   4 +-
 bundle/python/transform.go               | 109 +++++++++++++++++++++++
 bundle/python/transform_test.go          |  66 ++++++++++++++
 5 files changed, 375 insertions(+), 1 deletion(-)
 create mode 100644 bundle/config/mutator/trampoline.go
 create mode 100644 bundle/config/mutator/trampoline_test.go
 create mode 100644 bundle/python/transform.go
 create mode 100644 bundle/python/transform_test.go

diff --git a/bundle/config/mutator/trampoline.go b/bundle/config/mutator/trampoline.go
new file mode 100644
index 00000000..7c06c7fa
--- /dev/null
+++ b/bundle/config/mutator/trampoline.go
@@ -0,0 +1,100 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"text/template"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+)
+
+type TaskWithJobKey struct {
+	Task   *jobs.Task
+	JobKey string
+}
+
+type TrampolineFunctions interface {
+	GetTemplateData(task *jobs.Task) (map[string]any, error)
+	GetTasks(b *bundle.Bundle) []TaskWithJobKey
+	CleanUp(task *jobs.Task) error
+}
+
+type trampoline struct {
+	name      string
+	functions TrampolineFunctions
+	template  string
+}
+
+func NewTrampoline(
+	name string,
+	functions TrampolineFunctions,
+	template string,
+) *trampoline {
+	return &trampoline{name, functions, template}
+}
+
+func (m *trampoline) Name() string {
+	return fmt.Sprintf("trampoline(%s)", m.name)
+}
+
+func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) error {
+	tasks := m.functions.GetTasks(b)
+	for _, task := range tasks {
+		err := m.generateNotebookWrapper(b, task)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *trampoline) generateNotebookWrapper(b *bundle.Bundle, task TaskWithJobKey) error {
+	internalDir, err := b.InternalDir()
+	if err != nil {
+		return err
+	}
+
+	notebookName := fmt.Sprintf("notebook_%s_%s", task.JobKey, task.Task.TaskKey)
+	localNotebookPath := filepath.Join(internalDir, notebookName+".py")
+
+	err = os.MkdirAll(filepath.Dir(localNotebookPath), 0755)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Create(localNotebookPath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	data, err := m.functions.GetTemplateData(task.Task)
+	if err != nil {
+		return err
+	}
+
+	t, err := template.New(notebookName).Parse(m.template)
+	if err != nil {
+		return err
+	}
+
+	internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
+	if err != nil {
+		return err
+	}
+
+	err = m.functions.CleanUp(task.Task)
+	if err != nil {
+		return err
+	}
+
remotePath := path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(internalDirRel), notebookName) + + task.Task.NotebookTask = &jobs.NotebookTask{ + NotebookPath: remotePath, + } + + return t.Execute(f, data) +} diff --git a/bundle/config/mutator/trampoline_test.go b/bundle/config/mutator/trampoline_test.go new file mode 100644 index 00000000..e523250e --- /dev/null +++ b/bundle/config/mutator/trampoline_test.go @@ -0,0 +1,97 @@ +package mutator + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +type functions struct{} + +func (f *functions) GetTasks(b *bundle.Bundle) []TaskWithJobKey { + tasks := make([]TaskWithJobKey, 0) + for k := range b.Config.Resources.Jobs["test"].Tasks { + tasks = append(tasks, TaskWithJobKey{ + JobKey: "test", + Task: &b.Config.Resources.Jobs["test"].Tasks[k], + }) + } + + return tasks +} + +func (f *functions) GetTemplateData(task *jobs.Task) (map[string]any, error) { + if task.PythonWheelTask == nil { + return nil, fmt.Errorf("PythonWheelTask cannot be nil") + } + + data := make(map[string]any) + data["MyName"] = "Trampoline" + return data, nil +} + +func (f *functions) CleanUp(task *jobs.Task) error { + task.PythonWheelTask = nil + return nil +} + +func TestGenerateTrampoline(t *testing.T) { + tmpDir := t.TempDir() + + tasks := []jobs.Task{ + { + TaskKey: "to_trampoline", + PythonWheelTask: &jobs.PythonWheelTask{ + PackageName: "test", + EntryPoint: "run", + }}, + } + + b := &bundle.Bundle{ + Config: config.Root{ + Path: tmpDir, + Bundle: config.Bundle{ + Target: "development", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test": { + Paths: resources.Paths{ + ConfigFilePath: tmpDir, + }, + JobSettings: &jobs.JobSettings{ + Tasks: tasks, + }, + }, + }, + }, + }, + } + ctx := context.Background() + + funcs := functions{} + trampoline := NewTrampoline("test_trampoline", &funcs, "Hello from {{.MyName}}") + err := bundle.Apply(ctx, b, trampoline) + require.NoError(t, err) + + dir, err := b.InternalDir() + require.NoError(t, err) + filename := filepath.Join(dir, "notebook_test_to_trampoline.py") + + bytes, err := os.ReadFile(filename) + require.NoError(t, err) + + require.Equal(t, "Hello from Trampoline", string(bytes)) + + task := b.Config.Resources.Jobs["test"].Tasks[0] + require.Equal(t, task.NotebookTask.NotebookPath, ".databricks/bundle/development/.internal/notebook_test_to_trampoline") + require.Nil(t, task.PythonWheelTask) +} diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 011bb4b2..5a9a7f2f 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/deploy/lock" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/bundle/python" ) // The deploy phase deploys artifacts and resources. 
@@ -17,10 +18,11 @@ func Deploy() bundle.Mutator {
 		bundle.Defer(
 			bundle.Seq(
 				mutator.ValidateGitDetails(),
-				files.Upload(),
 				libraries.MatchWithArtifacts(),
 				artifacts.CleanUp(),
 				artifacts.UploadAll(),
+				python.TransformWheelTask(),
+				files.Upload(),
 				terraform.Interpolate(),
 				terraform.Write(),
 				terraform.StatePull(),

diff --git a/bundle/python/transform.go b/bundle/python/transform.go
new file mode 100644
index 00000000..69bb5766
--- /dev/null
+++ b/bundle/python/transform.go
@@ -0,0 +1,109 @@
+package python
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+)
+
+const NOTEBOOK_TEMPLATE = `# Databricks notebook source
+%python
+{{range .Libraries}}
+%pip install --force-reinstall {{.Whl}}
+{{end}}
+
+try:
+    from importlib import metadata
+except ImportError: # for Python<3.8
+    import subprocess
+    import sys
+
+    subprocess.check_call([sys.executable, "-m", "pip", "install", "importlib-metadata"])
+    import importlib_metadata as metadata
+
+from contextlib import redirect_stdout
+import io
+import sys
+sys.argv = [{{.Params}}]
+
+entry = [ep for ep in metadata.distribution("{{.Task.PackageName}}").entry_points if ep.name == "{{.Task.EntryPoint}}"]
+
+f = io.StringIO()
+with redirect_stdout(f):
+    if entry:
+        entry[0].load()()
+    else:
+        raise ImportError("Entry point '{{.Task.EntryPoint}}' not found")
+s = f.getvalue()
+dbutils.notebook.exit(s)
+`
+
+// This mutator takes the wheel task and transforms it into a notebook
+// which installs the uploaded wheels using %pip and then calls the
+// corresponding entry point.
+func TransformWheelTask() bundle.Mutator {
+	return mutator.NewTrampoline(
+		"python_wheel",
+		&pythonTrampoline{},
+		NOTEBOOK_TEMPLATE,
+	)
+}
+
+type pythonTrampoline struct{}
+
+func (t *pythonTrampoline) CleanUp(task *jobs.Task) error {
+	task.PythonWheelTask = nil
+	task.Libraries = nil
+
+	return nil
+}
+
+func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey {
+	r := b.Config.Resources
+	result := make([]mutator.TaskWithJobKey, 0)
+	for k := range b.Config.Resources.Jobs {
+		tasks := r.Jobs[k].JobSettings.Tasks
+		for i := range tasks {
+			task := &tasks[i]
+			result = append(result, mutator.TaskWithJobKey{
+				JobKey: k,
+				Task:   task,
+			})
+		}
+	}
+	return result
+}
+
+func (t *pythonTrampoline) GetTemplateData(task *jobs.Task) (map[string]any, error) {
+	params, err := t.generateParameters(task.PythonWheelTask)
+	if err != nil {
+		return nil, err
+	}
+
+	data := map[string]any{
+		"Libraries": task.Libraries,
+		"Params":    params,
+		"Task":      task.PythonWheelTask,
+	}
+
+	return data, nil
+}
+
+func (t *pythonTrampoline) generateParameters(task *jobs.PythonWheelTask) (string, error) {
+	if task.Parameters != nil && task.NamedParameters != nil {
+		return "", fmt.Errorf("not allowed to pass both parameters and named_parameters")
+	}
+	params := append([]string{"python"}, task.Parameters...)
+	for k, v := range task.NamedParameters {
+		params = append(params, fmt.Sprintf("%s=%s", k, v))
+	}
+
+	for i := range params {
+		params[i] = strconv.Quote(params[i])
+	}
+	return strings.Join(params, ", "), nil
+}

diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go
new file mode 100644
index 00000000..1baebfc8
--- /dev/null
+++ b/bundle/python/transform_test.go
@@ -0,0 +1,66 @@
+package python
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/require"
+)
+
+type testCase struct {
+	Actual   []string
+	Expected string
+}
+type NamedParams map[string]string
+type testCaseNamed struct {
+	Actual   NamedParams
+	Expected string
+}
+
+var paramsTestCases []testCase = []testCase{
+	{[]string{}, `"python"`},
+	{[]string{"a"}, `"python", "a"`},
+	{[]string{"a", "b"}, `"python", "a", "b"`},
+	{[]string{"123!@#$%^&*()-="}, `"python", "123!@#$%^&*()-="`},
+	{[]string{`{"a": 1}`}, `"python", "{\"a\": 1}"`},
+}
+
+var paramsTestCasesNamed []testCaseNamed = []testCaseNamed{
+	{NamedParams{}, `"python"`},
+	{NamedParams{"a": "1"}, `"python", "a=1"`},
+	{NamedParams{"a": "'1'"}, `"python", "a='1'"`},
+	{NamedParams{"a": `"1"`}, `"python", "a=\"1\""`},
+	{NamedParams{"a": "1", "b": "2"}, `"python", "a=1", "b=2"`},
+	{NamedParams{"data": `{"a": 1}`}, `"python", "data={\"a\": 1}"`},
+}
+
+func TestGenerateParameters(t *testing.T) {
+	trampoline := pythonTrampoline{}
+	for _, c := range paramsTestCases {
+		task := &jobs.PythonWheelTask{Parameters: c.Actual}
+		result, err := trampoline.generateParameters(task)
+		require.NoError(t, err)
+		require.Equal(t, c.Expected, result)
+	}
+}
+
+func TestGenerateNamedParameters(t *testing.T) {
+	trampoline := pythonTrampoline{}
+	for _, c := range paramsTestCasesNamed {
+		task := &jobs.PythonWheelTask{NamedParameters: c.Actual}
+		result, err := trampoline.generateParameters(task)
+		require.NoError(t, err)
+
+		// Parameter order can be non-deterministic, so just check that the
+		// expected parameters exist.
+		require.ElementsMatch(t, strings.Split(c.Expected, ","), strings.Split(result, ","))
+	}
+}
+
+func TestGenerateBoth(t *testing.T) {
+	trampoline := pythonTrampoline{}
+	task := &jobs.PythonWheelTask{NamedParameters: map[string]string{"a": "1"}, Parameters: []string{"b"}}
+	_, err := trampoline.generateParameters(task)
+	require.Error(t, err)
+	require.ErrorContains(t, err, "not allowed to pass both parameters and named_parameters")
+}

From ca2f1dc06c8a7324ac38c35c0f35856348cec918 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Wed, 30 Aug 2023 15:51:15 +0200
Subject: [PATCH 043/310] Filter down to Python wheel tasks only for trampoline (#712)

## Changes

Fixes an issue introduced in #635: the trampoline now transforms only tasks that define a `python_wheel_task` (see the sketch below).

## Tests

Added a new unit test to confirm correct behavior. Manually deployed a sample bundle.
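A sketch of the resulting behavior (illustrative only, not part of the patch; the task keys and notebook path below are made up): in a job that mixes task types, only the wheel task is rewritten into a trampoline notebook task.

```yaml
resources:
  jobs:
    mixed_job:
      tasks:
        - task_key: wheel_task      # transformed into a trampoline notebook task
          python_wheel_task:
            package_name: "my_test_code"
            entry_point: "run"
        - task_key: notebook_task   # skipped by the trampoline mutator, deployed as-is
          notebook_task:
            notebook_path: ./my_notebook.py
```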
---
 bundle/python/transform.go      |  6 ++++
 bundle/python/transform_test.go | 51 +++++++++++++++++++++++++++------
 2 files changed, 49 insertions(+), 8 deletions(-)

diff --git a/bundle/python/transform.go b/bundle/python/transform.go
index 69bb5766..6ec75a03 100644
--- a/bundle/python/transform.go
+++ b/bundle/python/transform.go
@@ -69,6 +69,12 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey {
 		tasks := r.Jobs[k].JobSettings.Tasks
 		for i := range tasks {
 			task := &tasks[i]
+
+			// Keep only Python wheel tasks
+			if task.PythonWheelTask == nil {
+				continue
+			}
+
 			result = append(result, mutator.TaskWithJobKey{
 				JobKey: k,
 				Task:   task,
diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go
index 1baebfc8..c7b1f36e 100644
--- a/bundle/python/transform_test.go
+++ b/bundle/python/transform_test.go
@@ -4,6 +4,9 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/stretchr/testify/require"
 )
@@ -12,9 +15,9 @@ type testCase struct {
 	Actual   []string
 	Expected string
 }
-type NamedParams map[string]string
+
 type testCaseNamed struct {
-	Actual   NamedParams
+	Actual   map[string]string
 	Expected string
 }
 
@@ -27,12 +30,12 @@ var paramsTestCases []testCase = []testCase{
 }
 
 var paramsTestCasesNamed []testCaseNamed = []testCaseNamed{
-	{NamedParams{}, `"python"`},
-	{NamedParams{"a": "1"}, `"python", "a=1"`},
-	{NamedParams{"a": "'1'"}, `"python", "a='1'"`},
-	{NamedParams{"a": `"1"`}, `"python", "a=\"1\""`},
-	{NamedParams{"a": "1", "b": "2"}, `"python", "a=1", "b=2"`},
-	{NamedParams{"data": `{"a": 1}`}, `"python", "data={\"a\": 1}"`},
+	{map[string]string{}, `"python"`},
+	{map[string]string{"a": "1"}, `"python", "a=1"`},
+	{map[string]string{"a": "'1'"}, `"python", "a='1'"`},
+	{map[string]string{"a": `"1"`}, `"python", "a=\"1\""`},
+	{map[string]string{"a": "1", "b": "2"}, `"python", "a=1", "b=2"`},
+	{map[string]string{"data": `{"a": 1}`}, `"python", "data={\"a\": 1}"`},
 }
 
 func TestGenerateParameters(t *testing.T) {
@@ -64,3 +67,35 @@ func TestGenerateBoth(t *testing.T) {
 	require.Error(t, err)
 	require.ErrorContains(t, err, "not allowed to pass both parameters and named_parameters")
 }
+
+func TestTransformFiltersWheelTasksOnly(t *testing.T) {
+	trampoline := pythonTrampoline{}
+	bundle := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {
+						JobSettings: &jobs.JobSettings{
+							Tasks: []jobs.Task{
+								{
+									TaskKey:         "key1",
+									PythonWheelTask: &jobs.PythonWheelTask{},
+								},
+								{
+									TaskKey:      "key2",
+									NotebookTask: &jobs.NotebookTask{},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	tasks := trampoline.GetTasks(bundle)
+	require.Len(t, tasks, 1)
+	require.Equal(t, "job1", tasks[0].JobKey)
+	require.Equal(t, "key1", tasks[0].Task.TaskKey)
+	require.NotNil(t, tasks[0].Task.PythonWheelTask)
+}
From aa9e1fc41ce5b3abd99f55590c269149596c3611 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Wed, 30 Aug 2023 15:58:28 +0200
Subject: [PATCH 044/310] Update Terraform provider schema structs from 1.23.0 (#713)

## Changes

The provider at version 1.24.0 includes a regression for the MLflow
model resource. To fix this, we explicitly pin the provider version to
the version we generate bindings for.

## Tests

Confirmed that a deploy of said MLflow model resource works with 1.23.0.
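As context for the pinning below: `releases.ExactVersion` from
`hc-install` installs precisely the requested release, whereas
`releases.LatestVersion` does not respect version constraints. A
minimal standalone sketch of the pattern (the install directory is an
illustrative placeholder):

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/go-version"
	"github.com/hashicorp/hc-install/product"
	"github.com/hashicorp/hc-install/releases"
)

func main() {
	// ExactVersion guarantees the requested Terraform version is installed.
	installer := &releases.ExactVersion{
		Product:    product.Terraform,
		Version:    version.Must(version.NewVersion("1.5.5")),
		InstallDir: "/tmp/tf-install", // illustrative placeholder
	}
	execPath, err := installer.Install(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("terraform binary installed at %s", execPath)
}
```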
--- .../tf/codegen/generator/generator.go | 30 +++++++++++++++++ bundle/internal/tf/codegen/schema/generate.go | 8 +++-- bundle/internal/tf/codegen/schema/version.go | 3 ++ .../tf/codegen/templates/root.go.tmpl | 32 +++++++++++++++++++ .../internal/tf/schema/data_source_cluster.go | 1 + .../tf/schema/data_source_instance_pool.go | 1 + bundle/internal/tf/schema/data_source_job.go | 21 ++++++++++++ bundle/internal/tf/schema/resource_cluster.go | 1 + .../internal/tf/schema/resource_connection.go | 15 +++++++++ .../tf/schema/resource_instance_pool.go | 1 + bundle/internal/tf/schema/resource_job.go | 21 ++++++++++++ .../tf/schema/resource_model_serving.go | 1 + .../internal/tf/schema/resource_pipeline.go | 1 + bundle/internal/tf/schema/root.go | 2 +- 14 files changed, 134 insertions(+), 4 deletions(-) create mode 100644 bundle/internal/tf/codegen/schema/version.go create mode 100644 bundle/internal/tf/codegen/templates/root.go.tmpl create mode 100644 bundle/internal/tf/schema/resource_connection.go diff --git a/bundle/internal/tf/codegen/generator/generator.go b/bundle/internal/tf/codegen/generator/generator.go index 2bd78d96..86d76243 100644 --- a/bundle/internal/tf/codegen/generator/generator.go +++ b/bundle/internal/tf/codegen/generator/generator.go @@ -8,6 +8,7 @@ import ( "strings" "text/template" + schemapkg "github.com/databricks/cli/bundle/internal/tf/codegen/schema" tfjson "github.com/hashicorp/terraform-json" ) @@ -32,6 +33,23 @@ func (c *collection) Generate(path string) error { return tmpl.Execute(f, c) } +type root struct { + OutputFile string + ProviderVersion string +} + +func (r *root) Generate(path string) error { + tmpl := template.Must(template.ParseFiles(fmt.Sprintf("./templates/%s.tmpl", r.OutputFile))) + f, err := os.Create(filepath.Join(path, r.OutputFile)) + if err != nil { + return err + } + + defer f.Close() + + return tmpl.Execute(f, r) +} + func Run(ctx context.Context, schema *tfjson.ProviderSchema, path string) error { // Generate types for resources. 
var resources []*namedBlock @@ -105,5 +123,17 @@ func Run(ctx context.Context, schema *tfjson.ProviderSchema, path string) error } } + // Generate root.go + { + r := &root{ + OutputFile: "root.go", + ProviderVersion: schemapkg.ProviderVersion, + } + err := r.Generate(path) + if err != nil { + return err + } + } + return nil } diff --git a/bundle/internal/tf/codegen/schema/generate.go b/bundle/internal/tf/codegen/schema/generate.go index 4d3e2832..de2d2722 100644 --- a/bundle/internal/tf/codegen/schema/generate.go +++ b/bundle/internal/tf/codegen/schema/generate.go @@ -8,6 +8,7 @@ import ( "os" "path/filepath" + "github.com/hashicorp/go-version" "github.com/hashicorp/hc-install/product" "github.com/hashicorp/hc-install/releases" "github.com/hashicorp/terraform-exec/tfexec" @@ -19,7 +20,7 @@ func (s *Schema) writeTerraformBlock(_ context.Context) error { "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ "source": "databricks/databricks", - "version": ">= 1.0.0", + "version": ProviderVersion, }, }, }, @@ -40,9 +41,10 @@ func (s *Schema) installTerraform(ctx context.Context) (path string, err error) return } - installer := &releases.LatestVersion{ - InstallDir: installDir, + installer := &releases.ExactVersion{ Product: product.Terraform, + Version: version.Must(version.NewVersion("1.5.5")), + InstallDir: installDir, } installer.SetLogger(log.Default()) diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go new file mode 100644 index 00000000..84456731 --- /dev/null +++ b/bundle/internal/tf/codegen/schema/version.go @@ -0,0 +1,3 @@ +package schema + +const ProviderVersion = "1.23.0" diff --git a/bundle/internal/tf/codegen/templates/root.go.tmpl b/bundle/internal/tf/codegen/templates/root.go.tmpl new file mode 100644 index 00000000..3beb3007 --- /dev/null +++ b/bundle/internal/tf/codegen/templates/root.go.tmpl @@ -0,0 +1,32 @@ +package schema + +type Providers struct { + Databricks *Config `json:"databricks,omitempty"` +} + +func NewProviders() *Providers { + return &Providers{ + Databricks: &Config{}, + } +} + +type Root struct { + Terraform map[string]any `json:"terraform"` + + Provider *Providers `json:"provider,omitempty"` + Data *DataSources `json:"data,omitempty"` + Resource *Resources `json:"resource,omitempty"` +} + +func NewRoot() *Root { + return &Root{ + Terraform: map[string]interface{}{ + "required_providers": map[string]interface{}{ + "databricks": map[string]interface{}{ + "source": "databricks/databricks", + "version": "1.23.0", + }, + }, + }, + } +} diff --git a/bundle/internal/tf/schema/data_source_cluster.go b/bundle/internal/tf/schema/data_source_cluster.go index b5017402..2aa6fb5d 100644 --- a/bundle/internal/tf/schema/data_source_cluster.go +++ b/bundle/internal/tf/schema/data_source_cluster.go @@ -90,6 +90,7 @@ type DataSourceClusterClusterInfoGcpAttributes struct { Availability string `json:"availability,omitempty"` BootDiskSize int `json:"boot_disk_size,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` ZoneId string `json:"zone_id,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_instance_pool.go b/bundle/internal/tf/schema/data_source_instance_pool.go index 49824717..240083d6 100644 --- a/bundle/internal/tf/schema/data_source_instance_pool.go +++ b/bundle/internal/tf/schema/data_source_instance_pool.go @@ -26,6 
+26,7 @@ type DataSourceInstancePoolPoolInfoDiskSpec struct { type DataSourceInstancePoolPoolInfoGcpAttributes struct { GcpAvailability string `json:"gcp_availability,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` } type DataSourceInstancePoolPoolInfoInstancePoolFleetAttributesFleetOnDemandOption struct { diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index 6d2d1aa9..d251dfe5 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -124,6 +124,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterGcpAttributes struct { Availability string `json:"availability,omitempty"` BootDiskSize int `json:"boot_disk_size,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` ZoneId string `json:"zone_id,omitempty"` } @@ -305,6 +306,7 @@ type DataSourceJobJobSettingsSettingsNewClusterGcpAttributes struct { Availability string `json:"availability,omitempty"` BootDiskSize int `json:"boot_disk_size,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` ZoneId string `json:"zone_id,omitempty"` } @@ -401,6 +403,11 @@ type DataSourceJobJobSettingsSettingsNotificationSettings struct { NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` } +type DataSourceJobJobSettingsSettingsParameter struct { + Default string `json:"default,omitempty"` + Name string `json:"name,omitempty"` +} + type DataSourceJobJobSettingsSettingsPipelineTask struct { FullRefresh bool `json:"full_refresh,omitempty"` PipelineId string `json:"pipeline_id"` @@ -421,6 +428,11 @@ type DataSourceJobJobSettingsSettingsRunAs struct { UserName string `json:"user_name,omitempty"` } +type DataSourceJobJobSettingsSettingsRunJobTask struct { + JobId string `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` +} + type DataSourceJobJobSettingsSettingsSchedule struct { PauseStatus string `json:"pause_status,omitempty"` QuartzCronExpression string `json:"quartz_cron_expression"` @@ -573,6 +585,7 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterGcpAttributes struct { Availability string `json:"availability,omitempty"` BootDiskSize int `json:"boot_disk_size,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` ZoneId string `json:"zone_id,omitempty"` } @@ -682,6 +695,11 @@ type DataSourceJobJobSettingsSettingsTaskPythonWheelTask struct { Parameters []string `json:"parameters,omitempty"` } +type DataSourceJobJobSettingsSettingsTaskRunJobTask struct { + JobId string `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` +} + type DataSourceJobJobSettingsSettingsTaskSparkJarTask struct { JarUri string `json:"jar_uri,omitempty"` MainClassName string `json:"main_class_name,omitempty"` @@ -760,6 +778,7 @@ type DataSourceJobJobSettingsSettingsTask struct { NotificationSettings *DataSourceJobJobSettingsSettingsTaskNotificationSettings `json:"notification_settings,omitempty"` PipelineTask *DataSourceJobJobSettingsSettingsTaskPipelineTask `json:"pipeline_task,omitempty"` PythonWheelTask 
*DataSourceJobJobSettingsSettingsTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *DataSourceJobJobSettingsSettingsTaskRunJobTask `json:"run_job_task,omitempty"` SparkJarTask *DataSourceJobJobSettingsSettingsTaskSparkJarTask `json:"spark_jar_task,omitempty"` SparkPythonTask *DataSourceJobJobSettingsSettingsTaskSparkPythonTask `json:"spark_python_task,omitempty"` SparkSubmitTask *DataSourceJobJobSettingsSettingsTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` @@ -821,10 +840,12 @@ type DataSourceJobJobSettingsSettings struct { NewCluster *DataSourceJobJobSettingsSettingsNewCluster `json:"new_cluster,omitempty"` NotebookTask *DataSourceJobJobSettingsSettingsNotebookTask `json:"notebook_task,omitempty"` NotificationSettings *DataSourceJobJobSettingsSettingsNotificationSettings `json:"notification_settings,omitempty"` + Parameter []DataSourceJobJobSettingsSettingsParameter `json:"parameter,omitempty"` PipelineTask *DataSourceJobJobSettingsSettingsPipelineTask `json:"pipeline_task,omitempty"` PythonWheelTask *DataSourceJobJobSettingsSettingsPythonWheelTask `json:"python_wheel_task,omitempty"` Queue *DataSourceJobJobSettingsSettingsQueue `json:"queue,omitempty"` RunAs *DataSourceJobJobSettingsSettingsRunAs `json:"run_as,omitempty"` + RunJobTask *DataSourceJobJobSettingsSettingsRunJobTask `json:"run_job_task,omitempty"` Schedule *DataSourceJobJobSettingsSettingsSchedule `json:"schedule,omitempty"` SparkJarTask *DataSourceJobJobSettingsSettingsSparkJarTask `json:"spark_jar_task,omitempty"` SparkPythonTask *DataSourceJobJobSettingsSettingsSparkPythonTask `json:"spark_python_task,omitempty"` diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index a95b8c13..bb4e3582 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -68,6 +68,7 @@ type ResourceClusterGcpAttributes struct { Availability string `json:"availability,omitempty"` BootDiskSize int `json:"boot_disk_size,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` ZoneId string `json:"zone_id,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_connection.go b/bundle/internal/tf/schema/resource_connection.go new file mode 100644 index 00000000..a249a539 --- /dev/null +++ b/bundle/internal/tf/schema/resource_connection.go @@ -0,0 +1,15 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceConnection struct { + Comment string `json:"comment,omitempty"` + ConnectionType string `json:"connection_type"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Options map[string]string `json:"options"` + Owner string `json:"owner,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_instance_pool.go b/bundle/internal/tf/schema/resource_instance_pool.go index 2c322121..f524b3fc 100644 --- a/bundle/internal/tf/schema/resource_instance_pool.go +++ b/bundle/internal/tf/schema/resource_instance_pool.go @@ -26,6 +26,7 @@ type ResourceInstancePoolDiskSpec struct { type ResourceInstancePoolGcpAttributes struct { GcpAvailability string `json:"gcp_availability,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` } type ResourceInstancePoolInstancePoolFleetAttributesFleetOnDemandOption struct { diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 77b681ee..50101400 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -124,6 +124,7 @@ type ResourceJobJobClusterNewClusterGcpAttributes struct { Availability string `json:"availability,omitempty"` BootDiskSize int `json:"boot_disk_size,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` ZoneId string `json:"zone_id,omitempty"` } @@ -305,6 +306,7 @@ type ResourceJobNewClusterGcpAttributes struct { Availability string `json:"availability,omitempty"` BootDiskSize int `json:"boot_disk_size,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` ZoneId string `json:"zone_id,omitempty"` } @@ -401,6 +403,11 @@ type ResourceJobNotificationSettings struct { NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` } +type ResourceJobParameter struct { + Default string `json:"default,omitempty"` + Name string `json:"name,omitempty"` +} + type ResourceJobPipelineTask struct { FullRefresh bool `json:"full_refresh,omitempty"` PipelineId string `json:"pipeline_id"` @@ -421,6 +428,11 @@ type ResourceJobRunAs struct { UserName string `json:"user_name,omitempty"` } +type ResourceJobRunJobTask struct { + JobId string `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` +} + type ResourceJobSchedule struct { PauseStatus string `json:"pause_status,omitempty"` QuartzCronExpression string `json:"quartz_cron_expression"` @@ -573,6 +585,7 @@ type ResourceJobTaskNewClusterGcpAttributes struct { Availability string `json:"availability,omitempty"` BootDiskSize int `json:"boot_disk_size,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` ZoneId string `json:"zone_id,omitempty"` } @@ -682,6 +695,11 @@ type ResourceJobTaskPythonWheelTask struct { Parameters []string `json:"parameters,omitempty"` } +type ResourceJobTaskRunJobTask struct { + JobId string `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` +} + type 
ResourceJobTaskSparkJarTask struct { JarUri string `json:"jar_uri,omitempty"` MainClassName string `json:"main_class_name,omitempty"` @@ -760,6 +778,7 @@ type ResourceJobTask struct { NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` @@ -825,10 +844,12 @@ type ResourceJob struct { NewCluster *ResourceJobNewCluster `json:"new_cluster,omitempty"` NotebookTask *ResourceJobNotebookTask `json:"notebook_task,omitempty"` NotificationSettings *ResourceJobNotificationSettings `json:"notification_settings,omitempty"` + Parameter []ResourceJobParameter `json:"parameter,omitempty"` PipelineTask *ResourceJobPipelineTask `json:"pipeline_task,omitempty"` PythonWheelTask *ResourceJobPythonWheelTask `json:"python_wheel_task,omitempty"` Queue *ResourceJobQueue `json:"queue,omitempty"` RunAs *ResourceJobRunAs `json:"run_as,omitempty"` + RunJobTask *ResourceJobRunJobTask `json:"run_job_task,omitempty"` Schedule *ResourceJobSchedule `json:"schedule,omitempty"` SparkJarTask *ResourceJobSparkJarTask `json:"spark_jar_task,omitempty"` SparkPythonTask *ResourceJobSparkPythonTask `json:"spark_python_task,omitempty"` diff --git a/bundle/internal/tf/schema/resource_model_serving.go b/bundle/internal/tf/schema/resource_model_serving.go index b7ff88cc..cc5c3257 100644 --- a/bundle/internal/tf/schema/resource_model_serving.go +++ b/bundle/internal/tf/schema/resource_model_serving.go @@ -4,6 +4,7 @@ package schema type ResourceModelServingConfigServedModels struct { EnvironmentVars map[string]string `json:"environment_vars,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` ModelName string `json:"model_name"` ModelVersion string `json:"model_version"` Name string `json:"name,omitempty"` diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 9e7f71b1..5c5de9a7 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -47,6 +47,7 @@ type ResourcePipelineClusterClusterLogConf struct { type ResourcePipelineClusterGcpAttributes struct { Availability string `json:"availability,omitempty"` GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` ZoneId string `json:"zone_id,omitempty"` } diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 9cfe8491..3beb3007 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -24,7 +24,7 @@ func NewRoot() *Root { "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ "source": "databricks/databricks", - "version": ">= 1.0.0", + "version": "1.23.0", }, }, }, From 707fd6f617a7c7837d493ab9349a591a45f1cdd7 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 30 Aug 2023 07:01:08 -0700 Subject: [PATCH 045/310] Cleanup after "Add a foundation for built-in templates" (#707) ## Changes Add some cleanup based on @pietern's comments on 
https://github.com/databricks/cli/pull/685 --- bundle/config/mutator/process_target_mode.go | 5 ++++- cmd/root/bundle.go | 4 ++-- libs/auth/service_principal.go | 8 ++++++-- libs/template/helpers.go | 5 ++++- libs/template/materialize.go | 4 +++- 5 files changed, 19 insertions(+), 7 deletions(-) diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 3a00d42f..be93512b 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -160,7 +160,10 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error { } return transformDevelopmentMode(b) case config.Production: - isPrincipal := auth.IsServicePrincipal(ctx, b.WorkspaceClient(), b.Config.Workspace.CurrentUser.Id) + isPrincipal, err := auth.IsServicePrincipal(ctx, b.WorkspaceClient(), b.Config.Workspace.CurrentUser.Id) + if err != nil { + return err + } return validateProductionMode(ctx, b, isPrincipal) case "": // No action diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index fe97fbf2..10cce67a 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -43,7 +43,7 @@ func getTarget(cmd *cobra.Command) (value string) { return target } -func GetProfile(cmd *cobra.Command) (value string) { +func getProfile(cmd *cobra.Command) (value string) { // The command line flag takes precedence. flag := cmd.Flag("profile") if flag != nil { @@ -70,7 +70,7 @@ func loadBundle(cmd *cobra.Command, args []string, load func(ctx context.Context return nil, nil } - profile := GetProfile(cmd) + profile := getProfile(cmd) if profile != "" { b.Config.Workspace.Profile = profile } diff --git a/libs/auth/service_principal.go b/libs/auth/service_principal.go index 58fcc6a7..a6740b50 100644 --- a/libs/auth/service_principal.go +++ b/libs/auth/service_principal.go @@ -4,13 +4,17 @@ import ( "context" "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/apierr" ) // Determines whether a given user id is a service principal. // This function uses a heuristic: if no user exists with this id, we assume // it's a service principal. Unfortunately, the standard service principal API is too // slow for our purposes. 
-func IsServicePrincipal(ctx context.Context, ws *databricks.WorkspaceClient, userId string) bool { +func IsServicePrincipal(ctx context.Context, ws *databricks.WorkspaceClient, userId string) (bool, error) { _, err := ws.Users.GetById(ctx, userId) - return err != nil + if apierr.IsMissing(err) { + return true, nil + } + return false, err } diff --git a/libs/template/helpers.go b/libs/template/helpers.go index b8f2fe45..f947d9ba 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -104,7 +104,10 @@ func loadHelpers(ctx context.Context) template.FuncMap { return false, err } } - result := auth.IsServicePrincipal(ctx, w, user.Id) + result, err := auth.IsServicePrincipal(ctx, w, user.Id) + if err != nil { + return false, err + } is_service_principal = &result return result, nil }, diff --git a/libs/template/materialize.go b/libs/template/materialize.go index 5422160d..8517858f 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -7,6 +7,8 @@ import ( "os" "path" "path/filepath" + + "github.com/databricks/cli/libs/cmdio" ) const libraryDirName = "library" @@ -80,7 +82,7 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st if err != nil { return err } - println("✨ Successfully initialized template") + cmdio.LogString(ctx, "✨ Successfully initialized template") return nil } From 46b999ed426ac122a0b915ad8a49bd7eec809493 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 30 Aug 2023 16:08:37 +0200 Subject: [PATCH 046/310] Pin Terraform binary version to 1.5.5 (#715) ## Changes The installer doesn't respect the version constraints if they are specified. Source: [the vc argument is not used](https://github.com/hashicorp/hc-install/blob/850464c6016513fc7ad47114d010080ec16f32cb/releases/latest_version.go#L158-L177). ## Tests Confirmed manually. --- bundle/deploy/terraform/init.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 924c1f09..6df7b8d4 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -55,10 +55,10 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con } // Download Terraform to private bin directory. 
-	installer := &releases.LatestVersion{
-		Product:     product.Terraform,
-		Constraints: version.MustConstraints(version.NewConstraint("<=1.5.5")),
-		InstallDir:  binDir,
+	installer := &releases.ExactVersion{
+		Product:    product.Terraform,
+		Version:    version.Must(version.NewVersion("1.5.5")),
+		InstallDir: binDir,
 	}
 	execPath, err = installer.Install(ctx)
 	if err != nil {
From a548eba492883866e49157d24fd252f82f0029c0 Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Wed, 30 Aug 2023 16:09:15 +0200
Subject: [PATCH 047/310] Test transform when no Python wheel tasks defined (#714)

## Changes

Fixed a panic in the Python wheel transform when no Python wheel tasks
are defined.

## Tests

Added a regression test.

---
 bundle/python/transform_test.go | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go
index c7b1f36e..fb2c23e4 100644
--- a/bundle/python/transform_test.go
+++ b/bundle/python/transform_test.go
@@ -1,6 +1,7 @@
 package python
 
 import (
+	"context"
 	"strings"
 	"testing"
 
@@ -99,3 +100,34 @@ func TestTransformFiltersWheelTasksOnly(t *testing.T) {
 	require.Equal(t, "key1", tasks[0].Task.TaskKey)
 	require.NotNil(t, tasks[0].Task.PythonWheelTask)
 }
+
+func TestNoPanicWithNoPythonWheelTasks(t *testing.T) {
+	tmpDir := t.TempDir()
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Path: tmpDir,
+			Bundle: config.Bundle{
+				Target: "development",
+			},
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"test": {
+						Paths: resources.Paths{
+							ConfigFilePath: tmpDir,
+						},
+						JobSettings: &jobs.JobSettings{
+							Tasks: []jobs.Task{
+								{
+									TaskKey:      "notebook_task",
+									NotebookTask: &jobs.NotebookTask{}},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	trampoline := TransformWheelTask()
+	err := bundle.Apply(context.Background(), b, trampoline)
+	require.NoError(t, err)
+}
From deebaa89f7f4448963878e27e811e0908dda2ad7 Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Wed, 30 Aug 2023 16:31:36 +0200
Subject: [PATCH 048/310] Release v0.203.3 (#716)

Bundles:
 * Support cluster overrides with cluster_key and compute_key ([#696](https://github.com/databricks/cli/pull/696)).
 * Allow referencing local Python wheels without artifacts section defined ([#703](https://github.com/databricks/cli/pull/703)).
 * Fixed --environment flag ([#705](https://github.com/databricks/cli/pull/705)).
 * Correctly identify local paths in libraries section ([#702](https://github.com/databricks/cli/pull/702)).
 * Fixed path joining in FindFilesWithSuffixInPath ([#704](https://github.com/databricks/cli/pull/704)).
 * Added transformation mutator for Python wheel task for them to work on DBR <13.1 ([#635](https://github.com/databricks/cli/pull/635)).

Internal:
 * Add a foundation for built-in templates ([#685](https://github.com/databricks/cli/pull/685)).
 * Test transform when no Python wheel tasks defined ([#714](https://github.com/databricks/cli/pull/714)).
 * Pin Terraform binary version to 1.5.5 ([#715](https://github.com/databricks/cli/pull/715)).
 * Cleanup after "Add a foundation for built-in templates" ([#707](https://github.com/databricks/cli/pull/707)).
 * Filter down to Python wheel tasks only for trampoline ([#712](https://github.com/databricks/cli/pull/712)).
 * Update Terraform provider schema structs from 1.23.0 ([#713](https://github.com/databricks/cli/pull/713)).
--- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa0dec13..6fcbab8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Version changelog +## 0.203.3 + +Bundles: + * Support cluster overrides with cluster_key and compute_key ([#696](https://github.com/databricks/cli/pull/696)). + * Allow referencing local Python wheels without artifacts section defined ([#703](https://github.com/databricks/cli/pull/703)). + * Fixed --environment flag ([#705](https://github.com/databricks/cli/pull/705)). + * Correctly identify local paths in libraries section ([#702](https://github.com/databricks/cli/pull/702)). + * Fixed path joining in FindFilesWithSuffixInPath ([#704](https://github.com/databricks/cli/pull/704)). + * Added transformation mutator for Python wheel task for them to work on DBR <13.1 ([#635](https://github.com/databricks/cli/pull/635)). + +Internal: + * Add a foundation for built-in templates ([#685](https://github.com/databricks/cli/pull/685)). + * Test transform when no Python wheel tasks defined ([#714](https://github.com/databricks/cli/pull/714)). + * Pin Terraform binary version to 1.5.5 ([#715](https://github.com/databricks/cli/pull/715)). + * Cleanup after "Add a foundation for built-in templates" ([#707](https://github.com/databricks/cli/pull/707)). + * Filter down to Python wheel tasks only for trampoline ([#712](https://github.com/databricks/cli/pull/712)). + * Update Terraform provider schema structs from 1.23.0 ([#713](https://github.com/databricks/cli/pull/713)). + ## 0.203.2 CLI: From cc1038fbd575c1147459a10d98cbf7b6bfc2c746 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 30 Aug 2023 16:57:34 +0200 Subject: [PATCH 049/310] Upgrade to actions/setup-go@v4 (#718) ## Changes Version 4 enables caching by default so we no longer need to explicitly enable it: https://github.com/actions/setup-go#v4. The build cache only reuses a cache from a repo's default branch, which for this repository is `main`. After enabling the merge queue, we no longer run builds on the `main` branch after push, but on merge queue branches. With no more builds on the `main` branch there is no longer a cache to reuse. This change fixes that by making the `release(-snapshot)?` workflows use the same caching mechanism. These run off of the `main` branch, so the cache they save can be reused by builds triggered on PRs or from the merge queue. ## Tests We have to merge this to see if it works. --- .github/workflows/push.yml | 7 +++---- .github/workflows/release-snapshot.yml | 19 +------------------ .github/workflows/release.yml | 19 +------------------ 3 files changed, 5 insertions(+), 40 deletions(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 6f14fe88..3209ae93 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -26,10 +26,9 @@ jobs: run: git fetch --prune --unshallow - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: 1.21.0 - cache: true - name: Set go env run: | @@ -54,9 +53,9 @@ jobs: uses: actions/checkout@v3 - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.21 + go-version: 1.21.0 # No need to download cached dependencies when running gofmt. 
cache: false diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index 130d49dd..fbf5421b 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -19,27 +19,10 @@ jobs: run: git fetch --prune --unshallow - name: Setup Go - id: go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: 1.21.0 - - name: Locate cache paths - id: cache - run: | - echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT - echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT - - # Note: use custom caching because below performs a cross platform build - # through goreleaser and don't want to share a cache with the test builds. - - name: Setup caching - uses: actions/cache@v3 - with: - path: | - ${{ steps.cache.outputs.GOMODCACHE }} - ${{ steps.cache.outputs.GOCACHE }} - key: release-${{ runner.os }}-go-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', '.goreleaser.yaml') }} - - name: Hide snapshot tag to outsmart GoReleaser run: git tag -d snapshot || true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5992dcb4..c166fc5b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,27 +18,10 @@ jobs: run: git fetch --prune --unshallow - name: Setup Go - id: go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: 1.21.0 - - name: Locate cache paths - id: cache - run: | - echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT - echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT - - # Note: use custom caching because below performs a cross platform build - # through goreleaser and don't want to share a cache with the test builds. - - name: Setup caching - uses: actions/cache@v3 - with: - path: | - ${{ steps.cache.outputs.GOMODCACHE }} - ${{ steps.cache.outputs.GOCACHE }} - key: release-${{ runner.os }}-go-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', '.goreleaser.yaml') }} - - name: Run GoReleaser uses: goreleaser/goreleaser-action@v4 with: From 86c30dd3289751b30a232b415fcc2b4d76232187 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 31 Aug 2023 16:10:32 +0200 Subject: [PATCH 050/310] Fixed artifact file uploading on Windows and wheel execution on DBR 13.3 (#722) ## Changes Fixed artifact file uploading on Windows and wheel execution on DBR 13.3 Fixes #719, #720 ## Tests Added regression test for Windows --- bundle/artifacts/artifacts.go | 5 +- bundle/artifacts/artifacts_test.go | 89 ++++++++++++++++++++++++++++++ bundle/python/transform.go | 2 + 3 files changed, 94 insertions(+), 2 deletions(-) create mode 100644 bundle/artifacts/artifacts_test.go diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index c5413121..0331adb7 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "path" + "path/filepath" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" @@ -107,7 +108,7 @@ func uploadArtifact(ctx context.Context, a *config.Artifact, b *bundle.Bundle) e for i := range a.Files { f := &a.Files[i] if f.NeedsUpload() { - filename := path.Base(f.Source) + filename := filepath.Base(f.Source) cmdio.LogString(ctx, fmt.Sprintf("artifacts.Upload(%s): Uploading...", filename)) remotePath, err := uploadArtifactFile(ctx, f.Source, b) if err != nil { @@ -136,7 +137,7 @@ func uploadArtifactFile(ctx context.Context, file string, b *bundle.Bundle) (str } fileHash := sha256.Sum256(raw) - remotePath := 
path.Join(uploadPath, fmt.Sprintf("%x", fileHash), path.Base(file)) + remotePath := path.Join(uploadPath, fmt.Sprintf("%x", fileHash), filepath.Base(file)) // Make sure target directory exists. err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, path.Dir(remotePath)) if err != nil { diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go new file mode 100644 index 00000000..65a1950a --- /dev/null +++ b/bundle/artifacts/artifacts_test.go @@ -0,0 +1,89 @@ +package artifacts + +import ( + "context" + "os" + "path/filepath" + "regexp" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/require" +) + +func touchEmptyFile(t *testing.T, path string) { + err := os.MkdirAll(filepath.Dir(path), 0700) + require.NoError(t, err) + f, err := os.Create(path) + require.NoError(t, err) + f.Close() +} + +type MockWorkspaceService struct { +} + +// Delete implements workspace.WorkspaceService. +func (MockWorkspaceService) Delete(ctx context.Context, request workspace.Delete) error { + panic("unimplemented") +} + +// Export implements workspace.WorkspaceService. +func (MockWorkspaceService) Export(ctx context.Context, request workspace.ExportRequest) (*workspace.ExportResponse, error) { + panic("unimplemented") +} + +// GetStatus implements workspace.WorkspaceService. +func (MockWorkspaceService) GetStatus(ctx context.Context, request workspace.GetStatusRequest) (*workspace.ObjectInfo, error) { + panic("unimplemented") +} + +// Import implements workspace.WorkspaceService. +func (MockWorkspaceService) Import(ctx context.Context, request workspace.Import) error { + return nil +} + +// List implements workspace.WorkspaceService. +func (MockWorkspaceService) List(ctx context.Context, request workspace.ListWorkspaceRequest) (*workspace.ListResponse, error) { + panic("unimplemented") +} + +// Mkdirs implements workspace.WorkspaceService. 
+func (MockWorkspaceService) Mkdirs(ctx context.Context, request workspace.Mkdirs) error { + return nil +} + +func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) { + dir := t.TempDir() + whlPath := filepath.Join(dir, "dist", "test.whl") + touchEmptyFile(t, whlPath) + b := &bundle.Bundle{ + Config: config.Root{ + Path: dir, + Bundle: config.Bundle{ + Target: "whatever", + }, + Workspace: config.Workspace{ + ArtifactsPath: "/Users/test@databricks.com/whatever", + }, + }, + } + + b.WorkspaceClient().Workspace.WithImpl(MockWorkspaceService{}) + artifact := &config.Artifact{ + Files: []config.ArtifactFile{ + { + Source: whlPath, + Libraries: []*compute.Library{ + {Whl: "dist\\test.whl"}, + }, + }, + }, + } + + err := uploadArtifact(context.Background(), artifact, b) + require.NoError(t, err) + require.Regexp(t, regexp.MustCompile("/Users/test@databricks.com/whatever/.internal/[a-z0-9]+/test.whl"), artifact.Files[0].RemotePath) +} diff --git a/bundle/python/transform.go b/bundle/python/transform.go index 6ec75a03..53db450b 100644 --- a/bundle/python/transform.go +++ b/bundle/python/transform.go @@ -16,6 +16,8 @@ const NOTEBOOK_TEMPLATE = `# Databricks notebook source %pip install --force-reinstall {{.Whl}} {{end}} +dbutils.library.restartPython() + try: from importlib import metadata except ImportError: # for Python<3.8 From e22fd73b7d23ca96bd733a8552018cb6915e2fd2 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 4 Sep 2023 00:07:17 -0700 Subject: [PATCH 051/310] Cleanup after previous PR comments (#724) ## Changes @pietern this addresses a comment from you on a recently merged PR. It also updates settings.json based on the settings VS Code adds as soon as you edit a notebook. --- .vscode/settings.json | 4 +++- bundle/config/mutator/override_compute_test.go | 9 ++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 687e0fc0..86946528 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -8,5 +8,7 @@ "files.trimFinalNewlines": true, "python.envFile": "${workspaceFolder}/.databricks/.databricks.env", "databricks.python.envFile": "${workspaceFolder}/.env", - "python.analysis.stubPath": ".vscode" + "python.analysis.stubPath": ".vscode", + "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", + "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------" } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index f04c91c4..cb37eeb5 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -2,7 +2,6 @@ package mutator_test import ( "context" - "os" "testing" "github.com/databricks/cli/bundle" @@ -16,7 +15,7 @@ import ( ) func TestOverrideDevelopment(t *testing.T) { - os.Setenv("DATABRICKS_CLUSTER_ID", "") + t.Setenv("DATABRICKS_CLUSTER_ID", "") bundle := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ @@ -62,7 +61,7 @@ func TestOverrideDevelopment(t *testing.T) { } func TestOverrideDevelopmentEnv(t *testing.T) { - os.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") + t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") bundle := &bundle.Bundle{ Config: config.Root{ Resources: config.Resources{ @@ -90,7 +89,7 @@ func TestOverrideDevelopmentEnv(t *testing.T) { } func TestOverridePipelineTask(t *testing.T) { - os.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") + 
t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
 	bundle := &bundle.Bundle{
 		Config: config.Root{
 			Resources: config.Resources{
@@ -144,7 +143,7 @@ func TestOverrideProduction(t *testing.T) {
 }
 
 func TestOverrideProductionEnv(t *testing.T) {
-	os.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
+	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
 	bundle := &bundle.Bundle{
 		Config: config.Root{
 			Resources: config.Resources{
From 83443bae8d8ad4df3758f4192c6bbe613faae9c4 Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Mon, 4 Sep 2023 11:55:01 +0200
Subject: [PATCH 052/310] Make resource and artifact paths in bundle config relative to config folder (#708)

# Warning: breaking change

## Changes

Instead of having paths in bundle config files be relative to the bundle
root even if the config file is nested, this PR makes such paths
relative to the folder where the config is located.

When the bundle is initialised, these paths will be transformed to
relative paths based on the bundle root. For example, we have a file
structure like this

```
- mybundle
| - bundle.yml
| - subfolder
| -- resource.yml
| -- my.whl
```

Previously, we had to reference `my.whl` in resource.yml like this, which
was confusing because resource.yml is in the same subfolder

```
sync:
  include:
    - ./subfolder/*.whl
...
tasks:
  - task_key: name
    libraries:
      - whl: ./subfolder/my.whl
...
```

After the change we can reference it like this (which is in line with the
current behaviour for notebooks)

```
sync:
  include:
    - ./*.whl
...
tasks:
  - task_key: name
    libraries:
      - whl: ./my.whl
...
```

## Tests

Existing `translate_path_tests` successfully passed after refactoring.

Added a couple of use cases for `Libraries` paths.

Added a bundle config test with an include config and sync section.

---------

Co-authored-by: Pieter Noordhuis
---
 bundle/config/artifact.go                     |  11 ++
 bundle/config/mutator/trampoline_test.go      |   3 +-
 bundle/config/mutator/translate_paths.go      | 147 ++++++++----------
 .../mutator/translate_paths_artifacts.go      |  42 +++++
 bundle/config/mutator/translate_paths_jobs.go | 103 ++++++++++++
 .../mutator/translate_paths_pipelines.go      |  60 +++++++
 bundle/config/mutator/translate_paths_test.go |  79 ++++++++--
 .../{resources/pkg.go => paths/paths.go}      |   2 +-
 bundle/config/resources/job.go                |   3 +-
 bundle/config/resources/mlflow_experiment.go  |   7 +-
 bundle/config/resources/mlflow_model.go       |   7 +-
 bundle/config/resources/pipeline.go           |   7 +-
 bundle/config/resources_test.go               |  19 +--
 bundle/config/root.go                         |  17 +-
 bundle/config/sync.go                         |  18 +++
 bundle/config/target.go                       |   2 +-
 bundle/python/transform_test.go               |   3 +-
 .../relative_path_with_includes/bundle.yml    |  25 +++
 .../subfolder/include.yml                     |  20 +++
 .../tests/relative_path_with_includes_test.go |  28 ++++
 20 files changed, 482 insertions(+), 121 deletions(-)
 create mode 100644 bundle/config/mutator/translate_paths_artifacts.go
 create mode 100644 bundle/config/mutator/translate_paths_jobs.go
 create mode 100644 bundle/config/mutator/translate_paths_pipelines.go
 rename bundle/config/{resources/pkg.go => paths/paths.go} (95%)
 create mode 100644 bundle/tests/relative_path_with_includes/bundle.yml
 create mode 100644 bundle/tests/relative_path_with_includes/subfolder/include.yml
 create mode 100644 bundle/tests/relative_path_with_includes_test.go

diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go
index 60331eb1..1955e265 100644
--- a/bundle/config/artifact.go
+++ b/bundle/config/artifact.go
@@ -8,9 +8,18 @@ import (
 	"path"
 	"strings"
 
+	"github.com/databricks/cli/bundle/config/paths"
"github.com/databricks/databricks-sdk-go/service/compute" ) +type Artifacts map[string]*Artifact + +func (artifacts Artifacts) SetConfigFilePath(path string) { + for _, artifact := range artifacts { + artifact.ConfigFilePath = path + } +} + type ArtifactType string const ArtifactPythonWheel ArtifactType = `whl` @@ -34,6 +43,8 @@ type Artifact struct { // (Python wheel, Java jar and etc) itself Files []ArtifactFile `json:"files"` BuildCommand string `json:"build"` + + paths.Paths } func (a *Artifact) Build(ctx context.Context) ([]byte, error) { diff --git a/bundle/config/mutator/trampoline_test.go b/bundle/config/mutator/trampoline_test.go index e523250e..aec58618 100644 --- a/bundle/config/mutator/trampoline_test.go +++ b/bundle/config/mutator/trampoline_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" @@ -64,7 +65,7 @@ func TestGenerateTrampoline(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: tmpDir, }, JobSettings: &jobs.JobSettings{ diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 08f83986..acfd5525 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "net/url" "os" "path" "path/filepath" @@ -11,8 +12,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/notebook" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/databricks/databricks-sdk-go/service/pipelines" ) type ErrIsNotebook struct { @@ -44,7 +43,9 @@ func (m *translatePaths) Name() string { return "TranslatePaths" } -// rewritePath converts a given relative path to a stable remote workspace path. +type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) (string, error) + +// rewritePath converts a given relative path from the loaded config to a new path based on the passed rewriting function // // It takes these arguments: // - The argument `dir` is the directory relative to which the given relative path is. @@ -57,13 +58,23 @@ func (m *translatePaths) rewritePath( dir string, b *bundle.Bundle, p *string, - fn func(literal, localPath, remotePath string) (string, error), + fn rewriteFunc, ) error { // We assume absolute paths point to a location in the workspace if path.IsAbs(filepath.ToSlash(*p)) { return nil } + url, err := url.Parse(*p) + if err != nil { + return err + } + + // If the file path has scheme, it's a full path and we don't need to transform it + if url.Scheme != "" { + return nil + } + // Local path is relative to the directory the resource was defined in. localPath := filepath.Join(dir, filepath.FromSlash(*p)) if interp, ok := m.seen[localPath]; ok { @@ -72,19 +83,19 @@ func (m *translatePaths) rewritePath( } // Remote path must be relative to the bundle root. - remotePath, err := filepath.Rel(b.Config.Path, localPath) + localRelPath, err := filepath.Rel(b.Config.Path, localPath) if err != nil { return err } - if strings.HasPrefix(remotePath, "..") { + if strings.HasPrefix(localRelPath, "..") { return fmt.Errorf("path %s is not contained in bundle root path", localPath) } // Prefix remote path with its remote root path. 
- remotePath = path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(remotePath)) + remotePath := path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(localRelPath)) // Convert local path into workspace path via specified function. - interp, err := fn(*p, localPath, filepath.ToSlash(remotePath)) + interp, err := fn(*p, localPath, localRelPath, filepath.ToSlash(remotePath)) if err != nil { return err } @@ -94,81 +105,69 @@ func (m *translatePaths) rewritePath( return nil } -func (m *translatePaths) translateNotebookPath(literal, localPath, remotePath string) (string, error) { - nb, _, err := notebook.Detect(localPath) +func translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + nb, _, err := notebook.Detect(localFullPath) if os.IsNotExist(err) { return "", fmt.Errorf("notebook %s not found", literal) } if err != nil { - return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localPath, err) + return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localFullPath, err) } if !nb { - return "", ErrIsNotNotebook{localPath} + return "", ErrIsNotNotebook{localFullPath} } // Upon import, notebooks are stripped of their extension. - return strings.TrimSuffix(remotePath, filepath.Ext(localPath)), nil + return strings.TrimSuffix(remotePath, filepath.Ext(localFullPath)), nil } -func (m *translatePaths) translateFilePath(literal, localPath, remotePath string) (string, error) { - nb, _, err := notebook.Detect(localPath) +func translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + nb, _, err := notebook.Detect(localFullPath) if os.IsNotExist(err) { return "", fmt.Errorf("file %s not found", literal) } if err != nil { - return "", fmt.Errorf("unable to determine if %s is not a notebook: %w", localPath, err) + return "", fmt.Errorf("unable to determine if %s is not a notebook: %w", localFullPath, err) } if nb { - return "", ErrIsNotebook{localPath} + return "", ErrIsNotebook{localFullPath} } return remotePath, nil } -func (m *translatePaths) translateJobTask(dir string, b *bundle.Bundle, task *jobs.Task) error { - var err error - - if task.NotebookTask != nil { - err = m.rewritePath(dir, b, &task.NotebookTask.NotebookPath, m.translateNotebookPath) - if target := (&ErrIsNotNotebook{}); errors.As(err, target) { - return fmt.Errorf(`expected a notebook for "tasks.notebook_task.notebook_path" but got a file: %w`, target) - } - if err != nil { - return err - } - } - - if task.SparkPythonTask != nil { - err = m.rewritePath(dir, b, &task.SparkPythonTask.PythonFile, m.translateFilePath) - if target := (&ErrIsNotebook{}); errors.As(err, target) { - return fmt.Errorf(`expected a file for "tasks.spark_python_task.python_file" but got a notebook: %w`, target) - } - if err != nil { - return err - } - } - - return nil +func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) { + return localRelPath, nil } -func (m *translatePaths) translatePipelineLibrary(dir string, b *bundle.Bundle, library *pipelines.PipelineLibrary) error { - var err error +type transformer struct { + // A directory path relative to which `path` will be transformed + dir string + // A path to transform + path *string + // Name of the config property where the path string is coming from + configPath string + // A function that performs the actual rewriting logic. 
+ fn rewriteFunc +} - if library.Notebook != nil { - err = m.rewritePath(dir, b, &library.Notebook.Path, m.translateNotebookPath) - if target := (&ErrIsNotNotebook{}); errors.As(err, target) { - return fmt.Errorf(`expected a notebook for "libraries.notebook.path" but got a file: %w`, target) - } - if err != nil { - return err - } - } +type transformFunc func(resource any, dir string) *transformer - if library.File != nil { - err = m.rewritePath(dir, b, &library.File.Path, m.translateFilePath) - if target := (&ErrIsNotebook{}); errors.As(err, target) { - return fmt.Errorf(`expected a file for "libraries.file.path" but got a notebook: %w`, target) +// Apply all matches transformers for the given resource +func (m *translatePaths) applyTransformers(funcs []transformFunc, b *bundle.Bundle, resource any, dir string) error { + for _, transformFn := range funcs { + transformer := transformFn(resource, dir) + if transformer == nil { + continue } + + err := m.rewritePath(transformer.dir, b, transformer.path, transformer.fn) if err != nil { + if target := (&ErrIsNotebook{}); errors.As(err, target) { + return fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, transformer.configPath, target) + } + if target := (&ErrIsNotNotebook{}); errors.As(err, target) { + return fmt.Errorf(`expected a notebook for "%s" but got a file: %w`, transformer.configPath, target) + } return err } } @@ -179,36 +178,14 @@ func (m *translatePaths) translatePipelineLibrary(dir string, b *bundle.Bundle, func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error { m.seen = make(map[string]string) - for key, job := range b.Config.Resources.Jobs { - dir, err := job.ConfigFileDirectory() + for _, fn := range []func(*translatePaths, *bundle.Bundle) error{ + applyJobTransformers, + applyPipelineTransformers, + applyArtifactTransformers, + } { + err := fn(m, b) if err != nil { - return fmt.Errorf("unable to determine directory for job %s: %w", key, err) - } - - // Do not translate job task paths if using git source - if job.GitSource != nil { - continue - } - - for i := 0; i < len(job.Tasks); i++ { - err := m.translateJobTask(dir, b, &job.Tasks[i]) - if err != nil { - return err - } - } - } - - for key, pipeline := range b.Config.Resources.Pipelines { - dir, err := pipeline.ConfigFileDirectory() - if err != nil { - return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) - } - - for i := 0; i < len(pipeline.Libraries); i++ { - err := m.translatePipelineLibrary(dir, b, &pipeline.Libraries[i]) - if err != nil { - return err - } + return err } } diff --git a/bundle/config/mutator/translate_paths_artifacts.go b/bundle/config/mutator/translate_paths_artifacts.go new file mode 100644 index 00000000..91e8397c --- /dev/null +++ b/bundle/config/mutator/translate_paths_artifacts.go @@ -0,0 +1,42 @@ +package mutator + +import ( + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" +) + +func transformArtifactPath(resource any, dir string) *transformer { + artifact, ok := resource.(*config.Artifact) + if !ok { + return nil + } + + return &transformer{ + dir, + &artifact.Path, + "artifacts.path", + translateNoOp, + } +} + +func applyArtifactTransformers(m *translatePaths, b *bundle.Bundle) error { + artifactTransformers := []transformFunc{ + transformArtifactPath, + } + + for key, artifact := range b.Config.Artifacts { + dir, err := artifact.ConfigFileDirectory() + if err != nil { + return fmt.Errorf("unable to determine directory for artifact %s: %w", key, err) 
+ } + + err = m.applyTransformers(artifactTransformers, b, artifact, dir) + if err != nil { + return err + } + } + + return nil +} diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go new file mode 100644 index 00000000..b94df5e2 --- /dev/null +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -0,0 +1,103 @@ +package mutator + +import ( + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +func transformNotebookTask(resource any, dir string) *transformer { + task, ok := resource.(*jobs.Task) + if !ok || task.NotebookTask == nil { + return nil + } + + return &transformer{ + dir, + &task.NotebookTask.NotebookPath, + "tasks.notebook_task.notebook_path", + translateNotebookPath, + } +} + +func transformSparkTask(resource any, dir string) *transformer { + task, ok := resource.(*jobs.Task) + if !ok || task.SparkPythonTask == nil { + return nil + } + + return &transformer{ + dir, + &task.SparkPythonTask.PythonFile, + "tasks.spark_python_task.python_file", + translateFilePath, + } +} + +func transformWhlLibrary(resource any, dir string) *transformer { + library, ok := resource.(*compute.Library) + if !ok || library.Whl == "" { + return nil + } + + return &transformer{ + dir, + &library.Whl, + "libraries.whl", + translateNoOp, + } +} + +func transformJarLibrary(resource any, dir string) *transformer { + library, ok := resource.(*compute.Library) + if !ok || library.Jar == "" { + return nil + } + + return &transformer{ + dir, + &library.Jar, + "libraries.jar", + translateFilePath, + } +} + +func applyJobTransformers(m *translatePaths, b *bundle.Bundle) error { + jobTransformers := []transformFunc{ + transformNotebookTask, + transformSparkTask, + transformWhlLibrary, + transformJarLibrary, + } + + for key, job := range b.Config.Resources.Jobs { + dir, err := job.ConfigFileDirectory() + if err != nil { + return fmt.Errorf("unable to determine directory for job %s: %w", key, err) + } + + // Do not translate job task paths if using git source + if job.GitSource != nil { + continue + } + + for i := 0; i < len(job.Tasks); i++ { + task := &job.Tasks[i] + err := m.applyTransformers(jobTransformers, b, task, dir) + if err != nil { + return err + } + for j := 0; j < len(task.Libraries); j++ { + library := &task.Libraries[j] + err := m.applyTransformers(jobTransformers, b, library, dir) + if err != nil { + return err + } + } + } + } + + return nil +} diff --git a/bundle/config/mutator/translate_paths_pipelines.go b/bundle/config/mutator/translate_paths_pipelines.go new file mode 100644 index 00000000..1afdb9d5 --- /dev/null +++ b/bundle/config/mutator/translate_paths_pipelines.go @@ -0,0 +1,60 @@ +package mutator + +import ( + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/databricks-sdk-go/service/pipelines" +) + +func transformLibraryNotebook(resource any, dir string) *transformer { + library, ok := resource.(*pipelines.PipelineLibrary) + if !ok || library.Notebook == nil { + return nil + } + + return &transformer{ + dir, + &library.Notebook.Path, + "libraries.notebook.path", + translateNotebookPath, + } +} + +func transformLibraryFile(resource any, dir string) *transformer { + library, ok := resource.(*pipelines.PipelineLibrary) + if !ok || library.File == nil { + return nil + } + + return &transformer{ + dir, + &library.File.Path, + "libraries.file.path", + translateFilePath, + } +} + +func 
applyPipelineTransformers(m *translatePaths, b *bundle.Bundle) error { + pipelineTransformers := []transformFunc{ + transformLibraryNotebook, + transformLibraryFile, + } + + for key, pipeline := range b.Config.Resources.Pipelines { + dir, err := pipeline.ConfigFileDirectory() + if err != nil { + return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) + } + + for i := 0; i < len(pipeline.Libraries); i++ { + library := &pipeline.Libraries[i] + err := m.applyTransformers(pipelineTransformers, b, library, dir) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index b87f4f67..e7ac5e8a 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -9,7 +9,9 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/stretchr/testify/assert" @@ -43,7 +45,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) { Jobs: map[string]*resources.Job{ "job": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "resource.yml"), }, JobSettings: &jobs.JobSettings{ @@ -103,6 +105,7 @@ func TestTranslatePaths(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_job_notebook.py")) touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py")) touchEmptyFile(t, filepath.Join(dir, "my_python_file.py")) + touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar")) bundle := &bundle.Bundle{ Config: config.Root{ @@ -113,7 +116,7 @@ func TestTranslatePaths(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "resource.yml"), }, JobSettings: &jobs.JobSettings{ @@ -122,6 +125,9 @@ func TestTranslatePaths(t *testing.T) { NotebookTask: &jobs.NotebookTask{ NotebookPath: "./my_job_notebook.py", }, + Libraries: []compute.Library{ + {Whl: "./dist/task.whl"}, + }, }, { NotebookTask: &jobs.NotebookTask{ @@ -143,13 +149,29 @@ func TestTranslatePaths(t *testing.T) { PythonFile: "./my_python_file.py", }, }, + { + SparkJarTask: &jobs.SparkJarTask{ + MainClassName: "HelloWorld", + }, + Libraries: []compute.Library{ + {Jar: "./dist/task.jar"}, + }, + }, + { + SparkJarTask: &jobs.SparkJarTask{ + MainClassName: "HelloWorldRemote", + }, + Libraries: []compute.Library{ + {Jar: "dbfs:///bundle/dist/task_remote.jar"}, + }, + }, }, }, }, }, Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "resource.yml"), }, PipelineSpec: &pipelines.PipelineSpec{ @@ -194,6 +216,11 @@ func TestTranslatePaths(t *testing.T) { "/bundle/my_job_notebook", bundle.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath, ) + assert.Equal( + t, + filepath.Join("dist", "task.whl"), + bundle.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl, + ) assert.Equal( t, "/Users/jane.doe@databricks.com/doesnt_exist.py", @@ -209,6 +236,16 @@ func TestTranslatePaths(t *testing.T) { "/bundle/my_python_file.py", 
bundle.Config.Resources.Jobs["job"].Tasks[4].SparkPythonTask.PythonFile, ) + assert.Equal( + t, + "/bundle/dist/task.jar", + bundle.Config.Resources.Jobs["job"].Tasks[5].Libraries[0].Jar, + ) + assert.Equal( + t, + "dbfs:///bundle/dist/task_remote.jar", + bundle.Config.Resources.Jobs["job"].Tasks[6].Libraries[0].Jar, + ) // Assert that the path in the libraries now refer to the artifact. assert.Equal( @@ -236,6 +273,7 @@ func TestTranslatePaths(t *testing.T) { func TestTranslatePathsInSubdirectories(t *testing.T) { dir := t.TempDir() touchEmptyFile(t, filepath.Join(dir, "job", "my_python_file.py")) + touchEmptyFile(t, filepath.Join(dir, "job", "dist", "task.jar")) touchEmptyFile(t, filepath.Join(dir, "pipeline", "my_python_file.py")) bundle := &bundle.Bundle{ @@ -247,7 +285,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "job/resource.yml"), }, JobSettings: &jobs.JobSettings{ @@ -257,13 +295,21 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { PythonFile: "./my_python_file.py", }, }, + { + SparkJarTask: &jobs.SparkJarTask{ + MainClassName: "HelloWorld", + }, + Libraries: []compute.Library{ + {Jar: "./dist/task.jar"}, + }, + }, }, }, }, }, Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "pipeline/resource.yml"), }, @@ -290,6 +336,11 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { "/bundle/job/my_python_file.py", bundle.Config.Resources.Jobs["job"].Tasks[0].SparkPythonTask.PythonFile, ) + assert.Equal( + t, + "/bundle/job/dist/task.jar", + bundle.Config.Resources.Jobs["job"].Tasks[1].Libraries[0].Jar, + ) assert.Equal( t, @@ -310,7 +361,7 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "../resource.yml"), }, JobSettings: &jobs.JobSettings{ @@ -341,7 +392,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "fake.yml"), }, JobSettings: &jobs.JobSettings{ @@ -372,7 +423,7 @@ func TestJobFileDoesNotExistError(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "fake.yml"), }, JobSettings: &jobs.JobSettings{ @@ -403,7 +454,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "fake.yml"), }, PipelineSpec: &pipelines.PipelineSpec{ @@ -434,7 +485,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "fake.yml"), }, PipelineSpec: &pipelines.PipelineSpec{ @@ -469,7 +520,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "resource.yml"), }, JobSettings: &jobs.JobSettings{ 
@@ -504,7 +555,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "resource.yml"), }, JobSettings: &jobs.JobSettings{ @@ -539,7 +590,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "resource.yml"), }, PipelineSpec: &pipelines.PipelineSpec{ @@ -574,7 +625,7 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: filepath.Join(dir, "resource.yml"), }, PipelineSpec: &pipelines.PipelineSpec{ diff --git a/bundle/config/resources/pkg.go b/bundle/config/paths/paths.go similarity index 95% rename from bundle/config/resources/pkg.go rename to bundle/config/paths/paths.go index 5cf54a06..c2cbcb7d 100644 --- a/bundle/config/resources/pkg.go +++ b/bundle/config/paths/paths.go @@ -1,4 +1,4 @@ -package resources +package paths import ( "fmt" diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 6200062a..66705afb 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -1,6 +1,7 @@ package resources import ( + "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/imdario/mergo" ) @@ -9,7 +10,7 @@ type Job struct { ID string `json:"id,omitempty" bundle:"readonly"` Permissions []Permission `json:"permissions,omitempty"` - Paths + paths.Paths *jobs.JobSettings } diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index ebef039a..d843cf22 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -1,11 +1,14 @@ package resources -import "github.com/databricks/databricks-sdk-go/service/ml" +import ( + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/service/ml" +) type MlflowExperiment struct { Permissions []Permission `json:"permissions,omitempty"` - Paths + paths.Paths *ml.Experiment } diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 31c72f6b..92617c95 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -1,11 +1,14 @@ package resources -import "github.com/databricks/databricks-sdk-go/service/ml" +import ( + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/service/ml" +) type MlflowModel struct { Permissions []Permission `json:"permissions,omitempty"` - Paths + paths.Paths *ml.Model } diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index 96efc2c4..d3a51c57 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -1,12 +1,15 @@ package resources -import "github.com/databricks/databricks-sdk-go/service/pipelines" +import ( + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/service/pipelines" +) type Pipeline struct { ID string `json:"id,omitempty" bundle:"readonly"` Permissions []Permission `json:"permissions,omitempty"` - Paths + paths.Paths *pipelines.PipelineSpec } diff 
--git a/bundle/config/resources_test.go b/bundle/config/resources_test.go index 63285bf9..82cb9f45 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -3,6 +3,7 @@ package config import ( "testing" + "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" "github.com/stretchr/testify/assert" ) @@ -11,21 +12,21 @@ func TestVerifyUniqueResourceIdentifiers(t *testing.T) { r := Resources{ Jobs: map[string]*resources.Job{ "foo": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "foo.yml", }, }, }, Models: map[string]*resources.MlflowModel{ "bar": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "bar.yml", }, }, }, Experiments: map[string]*resources.MlflowExperiment{ "foo": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "foo2.yml", }, }, @@ -39,14 +40,14 @@ func TestVerifySafeMerge(t *testing.T) { r := Resources{ Jobs: map[string]*resources.Job{ "foo": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "foo.yml", }, }, }, Models: map[string]*resources.MlflowModel{ "bar": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "bar.yml", }, }, @@ -55,7 +56,7 @@ func TestVerifySafeMerge(t *testing.T) { other := Resources{ Pipelines: map[string]*resources.Pipeline{ "foo": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "foo2.yml", }, }, @@ -69,14 +70,14 @@ func TestVerifySafeMergeForSameResourceType(t *testing.T) { r := Resources{ Jobs: map[string]*resources.Job{ "foo": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "foo.yml", }, }, }, Models: map[string]*resources.MlflowModel{ "bar": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "bar.yml", }, }, @@ -85,7 +86,7 @@ func TestVerifySafeMergeForSameResourceType(t *testing.T) { other := Resources{ Jobs: map[string]*resources.Job{ "foo": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: "foo2.yml", }, }, diff --git a/bundle/config/root.go b/bundle/config/root.go index 1275dab4..99ea33ad 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -64,7 +64,7 @@ type Root struct { Workspace Workspace `json:"workspace,omitempty"` // Artifacts contains a description of all code artifacts in this bundle. - Artifacts map[string]*Artifact `json:"artifacts,omitempty"` + Artifacts Artifacts `json:"artifacts,omitempty"` // Resources contains a description of all Databricks resources // to deploy in this bundle (e.g. jobs, pipelines, etc.). @@ -113,6 +113,10 @@ func Load(path string) (*Root, error) { // was loaded from in configuration leafs that require it. func (r *Root) SetConfigFilePath(path string) { r.Resources.SetConfigFilePath(path) + if r.Artifacts != nil { + r.Artifacts.SetConfigFilePath(path) + } + if r.Targets != nil { for _, env := range r.Targets { if env == nil { @@ -121,6 +125,9 @@ func (r *Root) SetConfigFilePath(path string) { if env.Resources != nil { env.Resources.SetConfigFilePath(path) } + if env.Artifacts != nil { + env.Artifacts.SetConfigFilePath(path) + } } } } @@ -175,11 +182,17 @@ func (r *Root) Load(path string) error { } func (r *Root) Merge(other *Root) error { + err := r.Sync.Merge(r, other) + if err != nil { + return err + } + other.Sync = Sync{} + // TODO: when hooking into merge semantics, disallow setting path on the target instance. 
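+	// Until that is enforced, clear the path on the source so the merge
+	// below cannot override the path set on the target instance.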
other.Path = "" // Check for safe merge, protecting against duplicate resource identifiers - err := r.Resources.VerifySafeMerge(&other.Resources) + err = r.Resources.VerifySafeMerge(&other.Resources) if err != nil { return err } diff --git a/bundle/config/sync.go b/bundle/config/sync.go index 0580e4c4..6ba2603c 100644 --- a/bundle/config/sync.go +++ b/bundle/config/sync.go @@ -1,5 +1,7 @@ package config +import "path/filepath" + type Sync struct { // Include contains a list of globs evaluated relative to the bundle root path // to explicitly include files that were excluded by the user's gitignore. @@ -11,3 +13,19 @@ type Sync struct { // 2) the `Include` field above. Exclude []string `json:"exclude,omitempty"` } + +func (s *Sync) Merge(root *Root, other *Root) error { + path, err := filepath.Rel(root.Path, other.Path) + if err != nil { + return err + } + for _, include := range other.Sync.Include { + s.Include = append(s.Include, filepath.Join(path, include)) + } + + for _, exclude := range other.Sync.Exclude { + s.Exclude = append(s.Exclude, filepath.Join(path, exclude)) + } + + return nil +} diff --git a/bundle/config/target.go b/bundle/config/target.go index 6a45fdb8..2489efc3 100644 --- a/bundle/config/target.go +++ b/bundle/config/target.go @@ -23,7 +23,7 @@ type Target struct { Workspace *Workspace `json:"workspace,omitempty"` - Artifacts map[string]*Artifact `json:"artifacts,omitempty"` + Artifacts Artifacts `json:"artifacts,omitempty"` Resources *Resources `json:"resources,omitempty"` diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index fb2c23e4..a9f57db8 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" @@ -112,7 +113,7 @@ func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test": { - Paths: resources.Paths{ + Paths: paths.Paths{ ConfigFilePath: tmpDir, }, JobSettings: &jobs.JobSettings{ diff --git a/bundle/tests/relative_path_with_includes/bundle.yml b/bundle/tests/relative_path_with_includes/bundle.yml new file mode 100644 index 00000000..36474c75 --- /dev/null +++ b/bundle/tests/relative_path_with_includes/bundle.yml @@ -0,0 +1,25 @@ +bundle: + name: sync_include + +include: + - "*/*.yml" + +sync: + include: + - ./folder_a/*.* + exclude: + - ./folder_b/*.* + +artifacts: + test_a: + type: whl + path: ./artifact_a + +resources: + jobs: + job_a: + name: "job_a" + tasks: + - task_key: "task_a" + libraries: + - whl: ./dist/job_a.whl diff --git a/bundle/tests/relative_path_with_includes/subfolder/include.yml b/bundle/tests/relative_path_with_includes/subfolder/include.yml new file mode 100644 index 00000000..597abe3b --- /dev/null +++ b/bundle/tests/relative_path_with_includes/subfolder/include.yml @@ -0,0 +1,20 @@ +sync: + include: + - ./folder_c/*.* + exclude: + - ./folder_d/*.* + +artifacts: + test_b: + type: whl + path: ./artifact_b + + +resources: + jobs: + job_b: + name: "job_b" + tasks: + - task_key: "task_a" + libraries: + - whl: ./dist/job_b.whl diff --git a/bundle/tests/relative_path_with_includes_test.go b/bundle/tests/relative_path_with_includes_test.go new file mode 100644 index 00000000..92249c41 --- /dev/null +++ 
b/bundle/tests/relative_path_with_includes_test.go @@ -0,0 +1,28 @@ +package config_tests + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/assert" +) + +func TestRelativePathsWithIncludes(t *testing.T) { + b := load(t, "./relative_path_with_includes") + + m := mutator.TranslatePaths() + err := bundle.Apply(context.Background(), b, m) + assert.NoError(t, err) + + assert.Equal(t, "artifact_a", b.Config.Artifacts["test_a"].Path) + assert.Equal(t, filepath.Join("subfolder", "artifact_b"), b.Config.Artifacts["test_b"].Path) + + assert.ElementsMatch(t, []string{"./folder_a/*.*", filepath.Join("subfolder", "folder_c", "*.*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{"./folder_b/*.*", filepath.Join("subfolder", "folder_d", "*.*")}, b.Config.Sync.Exclude) + + assert.Equal(t, filepath.Join("dist", "job_a.whl"), b.Config.Resources.Jobs["job_a"].Tasks[0].Libraries[0].Whl) + assert.Equal(t, filepath.Join("subfolder", "dist", "job_b.whl"), b.Config.Resources.Jobs["job_b"].Tasks[0].Libraries[0].Whl) +} From 437263eb583eacc99cd1b30499e85928c377c8a2 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 5 Sep 2023 10:27:18 +0200 Subject: [PATCH 053/310] Upgrade to actions/checkout@v4 (#731) ## Changes This should fix intermittent failures with v3 (see https://github.com/actions/checkout/issues/1448) --- .github/workflows/push.yml | 4 ++-- .github/workflows/release-snapshot.yml | 2 +- .github/workflows/release.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 3209ae93..6194d490 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -20,7 +20,7 @@ jobs: steps: - name: Checkout repository and submodules - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Unshallow run: git fetch --prune --unshallow @@ -50,7 +50,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v4 diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index fbf5421b..c3398a2b 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository and submodules - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Unshallow run: git fetch --prune --unshallow diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c166fc5b..cbab15ec 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository and submodules - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Unshallow run: git fetch --prune --unshallow From 1752e29885f4fbcdcf1c9e1d17d18a5cafa3ec93 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 5 Sep 2023 11:43:57 +0200 Subject: [PATCH 054/310] Update Go SDK to v0.19.0 (#729) ## Changes * Update Go SDK to v0.19.0 * Update commands per OpenAPI spec from Go SDK * Incorporate `client.Do()` signature change to include a (nil) header map * Update `workspace.WorkspaceService` mock with permissions methods * Skip `files` service in codegen; already implemented under the `fs` command ## Tests Unit and integration tests pass. 
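
For reference, the `client.Do()` change only adds a header map between the
path and the request body; per the notes above, passing nil preserves the
previous behavior. A minimal sketch of the new call shape (the `api` client,
endpoint path, and request/response values below are placeholders, not part
of this change):

```go
// Headers now travel as an explicit argument; nil means "no extra headers".
headers := map[string]string{"Content-Type": "application/json"}

var response any
err := api.Do(ctx, "POST", "/api/2.0/example", headers, request, &response)
if err != nil {
	return err
}
```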
--- .codegen/_openapi_sha | 2 +- .codegen/cmds-workspace.go.tmpl | 10 +- .codegen/service.go.tmpl | 10 +- .gitattributes | 5 + bundle/artifacts/artifacts_test.go | 32 + .../custom-app-integration.go | 1 + cmd/account/groups/groups.go | 2 +- cmd/account/log-delivery/log-delivery.go | 38 +- cmd/account/metastores/metastores.go | 2 +- .../service-principals/service-principals.go | 2 +- cmd/account/users/users.go | 2 +- cmd/account/workspaces/workspaces.go | 18 +- cmd/api/api.go | 3 +- .../artifact-allowlists.go | 172 +++++ cmd/workspace/catalogs/catalogs.go | 2 + .../cluster-policies/cluster-policies.go | 323 ++++++++- cmd/workspace/clusters/clusters.go | 318 +++++++++ cmd/workspace/cmd.go | 10 + cmd/workspace/connections/connections.go | 2 +- .../dashboard-widgets/dashboard-widgets.go | 228 +++++++ cmd/workspace/dashboards/dashboards.go | 4 +- cmd/workspace/experiments/experiments.go | 429 +++++++++++- cmd/workspace/groups/groups.go | 2 +- .../instance-pools/instance-pools.go | 317 ++++++++- cmd/workspace/jobs/jobs.go | 315 +++++++++ cmd/workspace/libraries/libraries.go | 2 +- .../model-registry/model-registry.go | 267 ++++++++ .../model-versions/model-versions.go | 400 +++++++++++ cmd/workspace/permissions/permissions.go | 78 ++- cmd/workspace/pipelines/pipelines.go | 315 +++++++++ .../query-visualizations.go | 236 +++++++ .../registered-models/registered-models.go | 635 ++++++++++++++++++ cmd/workspace/repos/repos.go | 315 +++++++++ cmd/workspace/secrets/secrets.go | 73 ++ .../service-principals/service-principals.go | 2 +- .../serving-endpoints/serving-endpoints.go | 267 ++++++++ .../token-management/token-management.go | 247 +++++++ cmd/workspace/users/users.go | 249 ++++++- cmd/workspace/volumes/volumes.go | 12 +- cmd/workspace/warehouses/warehouses.go | 315 +++++++++ cmd/workspace/workspace/workspace.go | 271 ++++++++ go.mod | 12 +- go.sum | 24 +- internal/fs_mkdir_test.go | 2 +- internal/sync_test.go | 2 +- libs/filer/files_client.go | 17 +- libs/filer/workspace_files_client.go | 2 +- 47 files changed, 5893 insertions(+), 99 deletions(-) create mode 100755 cmd/workspace/artifact-allowlists/artifact-allowlists.go create mode 100755 cmd/workspace/dashboard-widgets/dashboard-widgets.go create mode 100755 cmd/workspace/model-versions/model-versions.go create mode 100755 cmd/workspace/query-visualizations/query-visualizations.go create mode 100755 cmd/workspace/registered-models/registered-models.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index c9e7a8be..b59218d3 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -7b57ba3a53f4de3d049b6a24391fe5474212daf8 \ No newline at end of file +09a7fa63d9ae243e5407941f200960ca14d48b07 \ No newline at end of file diff --git a/.codegen/cmds-workspace.go.tmpl b/.codegen/cmds-workspace.go.tmpl index 013c62f8..a9daa05d 100644 --- a/.codegen/cmds-workspace.go.tmpl +++ b/.codegen/cmds-workspace.go.tmpl @@ -2,7 +2,15 @@ package workspace -{{ $excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions" "account-access-control-proxy" }} +{{ $excludes := + list + "command-execution" + "statement-execution" + "dbfs" + "dbsql-permissions" + "account-access-control-proxy" + "files" +}} import ( "github.com/databricks/cli/cmd/root" diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 91f2e5cf..4ede142d 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -10,7 +10,15 @@ import ( "github.com/spf13/cobra" ) -{{ $excludes := list "command-execution" 
"statement-execution" "dbfs" "dbsql-permissions" "account-access-control-proxy" }} +{{ $excludes := + list + "command-execution" + "statement-execution" + "dbfs" + "dbsql-permissions" + "account-access-control-proxy" + "files" +}} {{if not (in $excludes .KebabName) }} {{template "service" .}} diff --git a/.gitattributes b/.gitattributes index f9ea0418..3209a0f3 100755 --- a/.gitattributes +++ b/.gitattributes @@ -25,6 +25,7 @@ cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true cmd/account/workspaces/workspaces.go linguist-generated=true cmd/workspace/alerts/alerts.go linguist-generated=true +cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true @@ -32,6 +33,7 @@ cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true cmd/workspace/connections/connections.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true +cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true cmd/workspace/experiments/experiments.go linguist-generated=true @@ -48,14 +50,17 @@ cmd/workspace/jobs/jobs.go linguist-generated=true cmd/workspace/libraries/libraries.go linguist-generated=true cmd/workspace/metastores/metastores.go linguist-generated=true cmd/workspace/model-registry/model-registry.go linguist-generated=true +cmd/workspace/model-versions/model-versions.go linguist-generated=true cmd/workspace/permissions/permissions.go linguist-generated=true cmd/workspace/pipelines/pipelines.go linguist-generated=true cmd/workspace/policy-families/policy-families.go linguist-generated=true cmd/workspace/providers/providers.go linguist-generated=true cmd/workspace/queries/queries.go linguist-generated=true cmd/workspace/query-history/query-history.go linguist-generated=true +cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true cmd/workspace/recipients/recipients.go linguist-generated=true +cmd/workspace/registered-models/registered-models.go linguist-generated=true cmd/workspace/repos/repos.go linguist-generated=true cmd/workspace/schemas/schemas.go linguist-generated=true cmd/workspace/secrets/secrets.go linguist-generated=true diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go index 65a1950a..4c0a18f3 100644 --- a/bundle/artifacts/artifacts_test.go +++ b/bundle/artifacts/artifacts_test.go @@ -55,6 +55,38 @@ func (MockWorkspaceService) Mkdirs(ctx context.Context, request workspace.Mkdirs return nil } +// GetPermissionLevels implements workspace.WorkspaceService. +func (MockWorkspaceService) GetPermissionLevels( + ctx context.Context, + request workspace.GetWorkspaceObjectPermissionLevelsRequest, +) (*workspace.GetWorkspaceObjectPermissionLevelsResponse, error) { + panic("unimplemented") +} + +// GetPermissions implements workspace.WorkspaceService. 
+func (MockWorkspaceService) GetPermissions( + ctx context.Context, + request workspace.GetWorkspaceObjectPermissionsRequest, +) (*workspace.WorkspaceObjectPermissions, error) { + panic("unimplemented") +} + +// SetPermissions implements workspace.WorkspaceService. +func (MockWorkspaceService) SetPermissions( + ctx context.Context, + request workspace.WorkspaceObjectPermissionsRequest, +) (*workspace.WorkspaceObjectPermissions, error) { + panic("unimplemented") +} + +// UpdatePermissions implements workspace.WorkspaceService. +func (MockWorkspaceService) UpdatePermissions( + ctx context.Context, + request workspace.WorkspaceObjectPermissionsRequest, +) (*workspace.WorkspaceObjectPermissions, error) { + panic("unimplemented") +} + func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) { dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index d7269bf4..e5868809 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -60,6 +60,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().BoolVar(&createReq.Confidential, "confidential", createReq.Confidential, `indicates if an oauth client-secret should be generated.`) + // TODO: array: scopes // TODO: complex arg: token_access_policy cmd.Use = "create" diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 04298b49..6e3b98c0 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -368,7 +368,7 @@ func newPatch() *cobra.Command { cmd.Flags().Var(&patchJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: Operations - // TODO: array: schema + // TODO: array: schemas cmd.Use = "patch ID" cmd.Short = `Update group details.` diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index a7ed39dc..6323e0dd 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -34,22 +34,21 @@ func New() *cobra.Command { 1. **Create storage**: In AWS, [create a new AWS S3 bucket] with a specific bucket policy. Using Databricks APIs, call the Account API to create a - [storage configuration object](#operation/create-storage-config) that uses the - bucket name. 2. **Create credentials**: In AWS, create the appropriate AWS IAM - role. For full details, including the required IAM role policies and trust + [storage configuration object](:method:Storage/Create) that uses the bucket + name. 2. **Create credentials**: In AWS, create the appropriate AWS IAM role. + For full details, including the required IAM role policies and trust relationship, see [Billable usage log delivery]. Using Databricks APIs, call the Account API to create a [credential configuration - object](#operation/create-credential-config) that uses the IAM role's ARN. 3. - **Create log delivery configuration**: Using Databricks APIs, call the Account - API to [create a log delivery - configuration](#operation/create-log-delivery-config) that uses the credential - and storage configuration objects from previous steps. 
You can specify if the - logs should include all events of that log type in your account (_Account - level_ delivery) or only events for a specific set of workspaces (_workspace - level_ delivery). Account level log delivery applies to all current and future - workspaces plus account level logs, while workspace level log delivery solely - delivers logs related to the specified workspaces. You can create multiple - types of delivery configurations per account. + object](:method:Credentials/Create) that uses the IAM role"s ARN. 3. **Create + log delivery configuration**: Using Databricks APIs, call the Account API to + [create a log delivery configuration](:method:LogDelivery/Create) that uses + the credential and storage configuration objects from previous steps. You can + specify if the logs should include all events of that log type in your account + (_Account level_ delivery) or only events for a specific set of workspaces + (_workspace level_ delivery). Account level log delivery applies to all + current and future workspaces plus account level logs, while workspace level + log delivery solely delivers logs related to the specified workspaces. You can + create multiple types of delivery configurations per account. For billable usage delivery: * For more information about billable usage logs, see [Billable usage log delivery]. For the CSV schema, see the [Usage page]. * @@ -120,10 +119,9 @@ func newCreate() *cobra.Command { Creates a new Databricks log delivery configuration to enable delivery of the specified type of logs to your storage location. This requires that you - already created a [credential object](#operation/create-credential-config) - (which encapsulates a cross-account service IAM role) and a [storage - configuration object](#operation/create-storage-config) (which encapsulates an - S3 bucket). + already created a [credential object](:method:Credentials/Create) (which + encapsulates a cross-account service IAM role) and a [storage configuration + object](:method:Storage/Create) (which encapsulates an S3 bucket). For full details, including the required IAM role policies and bucket policies, see [Deliver and access billable usage logs] or [Configure audit @@ -140,7 +138,7 @@ func newCreate() *cobra.Command { You cannot delete a log delivery configuration, but you can disable it (see [Enable or disable log delivery - configuration](#operation/patch-log-delivery-config-status)). + configuration](:method:LogDelivery/PatchStatus)). [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [Deliver and access billable usage logs]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html` @@ -368,7 +366,7 @@ func newPatchStatus() *cobra.Command { configurations is not supported, so disable log delivery configurations that are no longer needed. 
Note that you can't re-enable a delivery configuration if this would violate the delivery configuration limits described under - [Create log delivery](#operation/create-log-delivery-config).` + [Create log delivery](:method:LogDelivery/Create).` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index 185f3642..726b779d 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -255,7 +255,7 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Metastores.List(ctx) + response, err := a.Metastores.ListAll(ctx) if err != nil { return err } diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index 481af978..f5823c69 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -367,7 +367,7 @@ func newPatch() *cobra.Command { cmd.Flags().Var(&patchJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: Operations - // TODO: array: schema + // TODO: array: schemas cmd.Use = "patch ID" cmd.Short = `Update service principal details.` diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 7e84f90f..375dd5b5 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -375,7 +375,7 @@ func newPatch() *cobra.Command { cmd.Flags().Var(&patchJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: Operations - // TODO: array: schema + // TODO: array: schemas cmd.Use = "patch ID" cmd.Short = `Update user details.` diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 96ac33b6..60142a8a 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -70,7 +70,10 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Cloud, "cloud", createReq.Cloud, `The cloud provider which the workspace uses.`) // TODO: complex arg: cloud_resource_container cmd.Flags().StringVar(&createReq.CredentialsId, "credentials-id", createReq.CredentialsId, `ID of the workspace's credential configuration object.`) + // TODO: map via StringToStringVar: custom_tags cmd.Flags().StringVar(&createReq.DeploymentName, "deployment-name", createReq.DeploymentName, `The deployment name defines part of the subdomain for the workspace.`) + // TODO: complex arg: gcp_managed_network_config + // TODO: complex arg: gke_config cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`) cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``) @@ -391,6 +394,7 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq provisioning.UpdateWorkspaceRequest + var updateJson flags.JsonFlag var updateSkipWait bool var updateTimeout time.Duration @@ -398,9 +402,11 @@ func newUpdate() *cobra.Command { cmd.Flags().BoolVar(&updateSkipWait, "no-wait", updateSkipWait, `do not wait to reach RUNNING state`) 
cmd.Flags().DurationVar(&updateTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach RUNNING state`) // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.AwsRegion, "aws-region", updateReq.AwsRegion, `The AWS region of the workspace's data plane (for example, us-west-2).`) cmd.Flags().StringVar(&updateReq.CredentialsId, "credentials-id", updateReq.CredentialsId, `ID of the workspace's credential configuration object.`) + // TODO: map via StringToStringVar: custom_tags cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`) cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) @@ -435,7 +441,8 @@ func newUpdate() *cobra.Command { support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end - PrivateLink support on a workspace. + PrivateLink support on a workspace. - Custom tags. Given you provide an empty + custom tags, the update would not be applied. After calling the PATCH operation to update the workspace configuration, make repeated GET requests with the workspace ID and check the workspace @@ -473,7 +480,8 @@ func newUpdate() *cobra.Command { PrivateLink support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end - PrivateLink support on a workspace. + PrivateLink support on a workspace. - Custom tags. Given you provide an empty + custom tags, the update would not be applied. **Important**: To update a running workspace, your workspace must have no running compute resources that run in your workspace's VPC in the Classic data @@ -529,6 +537,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Workspaces drop-down." diff --git a/cmd/api/api.go b/cmd/api/api.go index 698781e6..11a5e3e3 100644 --- a/cmd/api/api.go +++ b/cmd/api/api.go @@ -60,7 +60,8 @@ func makeCommand(method string) *cobra.Command { } var response any - err = api.Do(cmd.Context(), method, path, request, &response) + headers := map[string]string{"Content-Type": "application/json"} + err = api.Do(cmd.Context(), method, path, headers, request, &response) if err != nil { return err } diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go new file mode 100755 index 00000000..9f9b9be1 --- /dev/null +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -0,0 +1,172 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package artifact_allowlists + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "artifact-allowlists", + Short: `In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the allowlist in UC so that users can leverage these artifacts on compute configured with shared access mode.`, + Long: `In Databricks Runtime 13.3 and above, you can add libraries and init scripts + to the allowlist in UC so that users can leverage these artifacts on compute + configured with shared access mode.`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *catalog.GetArtifactAllowlistRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq catalog.GetArtifactAllowlistRequest + + // TODO: short flags + + cmd.Use = "get ARTIFACT_TYPE" + cmd.Short = `Get an artifact allowlist.` + cmd.Long = `Get an artifact allowlist. + + Get the artifact allowlist of a certain artifact type. The caller must be a + metastore admin.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + _, err = fmt.Sscan(args[0], &getReq.ArtifactType) + if err != nil { + return fmt.Errorf("invalid ARTIFACT_TYPE: %s", args[0]) + } + + response, err := w.ArtifactAllowlists.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGet()) + }) +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *catalog.SetArtifactAllowlist, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq catalog.SetArtifactAllowlist + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Set an artifact allowlist.` + cmd.Long = `Set an artifact allowlist. + + Set the artifact allowlist of a certain artifact type. 
The whole artifact + allowlist is replaced with the new allowlist. The caller must be a metastore + admin.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ArtifactAllowlists.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdate()) + }) +} + +// end service ArtifactAllowlists diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index c17f6c22..5e06977c 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -60,6 +60,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&createReq.ConnectionName, "connection-name", createReq.ConnectionName, `The name of the connection to an external data source.`) + // TODO: map via StringToStringVar: options // TODO: map via StringToStringVar: properties cmd.Flags().StringVar(&createReq.ProviderName, "provider-name", createReq.ProviderName, `The name of delta sharing provider.`) cmd.Flags().StringVar(&createReq.ShareName, "share-name", createReq.ShareName, `The name of the share under the share provider.`) @@ -321,6 +322,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of catalog.`) + // TODO: map via StringToStringVar: options cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of catalog.`) // TODO: map via StringToStringVar: properties diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 74a092cd..0e309194 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -318,8 +318,8 @@ func newGet() *cobra.Command { // TODO: short flags cmd.Use = "get POLICY_ID" - cmd.Short = `Get entity.` - cmd.Long = `Get entity. + cmd.Short = `Get a cluster policy.` + cmd.Long = `Get a cluster policy. Get a cluster policy entity. Creation and editing is available to admins only.` @@ -374,6 +374,153 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getPermissionLevelsOverrides []func( + *cobra.Command, + *compute.GetClusterPolicyPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq compute.GetClusterPolicyPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels CLUSTER_POLICY_ID" + cmd.Short = `Get cluster policy permission levels.` + cmd.Long = `Get cluster policy permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No CLUSTER_POLICY_ID argument specified. Loading names for Cluster Policies drop-down." + names, err := w.ClusterPolicies.PolicyNameToPolicyIdMap(ctx, compute.ListClusterPoliciesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The cluster policy for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the cluster policy for which to get or manage permissions") + } + getPermissionLevelsReq.ClusterPolicyId = args[0] + + response, err := w.ClusterPolicies.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *compute.GetClusterPolicyPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq compute.GetClusterPolicyPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions CLUSTER_POLICY_ID" + cmd.Short = `Get cluster policy permissions.` + cmd.Long = `Get cluster policy permissions. + + Gets the permissions of a cluster policy. Cluster policies can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No CLUSTER_POLICY_ID argument specified. Loading names for Cluster Policies drop-down." + names, err := w.ClusterPolicies.PolicyNameToPolicyIdMap(ctx, compute.ListClusterPoliciesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The cluster policy for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the cluster policy for which to get or manage permissions") + } + getPermissionsReq.ClusterPolicyId = args[0] + + response, err := w.ClusterPolicies.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start list command // Slice with functions to override default command behavior. @@ -396,8 +543,8 @@ func newList() *cobra.Command { cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order in which the policies get listed.`) cmd.Use = "list" - cmd.Short = `Get a cluster policy.` - cmd.Long = `Get a cluster policy. + cmd.Short = `List cluster policies.` + cmd.Long = `List cluster policies. Returns a list of policies accessible by the requesting user.` @@ -449,4 +596,172 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *compute.ClusterPolicyPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq compute.ClusterPolicyPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions CLUSTER_POLICY_ID" + cmd.Short = `Set cluster policy permissions.` + cmd.Long = `Set cluster policy permissions. + + Sets permissions on a cluster policy. Cluster policies can inherit permissions + from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No CLUSTER_POLICY_ID argument specified. Loading names for Cluster Policies drop-down." + names, err := w.ClusterPolicies.PolicyNameToPolicyIdMap(ctx, compute.ListClusterPoliciesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The cluster policy for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the cluster policy for which to get or manage permissions") + } + setPermissionsReq.ClusterPolicyId = args[0] + + response, err := w.ClusterPolicies.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *compute.ClusterPolicyPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq compute.ClusterPolicyPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions CLUSTER_POLICY_ID" + cmd.Short = `Update cluster policy permissions.` + cmd.Long = `Update cluster policy permissions. + + Updates the permissions on a cluster policy. Cluster policies can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No CLUSTER_POLICY_ID argument specified. Loading names for Cluster Policies drop-down." + names, err := w.ClusterPolicies.PolicyNameToPolicyIdMap(ctx, compute.ListClusterPoliciesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The cluster policy for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the cluster policy for which to get or manage permissions") + } + updatePermissionsReq.ClusterPolicyId = args[0] + + response, err := w.ClusterPolicies.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service ClusterPolicies diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index 86173063..dce6753d 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -169,6 +169,8 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) cmd.Flags().Var(&createReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.`) // TODO: map via StringToStringVar: custom_tags + cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `This describes an enum.`) + // TODO: complex arg: docker_image cmd.Flags().StringVar(&createReq.DriverInstancePoolId, "driver-instance-pool-id", createReq.DriverInstancePoolId, `The optional ID of the instance pool for the driver of the cluster belongs.`) cmd.Flags().StringVar(&createReq.DriverNodeTypeId, "driver-node-type-id", createReq.DriverNodeTypeId, `The node type of the Spark driver.`) cmd.Flags().BoolVar(&createReq.EnableElasticDisk, "enable-elastic-disk", createReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.`) @@ -180,6 +182,7 @@ func newCreate() *cobra.Command { cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) cmd.Flags().Var(&createReq.RuntimeEngine, "runtime-engine", `Decides which runtime engine to be use, e.g.`) + cmd.Flags().StringVar(&createReq.SingleUserName, "single-user-name", createReq.SingleUserName, `Single user name if data_security_mode is SINGLE_USER.`) // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars // TODO: array: ssh_public_keys @@ -661,6 +664,153 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionLevelsOverrides []func( + *cobra.Command, + *compute.GetClusterPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq compute.GetClusterPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels CLUSTER_ID" + cmd.Short = `Get cluster permission levels.` + cmd.Long = `Get cluster permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down." 
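+			// Build a cluster name -> cluster ID map so the prompt can show names
+			// while the request itself carries the selected ID.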
+ names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The cluster for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the cluster for which to get or manage permissions") + } + getPermissionLevelsReq.ClusterId = args[0] + + response, err := w.Clusters.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *compute.GetClusterPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq compute.GetClusterPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions CLUSTER_ID" + cmd.Short = `Get cluster permissions.` + cmd.Long = `Get cluster permissions. + + Gets the permissions of a cluster. Clusters can inherit permissions from their + root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down." + names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The cluster for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the cluster for which to get or manage permissions") + } + getPermissionsReq.ClusterId = args[0] + + response, err := w.Clusters.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start list command // Slice with functions to override default command behavior. 
@@ -1217,6 +1367,90 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *compute.ClusterPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq compute.ClusterPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions CLUSTER_ID" + cmd.Short = `Set cluster permissions.` + cmd.Long = `Set cluster permissions. + + Sets permissions on a cluster. Clusters can inherit permissions from their + root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down." + names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The cluster for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the cluster for which to get or manage permissions") + } + setPermissionsReq.ClusterId = args[0] + + response, err := w.Clusters.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start spark-versions command // Slice with functions to override default command behavior. @@ -1456,4 +1690,88 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *compute.ClusterPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq compute.ClusterPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions CLUSTER_ID" + cmd.Short = `Update cluster permissions.` + cmd.Long = `Update cluster permissions. 
+ + Updates the permissions on a cluster. Clusters can inherit permissions from + their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No CLUSTER_ID argument specified. Loading names for Clusters drop-down." + names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The cluster for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the cluster for which to get or manage permissions") + } + updatePermissionsReq.ClusterId = args[0] + + response, err := w.Clusters.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service Clusters diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 74dcc3a5..495d8066 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -4,12 +4,14 @@ package workspace import ( alerts "github.com/databricks/cli/cmd/workspace/alerts" + artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms" cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" current_user "github.com/databricks/cli/cmd/workspace/current-user" + dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" dashboards "github.com/databricks/cli/cmd/workspace/dashboards" data_sources "github.com/databricks/cli/cmd/workspace/data-sources" experiments "github.com/databricks/cli/cmd/workspace/experiments" @@ -26,14 +28,17 @@ import ( libraries "github.com/databricks/cli/cmd/workspace/libraries" metastores "github.com/databricks/cli/cmd/workspace/metastores" model_registry "github.com/databricks/cli/cmd/workspace/model-registry" + model_versions "github.com/databricks/cli/cmd/workspace/model-versions" permissions "github.com/databricks/cli/cmd/workspace/permissions" pipelines "github.com/databricks/cli/cmd/workspace/pipelines" policy_families "github.com/databricks/cli/cmd/workspace/policy-families" providers "github.com/databricks/cli/cmd/workspace/providers" queries "github.com/databricks/cli/cmd/workspace/queries" query_history "github.com/databricks/cli/cmd/workspace/query-history" + 
query_visualizations "github.com/databricks/cli/cmd/workspace/query-visualizations" recipient_activation "github.com/databricks/cli/cmd/workspace/recipient-activation" recipients "github.com/databricks/cli/cmd/workspace/recipients" + registered_models "github.com/databricks/cli/cmd/workspace/registered-models" repos "github.com/databricks/cli/cmd/workspace/repos" schemas "github.com/databricks/cli/cmd/workspace/schemas" secrets "github.com/databricks/cli/cmd/workspace/secrets" @@ -59,12 +64,14 @@ func All() []*cobra.Command { var out []*cobra.Command out = append(out, alerts.New()) + out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) out = append(out, clean_rooms.New()) out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) out = append(out, current_user.New()) + out = append(out, dashboard_widgets.New()) out = append(out, dashboards.New()) out = append(out, data_sources.New()) out = append(out, experiments.New()) @@ -81,14 +88,17 @@ func All() []*cobra.Command { out = append(out, libraries.New()) out = append(out, metastores.New()) out = append(out, model_registry.New()) + out = append(out, model_versions.New()) out = append(out, permissions.New()) out = append(out, pipelines.New()) out = append(out, policy_families.New()) out = append(out, providers.New()) out = append(out, queries.New()) out = append(out, query_history.New()) + out = append(out, query_visualizations.New()) out = append(out, recipient_activation.New()) out = append(out, recipients.New()) + out = append(out, registered_models.New()) out = append(out, repos.New()) out = append(out, schemas.New()) out = append(out, secrets.New()) diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index 7783b9eb..c25825c9 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -65,7 +65,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&createReq.Owner, "owner", createReq.Owner, `Username of current owner of the connection.`) - // TODO: map via StringToStringVar: properties_kvpairs + // TODO: map via StringToStringVar: properties cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `If the connection is read only.`) cmd.Use = "create" diff --git a/cmd/workspace/dashboard-widgets/dashboard-widgets.go b/cmd/workspace/dashboard-widgets/dashboard-widgets.go new file mode 100755 index 00000000..63e8d120 --- /dev/null +++ b/cmd/workspace/dashboard-widgets/dashboard-widgets.go @@ -0,0 +1,228 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package dashboard_widgets + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "dashboard-widgets", + Short: `This is an evolving API that facilitates the addition and removal of widgets from existing dashboards within the Databricks Workspace.`, + Long: `This is an evolving API that facilitates the addition and removal of widgets + from existing dashboards within the Databricks Workspace. Data structures may + change over time.`, + GroupID: "sql", + Annotations: map[string]string{ + "package": "sql", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *sql.CreateWidget, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq sql.CreateWidget + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Add widget to a dashboard.` + cmd.Long = `Add widget to a dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.DashboardWidgets.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newCreate()) + }) +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *sql.DeleteDashboardWidgetRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq sql.DeleteDashboardWidgetRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Remove widget.` + cmd.Long = `Remove widget.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Id = args[0] + + err = w.DashboardWidgets.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
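+	// cobra.NoFileCompletions suppresses shell file-name completion for the
+	// positional arguments of this command.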
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDelete()) + }) +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *sql.CreateWidget, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq sql.CreateWidget + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update existing widget.` + cmd.Long = `Update existing widget.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.DashboardWidgets.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdate()) + }) +} + +// end service DashboardWidgets diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 3c48dc1b..2335ee28 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -58,14 +58,14 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "create" + cmd.Use = "create NAME" cmd.Short = `Create a dashboard object.` cmd.Long = `Create a dashboard object.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { check = cobra.ExactArgs(0) } diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 1e2ff9fa..13087029 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -352,6 +352,86 @@ func init() { }) } +// start delete-runs command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
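+// delete-runs takes either positional EXPERIMENT_ID and MAX_TIMESTAMP_MILLIS
+// arguments or a complete request body supplied through --json.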
+var deleteRunsOverrides []func( + *cobra.Command, + *ml.DeleteRuns, +) + +func newDeleteRuns() *cobra.Command { + cmd := &cobra.Command{} + + var deleteRunsReq ml.DeleteRuns + var deleteRunsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&deleteRunsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().IntVar(&deleteRunsReq.MaxRuns, "max-runs", deleteRunsReq.MaxRuns, `An optional positive integer indicating the maximum number of runs to delete.`) + + cmd.Use = "delete-runs EXPERIMENT_ID MAX_TIMESTAMP_MILLIS" + cmd.Short = `Delete runs by creation time.` + cmd.Long = `Delete runs by creation time. + + Bulk delete runs in an experiment that were created prior to or at the + specified timestamp. Deletes at most max_runs per request.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = deleteRunsJson.Unmarshal(&deleteRunsReq) + if err != nil { + return err + } + } else { + deleteRunsReq.ExperimentId = args[0] + _, err = fmt.Sscan(args[1], &deleteRunsReq.MaxTimestampMillis) + if err != nil { + return fmt.Errorf("invalid MAX_TIMESTAMP_MILLIS: %s", args[1]) + } + } + + response, err := w.Experiments.DeleteRuns(ctx, deleteRunsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteRunsOverrides { + fn(cmd, &deleteRunsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDeleteRuns()) + }) +} + // start delete-tag command // Slice with functions to override default command behavior. @@ -598,7 +678,7 @@ func newGetHistory() *cobra.Command { getHistoryReq.MetricKey = args[0] - response, err := w.Experiments.GetHistory(ctx, getHistoryReq) + response, err := w.Experiments.GetHistoryAll(ctx, getHistoryReq) if err != nil { return err } @@ -623,6 +703,129 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionLevelsOverrides []func( + *cobra.Command, + *ml.GetExperimentPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq ml.GetExperimentPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels EXPERIMENT_ID" + cmd.Short = `Get experiment permission levels.` + cmd.Long = `Get experiment permission levels. 
+ + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionLevelsReq.ExperimentId = args[0] + + response, err := w.Experiments.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *ml.GetExperimentPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq ml.GetExperimentPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions EXPERIMENT_ID" + cmd.Short = `Get experiment permissions.` + cmd.Long = `Get experiment permissions. + + Gets the permissions of an experiment. Experiments can inherit permissions + from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionsReq.ExperimentId = args[0] + + response, err := w.Experiments.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start get-run command // Slice with functions to override default command behavior. @@ -1428,6 +1631,86 @@ func init() { }) } +// start restore-runs command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
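+// restore-runs mirrors delete-runs: MIN_TIMESTAMP_MILLIS is parsed with
+// fmt.Sscan, so a non-numeric value fails before any API call is made.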
+var restoreRunsOverrides []func( + *cobra.Command, + *ml.RestoreRuns, +) + +func newRestoreRuns() *cobra.Command { + cmd := &cobra.Command{} + + var restoreRunsReq ml.RestoreRuns + var restoreRunsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&restoreRunsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().IntVar(&restoreRunsReq.MaxRuns, "max-runs", restoreRunsReq.MaxRuns, `An optional positive integer indicating the maximum number of runs to restore.`) + + cmd.Use = "restore-runs EXPERIMENT_ID MIN_TIMESTAMP_MILLIS" + cmd.Short = `Restore runs by deletion time.` + cmd.Long = `Restore runs by deletion time. + + Bulk restore runs in an experiment that were deleted no earlier than the + specified timestamp. Restores at most max_runs per request.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = restoreRunsJson.Unmarshal(&restoreRunsReq) + if err != nil { + return err + } + } else { + restoreRunsReq.ExperimentId = args[0] + _, err = fmt.Sscan(args[1], &restoreRunsReq.MinTimestampMillis) + if err != nil { + return fmt.Errorf("invalid MIN_TIMESTAMP_MILLIS: %s", args[1]) + } + } + + response, err := w.Experiments.RestoreRuns(ctx, restoreRunsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range restoreRunsOverrides { + fn(cmd, &restoreRunsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newRestoreRuns()) + }) +} + // start search-experiments command // Slice with functions to override default command behavior. @@ -1662,6 +1945,78 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *ml.ExperimentPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq ml.ExperimentPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions EXPERIMENT_ID" + cmd.Short = `Set experiment permissions.` + cmd.Long = `Set experiment permissions. + + Sets permissions on an experiment. 
Experiments can inherit permissions from + their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + setPermissionsReq.ExperimentId = args[0] + + response, err := w.Experiments.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start set-tag command // Slice with functions to override default command behavior. @@ -1815,6 +2170,78 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *ml.ExperimentPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq ml.ExperimentPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions EXPERIMENT_ID" + cmd.Short = `Update experiment permissions.` + cmd.Long = `Update experiment permissions. + + Updates the permissions on an experiment. Experiments can inherit permissions + from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + updatePermissionsReq.ExperimentId = args[0] + + response, err := w.Experiments.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // start update-run command // Slice with functions to override default command behavior. 
diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index 48a9c9c6..3f46abbc 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -368,7 +368,7 @@ func newPatch() *cobra.Command { cmd.Flags().Var(&patchJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: Operations - // TODO: array: schema + // TODO: array: schemas cmd.Use = "patch ID" cmd.Short = `Update group details.` diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 2a95437f..9e7805ae 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -77,7 +77,6 @@ func newCreate() *cobra.Command { cmd.Flags().BoolVar(&createReq.EnableElasticDisk, "enable-elastic-disk", createReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.`) // TODO: complex arg: gcp_attributes cmd.Flags().IntVar(&createReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", createReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`) - // TODO: complex arg: instance_pool_fleet_attributes cmd.Flags().IntVar(&createReq.MaxCapacity, "max-capacity", createReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`) cmd.Flags().IntVar(&createReq.MinIdleInstances, "min-idle-instances", createReq.MinIdleInstances, `Minimum number of idle instances to keep in the instance pool.`) // TODO: array: preloaded_docker_images @@ -247,7 +246,6 @@ func newEdit() *cobra.Command { cmd.Flags().BoolVar(&editReq.EnableElasticDisk, "enable-elastic-disk", editReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.`) // TODO: complex arg: gcp_attributes cmd.Flags().IntVar(&editReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", editReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`) - // TODO: complex arg: instance_pool_fleet_attributes cmd.Flags().IntVar(&editReq.MaxCapacity, "max-capacity", editReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`) cmd.Flags().IntVar(&editReq.MinIdleInstances, "min-idle-instances", editReq.MinIdleInstances, `Minimum number of idle instances to keep in the instance pool.`) // TODO: array: preloaded_docker_images @@ -383,6 +381,153 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
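+// The name-to-ID mapper used by the interactive picker below takes only a
+// context; listing instance pools requires no request parameters.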
+var getPermissionLevelsOverrides []func( + *cobra.Command, + *compute.GetInstancePoolPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq compute.GetInstancePoolPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels INSTANCE_POOL_ID" + cmd.Short = `Get instance pool permission levels.` + cmd.Long = `Get instance pool permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No INSTANCE_POOL_ID argument specified. Loading names for Instance Pools drop-down." + names, err := w.InstancePools.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Instance Pools drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The instance pool for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the instance pool for which to get or manage permissions") + } + getPermissionLevelsReq.InstancePoolId = args[0] + + response, err := w.InstancePools.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *compute.GetInstancePoolPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq compute.GetInstancePoolPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions INSTANCE_POOL_ID" + cmd.Short = `Get instance pool permissions.` + cmd.Long = `Get instance pool permissions. + + Gets the permissions of an instance pool. Instance pools can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No INSTANCE_POOL_ID argument specified. Loading names for Instance Pools drop-down." + names, err := w.InstancePools.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Instance Pools drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The instance pool for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the instance pool for which to get or manage permissions") + } + getPermissionsReq.InstancePoolId = args[0] + + response, err := w.InstancePools.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start list command // Slice with functions to override default command behavior. @@ -431,4 +576,172 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *compute.InstancePoolPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq compute.InstancePoolPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions INSTANCE_POOL_ID" + cmd.Short = `Set instance pool permissions.` + cmd.Long = `Set instance pool permissions. + + Sets permissions on an instance pool. Instance pools can inherit permissions + from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No INSTANCE_POOL_ID argument specified. Loading names for Instance Pools drop-down." + names, err := w.InstancePools.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Instance Pools drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The instance pool for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the instance pool for which to get or manage permissions") + } + setPermissionsReq.InstancePoolId = args[0] + + response, err := w.InstancePools.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *compute.InstancePoolPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq compute.InstancePoolPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions INSTANCE_POOL_ID" + cmd.Short = `Update instance pool permissions.` + cmd.Long = `Update instance pool permissions. + + Updates the permissions on an instance pool. Instance pools can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No INSTANCE_POOL_ID argument specified. Loading names for Instance Pools drop-down." + names, err := w.InstancePools.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Instance Pools drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The instance pool for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the instance pool for which to get or manage permissions") + } + updatePermissionsReq.InstancePoolId = args[0] + + response, err := w.InstancePools.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service InstancePools diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 640f4001..7670ebb7 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -646,6 +646,153 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
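+// The JOB_ID positional argument is the job's numeric ID, not its name; job
+// names are used only to drive the interactive picker.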
+var getPermissionLevelsOverrides []func( + *cobra.Command, + *jobs.GetJobPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq jobs.GetJobPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels JOB_ID" + cmd.Short = `Get job permission levels.` + cmd.Long = `Get job permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down." + names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The job for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the job for which to get or manage permissions") + } + getPermissionLevelsReq.JobId = args[0] + + response, err := w.Jobs.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *jobs.GetJobPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq jobs.GetJobPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions JOB_ID" + cmd.Short = `Get job permissions.` + cmd.Long = `Get job permissions. + + Gets the permissions of a job. Jobs can inherit permissions from their root + object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down." + names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The job for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the job for which to get or manage permissions") + } + getPermissionsReq.JobId = args[0] + + response, err := w.Jobs.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start get-run command // Slice with functions to override default command behavior. @@ -1285,6 +1432,90 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *jobs.JobPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq jobs.JobPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions JOB_ID" + cmd.Short = `Set job permissions.` + cmd.Long = `Set job permissions. + + Sets permissions on a job. Jobs can inherit permissions from their root + object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down." + names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The job for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the job for which to get or manage permissions") + } + setPermissionsReq.JobId = args[0] + + response, err := w.Jobs.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start submit command // Slice with functions to override default command behavior. @@ -1484,4 +1715,88 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *jobs.JobPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq jobs.JobPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions JOB_ID" + cmd.Short = `Update job permissions.` + cmd.Long = `Update job permissions. + + Updates the permissions on a job. Jobs can inherit permissions from their root + object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down." + names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The job for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the job for which to get or manage permissions") + } + updatePermissionsReq.JobId = args[0] + + response, err := w.Jobs.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
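+	// Unlike set-permissions, which replaces the job's direct permissions,
+	// update-permissions merges the supplied ACL entries into them.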
+ for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service Jobs diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index e230bfb0..92671dc3 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -154,7 +154,7 @@ func newClusterStatus() *cobra.Command { clusterStatusReq.ClusterId = args[0] - response, err := w.Libraries.ClusterStatus(ctx, clusterStatusReq) + response, err := w.Libraries.ClusterStatusAll(ctx, clusterStatusReq) if err != nil { return err } diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index d944e0d9..e2e55225 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -1226,6 +1226,129 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionLevelsOverrides []func( + *cobra.Command, + *ml.GetRegisteredModelPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq ml.GetRegisteredModelPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels REGISTERED_MODEL_ID" + cmd.Short = `Get registered model permission levels.` + cmd.Long = `Get registered model permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionLevelsReq.RegisteredModelId = args[0] + + response, err := w.ModelRegistry.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *ml.GetRegisteredModelPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq ml.GetRegisteredModelPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions REGISTERED_MODEL_ID" + cmd.Short = `Get registered model permissions.` + cmd.Long = `Get registered model permissions. + + Gets the permissions of a registered model. 
Registered models can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionsReq.RegisteredModelId = args[0] + + response, err := w.ModelRegistry.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start list-models command // Slice with functions to override default command behavior. @@ -1902,6 +2025,78 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *ml.RegisteredModelPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq ml.RegisteredModelPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions REGISTERED_MODEL_ID" + cmd.Short = `Set registered model permissions.` + cmd.Long = `Set registered model permissions. + + Sets permissions on a registered model. Registered models can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + setPermissionsReq.RegisteredModelId = args[0] + + response, err := w.ModelRegistry.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start test-registry-webhook command // Slice with functions to override default command behavior. @@ -2292,6 +2487,78 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *ml.RegisteredModelPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq ml.RegisteredModelPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions REGISTERED_MODEL_ID" + cmd.Short = `Update registered model permissions.` + cmd.Long = `Update registered model permissions. + + Updates the permissions on a registered model. Registered models can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + updatePermissionsReq.RegisteredModelId = args[0] + + response, err := w.ModelRegistry.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // start update-webhook command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go new file mode 100755 index 00000000..f62cddab --- /dev/null +++ b/cmd/workspace/model-versions/model-versions.go @@ -0,0 +1,400 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package model_versions + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "model-versions", + Short: `Databricks provides a hosted version of MLflow Model Registry in Unity Catalog.`, + Long: `Databricks provides a hosted version of MLflow Model Registry in Unity + Catalog. Models in Unity Catalog provide centralized access control, auditing, + lineage, and discovery of ML models across Databricks workspaces. + + This API reference documents the REST endpoints for managing model versions in + Unity Catalog. 
For more details, see the [registered models API + docs](/api/workspace/registeredmodels).`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *catalog.DeleteModelVersionRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq catalog.DeleteModelVersionRequest + + // TODO: short flags + + cmd.Use = "delete FULL_NAME VERSION" + cmd.Short = `Delete a Model Version.` + cmd.Long = `Delete a Model Version. + + Deletes a model version from the specified registered model. Any aliases + assigned to the model version will also be deleted. + + The caller must be a metastore admin or an owner of the parent registered + model. For the latter case, the caller must also be the owner or have the + **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + privilege on the parent schema.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.FullName = args[0] + _, err = fmt.Sscan(args[1], &deleteReq.Version) + if err != nil { + return fmt.Errorf("invalid VERSION: %s", args[1]) + } + + err = w.ModelVersions.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDelete()) + }) +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *catalog.GetModelVersionRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq catalog.GetModelVersionRequest + + // TODO: short flags + + cmd.Use = "get FULL_NAME VERSION" + cmd.Short = `Get a Model Version.` + cmd.Long = `Get a Model Version. + + Get a model version. + + The caller must be a metastore admin or an owner of (or have the **EXECUTE** + privilege on) the parent registered model. 
For the latter case, the caller + must also be the owner or have the **USE_CATALOG** privilege on the parent + catalog and the **USE_SCHEMA** privilege on the parent schema.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.FullName = args[0] + _, err = fmt.Sscan(args[1], &getReq.Version) + if err != nil { + return fmt.Errorf("invalid VERSION: %s", args[1]) + } + + response, err := w.ModelVersions.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGet()) + }) +} + +// start get-by-alias command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getByAliasOverrides []func( + *cobra.Command, + *catalog.GetByAliasRequest, +) + +func newGetByAlias() *cobra.Command { + cmd := &cobra.Command{} + + var getByAliasReq catalog.GetByAliasRequest + + // TODO: short flags + + cmd.Use = "get-by-alias FULL_NAME ALIAS" + cmd.Short = `Get Model Version By Alias.` + cmd.Long = `Get Model Version By Alias. + + Get a model version by alias. + + The caller must be a metastore admin or an owner of (or have the **EXECUTE** + privilege on) the registered model. For the latter case, the caller must also + be the owner or have the **USE_CATALOG** privilege on the parent catalog and + the **USE_SCHEMA** privilege on the parent schema.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getByAliasReq.FullName = args[0] + getByAliasReq.Alias = args[1] + + response, err := w.ModelVersions.GetByAlias(ctx, getByAliasReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getByAliasOverrides { + fn(cmd, &getByAliasReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetByAlias()) + }) +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listOverrides []func(
+	*cobra.Command,
+	*catalog.ListModelVersionsRequest,
+)
+
+func newList() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var listReq catalog.ListModelVersionsRequest
+
+	// TODO: short flags
+
+	cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Max number of model versions to return.`)
+	cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`)
+
+	cmd.Use = "list FULL_NAME"
+	cmd.Short = `List Model Versions.`
+	cmd.Long = `List Model Versions.
+  
+  List model versions. You can list model versions under a particular schema, or
+  list all model versions in the current metastore.
+  
+  The returned models are filtered based on the privileges of the calling user.
+  For example, the metastore admin is able to list all the model versions. A
+  regular user needs to be the owner or have the **EXECUTE** privilege on the
+  parent registered model to receive the model versions in the response. For the
+  latter case, the caller must also be the owner or have the **USE_CATALOG**
+  privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
+  schema.
+  
+  There is no guarantee of a specific ordering of the elements in the response.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(1)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		listReq.FullName = args[0]
+
+		response, err := w.ModelVersions.ListAll(ctx, listReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range listOverrides {
+		fn(cmd, &listReq)
+	}
+
+	return cmd
+}
+
+func init() {
+	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
+		cmd.AddCommand(newList())
+	})
+}
+
+// start update command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var updateOverrides []func(
+	*cobra.Command,
+	*catalog.UpdateModelVersionRequest,
+)
+
+func newUpdate() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var updateReq catalog.UpdateModelVersionRequest
+
+	// TODO: short flags
+
+	cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the model version.`)
+
+	cmd.Use = "update FULL_NAME VERSION"
+	cmd.Short = `Update a Model Version.`
+	cmd.Long = `Update a Model Version.
+  
+  Updates the specified model version.
+  
+  The caller must be a metastore admin or an owner of the parent registered
+  model. For the latter case, the caller must also be the owner or have the
+  **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+  privilege on the parent schema.
+ + Currently only the comment of the model version can be updated.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + updateReq.FullName = args[0] + _, err = fmt.Sscan(args[1], &updateReq.Version) + if err != nil { + return fmt.Errorf("invalid VERSION: %s", args[1]) + } + + response, err := w.ModelVersions.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdate()) + }) +} + +// end service ModelVersions diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index 39454b24..c168a1a4 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -19,7 +19,52 @@ func New() *cobra.Command { Use: "permissions", Short: `Permissions API are used to create read, write, edit, update and manage access for various users on different objects and endpoints.`, Long: `Permissions API are used to create read, write, edit, update and manage access - for various users on different objects and endpoints.`, + for various users on different objects and endpoints. + + * **[Cluster permissions](:service:clusters)** — Manage which users can + manage, restart, or attach to clusters. + + * **[Cluster policy permissions](:service:clusterpolicies)** — Manage which + users can use cluster policies. + + * **[Delta Live Tables pipeline permissions](:service:pipelines)** — Manage + which users can view, manage, run, cancel, or own a Delta Live Tables + pipeline. + + * **[Job permissions](:service:jobs)** — Manage which users can view, + manage, trigger, cancel, or own a job. + + * **[MLflow experiment permissions](:service:experiments)** — Manage which + users can read, edit, or manage MLflow experiments. + + * **[MLflow registered model permissions](:service:modelregistry)** — Manage + which users can read, edit, or manage MLflow registered models. + + * **[Password permissions](:service:users)** — Manage which users can use + password login when SSO is enabled. + + * **[Instance Pool permissions](:service:instancepools)** — Manage which + users can manage or attach to pools. + + * **[Repo permissions](repos)** — Manage which users can read, run, edit, or + manage a repo. + + * **[Serving endpoint permissions](:service:servingendpoints)** — Manage + which users can view, query, or manage a serving endpoint. + + * **[SQL warehouse permissions](:service:warehouses)** — Manage which users + can use or manage SQL warehouses. + + * **[Token permissions](:service:tokenmanagement)** — Manage which users can + create or use tokens. + + * **[Workspace object permissions](:service:workspace)** — Manage which + users can read, run, edit, or manage directories, files, and notebooks. 
+ + For the mapping of the required permissions for specific actions or abilities + and other important information, see [Access Control]. + + [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html`, GroupID: "iam", Annotations: map[string]string{ "package": "iam", @@ -54,8 +99,8 @@ func newGet() *cobra.Command { cmd.Short = `Get object permissions.` cmd.Long = `Get object permissions. - Gets the permission of an object. Objects can inherit permissions from their - parent objects or root objects.` + Gets the permissions of an object. Objects can inherit permissions from their + parent objects or root object.` cmd.Annotations = make(map[string]string) @@ -114,8 +159,8 @@ func newGetPermissionLevels() *cobra.Command { // TODO: short flags cmd.Use = "get-permission-levels REQUEST_OBJECT_TYPE REQUEST_OBJECT_ID" - cmd.Short = `Get permission levels.` - cmd.Long = `Get permission levels. + cmd.Short = `Get object permission levels.` + cmd.Long = `Get object permission levels. Gets the permission levels that a user can have on an object.` @@ -180,11 +225,11 @@ func newSet() *cobra.Command { // TODO: array: access_control_list cmd.Use = "set REQUEST_OBJECT_TYPE REQUEST_OBJECT_ID" - cmd.Short = `Set permissions.` - cmd.Long = `Set permissions. + cmd.Short = `Set object permissions.` + cmd.Long = `Set object permissions. - Sets permissions on object. Objects can inherit permissions from their parent - objects and root objects.` + Sets permissions on an object. Objects can inherit permissions from their + parent objects or root object.` cmd.Annotations = make(map[string]string) @@ -207,11 +252,11 @@ func newSet() *cobra.Command { setReq.RequestObjectType = args[0] setReq.RequestObjectId = args[1] - err = w.Permissions.Set(ctx, setReq) + response, err := w.Permissions.Set(ctx, setReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -253,10 +298,11 @@ func newUpdate() *cobra.Command { // TODO: array: access_control_list cmd.Use = "update REQUEST_OBJECT_TYPE REQUEST_OBJECT_ID" - cmd.Short = `Update permission.` - cmd.Long = `Update permission. + cmd.Short = `Update object permissions.` + cmd.Long = `Update object permissions. - Updates the permissions on an object.` + Updates the permissions on an object. Objects can inherit permissions from + their parent objects or root object.` cmd.Annotations = make(map[string]string) @@ -279,11 +325,11 @@ func newUpdate() *cobra.Command { updateReq.RequestObjectType = args[0] updateReq.RequestObjectId = args[1] - err = w.Permissions.Update(ctx, updateReq) + response, err := w.Permissions.Update(ctx, updateReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 708343b2..10bcc226 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -272,6 +272,153 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
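+// For illustration only, hypothetical invocations of the permission commands
+// defined below (the pipeline ID is a made-up example; omitting it prompts an
+// interactive drop-down):
+//
+//	databricks pipelines get-permission-levels 1234-567890-abcde123
+//	databricks pipelines get-permissions 1234-567890-abcde123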
+var getPermissionLevelsOverrides []func( + *cobra.Command, + *pipelines.GetPipelinePermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq pipelines.GetPipelinePermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels PIPELINE_ID" + cmd.Short = `Get pipeline permission levels.` + cmd.Long = `Get pipeline permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." + names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The pipeline for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the pipeline for which to get or manage permissions") + } + getPermissionLevelsReq.PipelineId = args[0] + + response, err := w.Pipelines.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *pipelines.GetPipelinePermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq pipelines.GetPipelinePermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions PIPELINE_ID" + cmd.Short = `Get pipeline permissions.` + cmd.Long = `Get pipeline permissions. + + Gets the permissions of a pipeline. Pipelines can inherit permissions from + their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." + names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The pipeline for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the pipeline for which to get or manage permissions") + } + getPermissionsReq.PipelineId = args[0] + + response, err := w.Pipelines.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start get-update command // Slice with functions to override default command behavior. @@ -664,6 +811,90 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *pipelines.PipelinePermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq pipelines.PipelinePermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions PIPELINE_ID" + cmd.Short = `Set pipeline permissions.` + cmd.Long = `Set pipeline permissions. + + Sets permissions on a pipeline. Pipelines can inherit permissions from their + root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." + names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The pipeline for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the pipeline for which to get or manage permissions") + } + setPermissionsReq.PipelineId = args[0] + + response, err := w.Pipelines.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start start-update command // Slice with functions to override default command behavior. @@ -942,4 +1173,88 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *pipelines.PipelinePermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq pipelines.PipelinePermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions PIPELINE_ID" + cmd.Short = `Update pipeline permissions.` + cmd.Long = `Update pipeline permissions. + + Updates the permissions on a pipeline. Pipelines can inherit permissions from + their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." + names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The pipeline for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the pipeline for which to get or manage permissions") + } + updatePermissionsReq.PipelineId = args[0] + + response, err := w.Pipelines.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service Pipelines diff --git a/cmd/workspace/query-visualizations/query-visualizations.go b/cmd/workspace/query-visualizations/query-visualizations.go new file mode 100755 index 00000000..fae0f934 --- /dev/null +++ b/cmd/workspace/query-visualizations/query-visualizations.go @@ -0,0 +1,236 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+
+package query_visualizations
+
+import (
+	"fmt"
+
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/databricks-sdk-go/service/sql"
+	"github.com/spf13/cobra"
+)
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var cmdOverrides []func(*cobra.Command)
+
+func New() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "query-visualizations",
+		Short: `This is an evolving API that facilitates the addition and removal of visualizations from existing queries within the Databricks Workspace.`,
+		Long: `This is an evolving API that facilitates the addition and removal of
+  visualizations from existing queries within the Databricks Workspace. Data
+  structures may change over time.`,
+		GroupID: "sql",
+		Annotations: map[string]string{
+			"package": "sql",
+		},
+
+		// This service is being previewed; hide from help output.
+		Hidden: true,
+	}
+
+	// Apply optional overrides to this command.
+	for _, fn := range cmdOverrides {
+		fn(cmd)
+	}
+
+	return cmd
+}
+
+// start create command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var createOverrides []func(
+	*cobra.Command,
+	*sql.CreateQueryVisualizationRequest,
+)
+
+func newCreate() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var createReq sql.CreateQueryVisualizationRequest
+	var createJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Use = "create"
+	cmd.Short = `Add visualization to a query.`
+	cmd.Long = `Add visualization to a query.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = createJson.Unmarshal(&createReq)
+			if err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}
+
+		response, err := w.QueryVisualizations.Create(ctx, createReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range createOverrides {
+		fn(cmd, &createReq)
+	}
+
+	return cmd
+}
+
+func init() {
+	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
+		cmd.AddCommand(newCreate())
+	})
+}
+
+// start delete command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
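+// For illustration only: the create command above accepts its request body
+// solely via --json. A hypothetical payload (field names assumed from the
+// sql.CreateQueryVisualizationRequest type, values made up):
+//
+//	databricks query-visualizations create --json '{"query_id": "abc123", "type": "CHART", "options": {}}'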
+var deleteOverrides []func( + *cobra.Command, + *sql.DeleteQueryVisualizationRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq sql.DeleteQueryVisualizationRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Remove visualization.` + cmd.Long = `Remove visualization.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Id = args[0] + + err = w.QueryVisualizations.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDelete()) + }) +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *sql.Visualization, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq sql.Visualization + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Edit existing visualization.` + cmd.Long = `Edit existing visualization.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.QueryVisualizations.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdate()) + }) +} + +// end service QueryVisualizations diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go new file mode 100755 index 00000000..cc782253 --- /dev/null +++ b/cmd/workspace/registered-models/registered-models.go @@ -0,0 +1,635 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+
+package registered_models
+
+import (
+	"fmt"
+
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/databricks-sdk-go/service/catalog"
+	"github.com/spf13/cobra"
+)
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var cmdOverrides []func(*cobra.Command)
+
+func New() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "registered-models",
+		Short: `Databricks provides a hosted version of MLflow Model Registry in Unity Catalog.`,
+		Long: `Databricks provides a hosted version of MLflow Model Registry in Unity
+  Catalog. Models in Unity Catalog provide centralized access control, auditing,
+  lineage, and discovery of ML models across Databricks workspaces.
+  
+  An MLflow registered model resides in the third layer of Unity Catalog’s
+  three-level namespace. Registered models contain model versions, which
+  correspond to actual ML models (MLflow models). Creating new model versions
+  currently requires use of the MLflow Python client. Once model versions are
+  created, you can load them for batch inference using MLflow Python client
+  APIs, or deploy them for real-time serving using Databricks Model Serving.
+  
+  All operations on registered models and model versions require USE_CATALOG
+  permissions on the enclosing catalog and USE_SCHEMA permissions on the
+  enclosing schema. In addition, the following privileges are required for
+  various operations:
+  
+  * To create a registered model, users must additionally have the CREATE_MODEL
+  permission on the target schema. * To view registered model or model version
+  metadata, model version data files, or invoke a model version, users must
+  additionally have the EXECUTE permission on the registered model. * To update
+  registered model or model version tags, users must additionally have APPLY TAG
+  permissions on the registered model. * To update other registered model or
+  model version metadata (comments, aliases), create a new model version, or
+  update permissions on the registered model, users must be owners of the
+  registered model.
+  
+  Note: The securable type for models is "FUNCTION". When using REST APIs (e.g.
+  tagging, grants) that specify a securable type, use "FUNCTION" as the
+  securable type.`,
+		GroupID: "catalog",
+		Annotations: map[string]string{
+			"package": "catalog",
+		},
+	}
+
+	// Apply optional overrides to this command.
+	for _, fn := range cmdOverrides {
+		fn(cmd)
+	}
+
+	return cmd
+}
+
+// start create command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
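+// For illustration only, a hypothetical invocation of the create command
+// defined below (catalog, schema, and model names are made-up examples):
+//
+//	databricks registered-models create main default revenue_model --comment "Quarterly revenue forecaster"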
+var createOverrides []func( + *cobra.Command, + *catalog.CreateRegisteredModelRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq catalog.CreateRegisteredModelRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `The comment attached to the registered model.`) + cmd.Flags().StringVar(&createReq.StorageLocation, "storage-location", createReq.StorageLocation, `The storage location on the cloud under which model version data files are stored.`) + + cmd.Use = "create CATALOG_NAME SCHEMA_NAME NAME" + cmd.Short = `Create a Registered Model.` + cmd.Long = `Create a Registered Model. + + Creates a new registered model in Unity Catalog. + + File storage for model versions in the registered model will be located in the + default location which is specified by the parent schema, or the parent + catalog, or the Metastore. + + For registered model creation to succeed, the user must satisfy the following + conditions: - The caller must be a metastore admin, or be the owner of the + parent catalog and schema, or have the **USE_CATALOG** privilege on the parent + catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller + must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the parent + schema.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(3) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + createReq.CatalogName = args[0] + createReq.SchemaName = args[1] + createReq.Name = args[2] + } + + response, err := w.RegisteredModels.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newCreate()) + }) +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *catalog.DeleteRegisteredModelRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq catalog.DeleteRegisteredModelRequest + + // TODO: short flags + + cmd.Use = "delete FULL_NAME" + cmd.Short = `Delete a Registered Model.` + cmd.Long = `Delete a Registered Model. + + Deletes a registered model and all its model versions from the specified + parent catalog and schema. + + The caller must be a metastore admin or an owner of the registered model. 
For + the latter case, the caller must also be the owner or have the **USE_CATALOG** + privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent + schema.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FULL_NAME argument specified. Loading names for Registered Models drop-down." + names, err := w.RegisteredModels.RegisteredModelInfoNameToFullNameMap(ctx, catalog.ListRegisteredModelsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Registered Models drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The three-level (fully qualified) name of the registered model") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the three-level (fully qualified) name of the registered model") + } + deleteReq.FullName = args[0] + + err = w.RegisteredModels.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDelete()) + }) +} + +// start delete-alias command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteAliasOverrides []func( + *cobra.Command, + *catalog.DeleteAliasRequest, +) + +func newDeleteAlias() *cobra.Command { + cmd := &cobra.Command{} + + var deleteAliasReq catalog.DeleteAliasRequest + + // TODO: short flags + + cmd.Use = "delete-alias FULL_NAME ALIAS" + cmd.Short = `Delete a Registered Model Alias.` + cmd.Long = `Delete a Registered Model Alias. + + Deletes a registered model alias. + + The caller must be a metastore admin or an owner of the registered model. For + the latter case, the caller must also be the owner or have the **USE_CATALOG** + privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent + schema.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteAliasReq.FullName = args[0] + deleteAliasReq.Alias = args[1] + + err = w.RegisteredModels.DeleteAlias(ctx, deleteAliasReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range deleteAliasOverrides { + fn(cmd, &deleteAliasReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDeleteAlias()) + }) +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *catalog.GetRegisteredModelRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq catalog.GetRegisteredModelRequest + + // TODO: short flags + + cmd.Use = "get FULL_NAME" + cmd.Short = `Get a Registered Model.` + cmd.Long = `Get a Registered Model. + + Get a registered model. + + The caller must be a metastore admin or an owner of (or have the **EXECUTE** + privilege on) the registered model. For the latter case, the caller must also + be the owner or have the **USE_CATALOG** privilege on the parent catalog and + the **USE_SCHEMA** privilege on the parent schema.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FULL_NAME argument specified. Loading names for Registered Models drop-down." + names, err := w.RegisteredModels.RegisteredModelInfoNameToFullNameMap(ctx, catalog.ListRegisteredModelsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Registered Models drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The three-level (fully qualified) name of the registered model") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the three-level (fully qualified) name of the registered model") + } + getReq.FullName = args[0] + + response, err := w.RegisteredModels.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGet()) + }) +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listOverrides []func(
+	*cobra.Command,
+	*catalog.ListRegisteredModelsRequest,
+)
+
+func newList() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var listReq catalog.ListRegisteredModelsRequest
+	var listJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Flags().StringVar(&listReq.CatalogName, "catalog-name", listReq.CatalogName, `The identifier of the catalog under which to list registered models.`)
+	cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Max number of registered models to return.`)
+	cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`)
+	cmd.Flags().StringVar(&listReq.SchemaName, "schema-name", listReq.SchemaName, `The identifier of the schema under which to list registered models.`)
+
+	cmd.Use = "list"
+	cmd.Short = `List Registered Models.`
+	cmd.Long = `List Registered Models.
+  
+  List registered models. You can list registered models under a particular
+  schema, or list all registered models in the current metastore.
+  
+  The returned models are filtered based on the privileges of the calling user.
+  For example, the metastore admin is able to list all the registered models. A
+  regular user needs to be the owner or have the **EXECUTE** privilege on the
+  registered model to receive the registered models in the response. For the
+  latter case, the caller must also be the owner or have the **USE_CATALOG**
+  privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent
+  schema.
+  
+  There is no guarantee of a specific ordering of the elements in the response.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(0)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = listJson.Unmarshal(&listReq)
+			if err != nil {
+				return err
+			}
+		} else {
+		}
+
+		response, err := w.RegisteredModels.ListAll(ctx, listReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range listOverrides {
+		fn(cmd, &listReq)
+	}
+
+	return cmd
+}
+
+func init() {
+	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
+		cmd.AddCommand(newList())
+	})
+}
+
+// start set-alias command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
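+// For illustration only, a hypothetical invocation of the set-alias command
+// defined below (model name, alias, and version number are made-up examples):
+//
+//	databricks registered-models set-alias main.default.revenue_model champion 2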
+var setAliasOverrides []func( + *cobra.Command, + *catalog.SetRegisteredModelAliasRequest, +) + +func newSetAlias() *cobra.Command { + cmd := &cobra.Command{} + + var setAliasReq catalog.SetRegisteredModelAliasRequest + var setAliasJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setAliasJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "set-alias FULL_NAME ALIAS VERSION_NUM" + cmd.Short = `Set a Registered Model Alias.` + cmd.Long = `Set a Registered Model Alias. + + Set an alias on the specified registered model. + + The caller must be a metastore admin or an owner of the registered model. For + the latter case, the caller must also be the owner or have the **USE_CATALOG** + privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent + schema.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(3) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setAliasJson.Unmarshal(&setAliasReq) + if err != nil { + return err + } + } else { + setAliasReq.FullName = args[0] + setAliasReq.Alias = args[1] + _, err = fmt.Sscan(args[2], &setAliasReq.VersionNum) + if err != nil { + return fmt.Errorf("invalid VERSION_NUM: %s", args[2]) + } + } + + response, err := w.RegisteredModels.SetAlias(ctx, setAliasReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setAliasOverrides { + fn(cmd, &setAliasReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetAlias()) + }) +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *catalog.UpdateRegisteredModelRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq catalog.UpdateRegisteredModelRequest + + // TODO: short flags + + cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the registered model.`) + cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the registered model.`) + cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the registered model.`) + + cmd.Use = "update FULL_NAME" + cmd.Short = `Update a Registered Model.` + cmd.Long = `Update a Registered Model. + + Updates the specified registered model. + + The caller must be a metastore admin or an owner of the registered model. For + the latter case, the caller must also be the owner or have the **USE_CATALOG** + privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent + schema. 
+ + Currently only the name, the owner or the comment of the registered model can + be updated.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FULL_NAME argument specified. Loading names for Registered Models drop-down." + names, err := w.RegisteredModels.RegisteredModelInfoNameToFullNameMap(ctx, catalog.ListRegisteredModelsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Registered Models drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The three-level (fully qualified) name of the registered model") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the three-level (fully qualified) name of the registered model") + } + updateReq.FullName = args[0] + + response, err := w.RegisteredModels.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdate()) + }) +} + +// end service RegisteredModels diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index b1e00371..2d510e90 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -275,6 +275,153 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionLevelsOverrides []func( + *cobra.Command, + *workspace.GetRepoPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq workspace.GetRepoPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels REPO_ID" + cmd.Short = `Get repo permission levels.` + cmd.Long = `Get repo permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No REPO_ID argument specified. Loading names for Repos drop-down." + names, err := w.Repos.RepoInfoPathToIdMap(ctx, workspace.ListReposRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The repo for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the repo for which to get or manage permissions") + } + getPermissionLevelsReq.RepoId = args[0] + + response, err := w.Repos.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *workspace.GetRepoPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq workspace.GetRepoPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions REPO_ID" + cmd.Short = `Get repo permissions.` + cmd.Long = `Get repo permissions. + + Gets the permissions of a repo. Repos can inherit permissions from their root + object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No REPO_ID argument specified. Loading names for Repos drop-down." + names, err := w.Repos.RepoInfoPathToIdMap(ctx, workspace.ListReposRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The repo for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the repo for which to get or manage permissions") + } + getPermissionsReq.RepoId = args[0] + + response, err := w.Repos.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start list command // Slice with functions to override default command behavior. @@ -351,6 +498,90 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
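+// Usage sketch (illustrative): REPO_ID stays positional (or is prompted for),
+// while the access control list travels in the --json body. The field name
+// follows the access_control_list TODO below; the concrete values, including
+// the permission level, are assumptions.
+//
+//	databricks repos set-permissions 123456 --json '{
+//	  "access_control_list": [
+//	    {"user_name": "someone@example.com", "permission_level": "CAN_MANAGE"}
+//	  ]
+//	}'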
+var setPermissionsOverrides []func( + *cobra.Command, + *workspace.RepoPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq workspace.RepoPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions REPO_ID" + cmd.Short = `Set repo permissions.` + cmd.Long = `Set repo permissions. + + Sets permissions on a repo. Repos can inherit permissions from their root + object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No REPO_ID argument specified. Loading names for Repos drop-down." + names, err := w.Repos.RepoInfoPathToIdMap(ctx, workspace.ListReposRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The repo for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the repo for which to get or manage permissions") + } + setPermissionsReq.RepoId = args[0] + + response, err := w.Repos.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start update command // Slice with functions to override default command behavior. @@ -440,4 +671,88 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *workspace.RepoPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq workspace.RepoPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions REPO_ID" + cmd.Short = `Update repo permissions.` + cmd.Long = `Update repo permissions. + + Updates the permissions on a repo. 
Repos can inherit permissions from their + root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No REPO_ID argument specified. Loading names for Repos drop-down." + names, err := w.Repos.RepoInfoPathToIdMap(ctx, workspace.ListReposRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The repo for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the repo for which to get or manage permissions") + } + updatePermissionsReq.RepoId = args[0] + + response, err := w.Repos.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service Repos diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index a8b907ac..5425da90 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -427,6 +427,79 @@ func init() { }) } +// start get-secret command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getSecretOverrides []func( + *cobra.Command, + *workspace.GetSecretRequest, +) + +func newGetSecret() *cobra.Command { + cmd := &cobra.Command{} + + var getSecretReq workspace.GetSecretRequest + + // TODO: short flags + + cmd.Use = "get-secret SCOPE KEY" + cmd.Short = `Get a secret.` + cmd.Long = `Get a secret. + + Gets the bytes representation of a secret value for the specified scope and + key. + + Users need the READ permission to make this call. + + Note that the secret value returned is in bytes. The interpretation of the + bytes is determined by the caller in DBUtils and the type the data is decoded + into. + + Throws PERMISSION_DENIED if the user does not have permission to make this + API call. 
Throws RESOURCE_DOES_NOT_EXIST if no such secret or secret scope + exists.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getSecretReq.Scope = args[0] + getSecretReq.Key = args[1] + + response, err := w.Secrets.GetSecret(ctx, getSecretReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getSecretOverrides { + fn(cmd, &getSecretReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetSecret()) + }) +} + // start list-acls command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index f30a92d4..1958dd21 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -367,7 +367,7 @@ func newPatch() *cobra.Command { cmd.Flags().Var(&patchJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: Operations - // TODO: array: schema + // TODO: array: schemas cmd.Use = "patch ID" cmd.Short = `Update service principal details.` diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 33b0abac..e22a3844 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -374,6 +374,129 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionLevelsOverrides []func( + *cobra.Command, + *serving.GetServingEndpointPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq serving.GetServingEndpointPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels SERVING_ENDPOINT_ID" + cmd.Short = `Get serving endpoint permission levels.` + cmd.Long = `Get serving endpoint permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionLevelsReq.ServingEndpointId = args[0] + + response, err := w.ServingEndpoints.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *serving.GetServingEndpointPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq serving.GetServingEndpointPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions SERVING_ENDPOINT_ID" + cmd.Short = `Get serving endpoint permissions.` + cmd.Long = `Get serving endpoint permissions. + + Gets the permissions of a serving endpoint. Serving endpoints can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionsReq.ServingEndpointId = args[0] + + response, err := w.ServingEndpoints.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start list command // Slice with functions to override default command behavior. @@ -542,6 +665,78 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *serving.ServingEndpointPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq serving.ServingEndpointPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions SERVING_ENDPOINT_ID" + cmd.Short = `Set serving endpoint permissions.` + cmd.Long = `Set serving endpoint permissions. + + Sets permissions on a serving endpoint. 
Serving endpoints can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + setPermissionsReq.ServingEndpointId = args[0] + + response, err := w.ServingEndpoints.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start update-config command // Slice with functions to override default command behavior. @@ -630,4 +825,76 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *serving.ServingEndpointPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq serving.ServingEndpointPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions SERVING_ENDPOINT_ID" + cmd.Short = `Update serving endpoint permissions.` + cmd.Long = `Update serving endpoint permissions. + + Updates the permissions on a serving endpoint. Serving endpoints can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + updatePermissionsReq.ServingEndpointId = args[0] + + response, err := w.ServingEndpoints.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service ServingEndpoints diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index dcee2f0e..b934e264 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -262,6 +262,103 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionLevelsOverrides []func( + *cobra.Command, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get-permission-levels" + cmd.Short = `Get token permission levels.` + cmd.Long = `Get token permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.TokenManagement.GetPermissionLevels(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get-permissions" + cmd.Short = `Get token permissions.` + cmd.Long = `Get token permissions. + + Gets the permissions of all tokens. Tokens can inherit permissions from their + root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.TokenManagement.GetPermissions(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start list command // Slice with functions to override default command behavior. @@ -337,4 +434,154 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
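+// Usage sketch (illustrative): unlike the repo variant, this command accepts
+// no positional arguments, so the whole request body comes from --json (inline
+// or @path/to/file.json). The permission level shown is an assumption.
+//
+//	databricks token-management set-permissions --json '{
+//	  "access_control_list": [
+//	    {"group_name": "admins", "permission_level": "CAN_USE"}
+//	  ]
+//	}'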
+var setPermissionsOverrides []func( + *cobra.Command, + *settings.TokenPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq settings.TokenPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions" + cmd.Short = `Set token permissions.` + cmd.Long = `Set token permissions. + + Sets permissions on all tokens. Tokens can inherit permissions from their root + object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } else { + } + + response, err := w.TokenManagement.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *settings.TokenPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq settings.TokenPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions" + cmd.Short = `Update token permissions.` + cmd.Long = `Update token permissions. + + Updates the permissions on all tokens. Tokens can inherit permissions from + their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } else { + } + + response, err := w.TokenManagement.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service TokenManagement diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 2dfbf6e8..b1a8b057 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -276,6 +276,103 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionLevelsOverrides []func( + *cobra.Command, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get-permission-levels" + cmd.Short = `Get password permission levels.` + cmd.Long = `Get password permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.Users.GetPermissionLevels(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get-permissions" + cmd.Short = `Get password permissions.` + cmd.Long = `Get password permissions. + + Gets the permissions of all passwords. Passwords can inherit permissions from + their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.Users.GetPermissions(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start list command // Slice with functions to override default command behavior. 
@@ -375,7 +472,7 @@ func newPatch() *cobra.Command { cmd.Flags().Var(&patchJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: Operations - // TODO: array: schema + // TODO: array: schemas cmd.Use = "patch ID" cmd.Short = `Update user details.` @@ -441,6 +538,81 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *iam.PasswordPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq iam.PasswordPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions" + cmd.Short = `Set password permissions.` + cmd.Long = `Set password permissions. + + Sets permissions on all passwords. Passwords can inherit permissions from + their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } else { + } + + response, err := w.Users.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start update command // Slice with functions to override default command behavior. @@ -534,4 +706,79 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *iam.PasswordPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq iam.PasswordPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions" + cmd.Short = `Update password permissions.` + cmd.Long = `Update password permissions. + + Updates the permissions on all passwords. 
Passwords can inherit permissions + from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } else { + } + + response, err := w.Users.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service Users diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 2d202682..4dbfc585 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -32,9 +32,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "catalog", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Apply optional overrides to this command. @@ -66,7 +63,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `The comment attached to the volume.`) cmd.Flags().StringVar(&createReq.StorageLocation, "storage-location", createReq.StorageLocation, `The storage location on the cloud.`) - cmd.Use = "create CATALOG_NAME NAME SCHEMA_NAME VOLUME_TYPE" + cmd.Use = "create CATALOG_NAME SCHEMA_NAME NAME VOLUME_TYPE" cmd.Short = `Create a Volume.` cmd.Long = `Create a Volume. @@ -111,8 +108,8 @@ func newCreate() *cobra.Command { } } else { createReq.CatalogName = args[0] - createReq.Name = args[1] - createReq.SchemaName = args[2] + createReq.SchemaName = args[1] + createReq.Name = args[2] _, err = fmt.Sscan(args[3], &createReq.VolumeType) if err != nil { return fmt.Errorf("invalid VOLUME_TYPE: %s", args[3]) @@ -253,6 +250,9 @@ func newList() *cobra.Command { There is no guarantee of a specific ordering of the elements in the array.` + // This command is being previewed; hide from help output. + cmd.Hidden = true + cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index 378b931d..c64e0e0b 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -418,6 +418,153 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
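+// Usage sketch (illustrative): if WAREHOUSE_ID is omitted, the command loads
+// warehouse names and offers a drop-down prompt; the ID below is a made-up
+// example.
+//
+//	databricks warehouses get-permission-levels 1234567890abcdef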
+var getPermissionLevelsOverrides []func( + *cobra.Command, + *sql.GetWarehousePermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq sql.GetWarehousePermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels WAREHOUSE_ID" + cmd.Short = `Get SQL warehouse permission levels.` + cmd.Long = `Get SQL warehouse permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No WAREHOUSE_ID argument specified. Loading names for Warehouses drop-down." + names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The SQL warehouse for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the sql warehouse for which to get or manage permissions") + } + getPermissionLevelsReq.WarehouseId = args[0] + + response, err := w.Warehouses.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *sql.GetWarehousePermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq sql.GetWarehousePermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions WAREHOUSE_ID" + cmd.Short = `Get SQL warehouse permissions.` + cmd.Long = `Get SQL warehouse permissions. + + Gets the permissions of a SQL warehouse. SQL warehouses can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No WAREHOUSE_ID argument specified. Loading names for Warehouses drop-down." + names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The SQL warehouse for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the sql warehouse for which to get or manage permissions") + } + getPermissionsReq.WarehouseId = args[0] + + response, err := w.Warehouses.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start get-workspace-warehouse-config command // Slice with functions to override default command behavior. @@ -541,6 +688,90 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *sql.WarehousePermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq sql.WarehousePermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions WAREHOUSE_ID" + cmd.Short = `Set SQL warehouse permissions.` + cmd.Long = `Set SQL warehouse permissions. + + Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions + from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No WAREHOUSE_ID argument specified. Loading names for Warehouses drop-down." + names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The SQL warehouse for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the sql warehouse for which to get or manage permissions") + } + setPermissionsReq.WarehouseId = args[0] + + response, err := w.Warehouses.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + // start set-workspace-warehouse-config command // Slice with functions to override default command behavior. @@ -818,4 +1049,88 @@ func init() { }) } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *sql.WarehousePermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq sql.WarehousePermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions WAREHOUSE_ID" + cmd.Short = `Update SQL warehouse permissions.` + cmd.Long = `Update SQL warehouse permissions. + + Updates the permissions on a SQL warehouse. SQL warehouses can inherit + permissions from their root object.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No WAREHOUSE_ID argument specified. Loading names for Warehouses drop-down." + names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The SQL warehouse for which to get or manage permissions") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the sql warehouse for which to get or manage permissions") + } + updatePermissionsReq.WarehouseId = args[0] + + response, err := w.Warehouses.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service Warehouses diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index aeca9525..124680f0 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -212,6 +212,131 @@ func init() { }) } +// start get-permission-levels command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
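+// Usage sketch (illustrative): both the workspace object type and the object
+// ID are required positional arguments; the concrete values below are
+// assumptions.
+//
+//	databricks workspace get-permission-levels directories 1234567890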
+var getPermissionLevelsOverrides []func( + *cobra.Command, + *workspace.GetWorkspaceObjectPermissionLevelsRequest, +) + +func newGetPermissionLevels() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionLevelsReq workspace.GetWorkspaceObjectPermissionLevelsRequest + + // TODO: short flags + + cmd.Use = "get-permission-levels WORKSPACE_OBJECT_TYPE WORKSPACE_OBJECT_ID" + cmd.Short = `Get workspace object permission levels.` + cmd.Long = `Get workspace object permission levels. + + Gets the permission levels that a user can have on an object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionLevelsReq.WorkspaceObjectType = args[0] + getPermissionLevelsReq.WorkspaceObjectId = args[1] + + response, err := w.Workspace.GetPermissionLevels(ctx, getPermissionLevelsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissionLevels()) + }) +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *workspace.GetWorkspaceObjectPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq workspace.GetWorkspaceObjectPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions WORKSPACE_OBJECT_TYPE WORKSPACE_OBJECT_ID" + cmd.Short = `Get workspace object permissions.` + cmd.Long = `Get workspace object permissions. + + Gets the permissions of a workspace object. Workspace objects can inherit + permissions from their parent objects or root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionsReq.WorkspaceObjectType = args[0] + getPermissionsReq.WorkspaceObjectId = args[1] + + response, err := w.Workspace.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPermissions()) + }) +} + // start get-status command // Slice with functions to override default command behavior. 
@@ -507,4 +632,150 @@ func init() { }) } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *workspace.WorkspaceObjectPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq workspace.WorkspaceObjectPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions WORKSPACE_OBJECT_TYPE WORKSPACE_OBJECT_ID" + cmd.Short = `Set workspace object permissions.` + cmd.Long = `Set workspace object permissions. + + Sets permissions on a workspace object. Workspace objects can inherit + permissions from their parent objects or root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + setPermissionsReq.WorkspaceObjectType = args[0] + setPermissionsReq.WorkspaceObjectId = args[1] + + response, err := w.Workspace.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newSetPermissions()) + }) +} + +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *workspace.WorkspaceObjectPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq workspace.WorkspaceObjectPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions WORKSPACE_OBJECT_TYPE WORKSPACE_OBJECT_ID" + cmd.Short = `Update workspace object permissions.` + cmd.Long = `Update workspace object permissions. + + Updates the permissions on a workspace object. 
Workspace objects can inherit + permissions from their parent objects or root object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + updatePermissionsReq.WorkspaceObjectType = args[0] + updatePermissionsReq.WorkspaceObjectId = args[1] + + response, err := w.Workspace.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdatePermissions()) + }) +} + // end service Workspace diff --git a/go.mod b/go.mod index 9534a4c9..7e24b0db 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.14.1 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.19.0 // Apache 2.0 github.com/fatih/color v1.15.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.3.0 // BSD-3-Clause @@ -32,7 +32,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect @@ -42,7 +42,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/s2a-go v0.1.4 // indirect + github.com/google/s2a-go v0.1.5 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -54,10 +54,10 @@ require ( golang.org/x/net v0.14.0 // indirect golang.org/x/sys v0.11.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/api v0.131.0 // indirect + google.golang.org/api v0.138.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/grpc v1.56.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect + google.golang.org/grpc v1.57.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index b8c90e5e..83bb01b6 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.20.1 
h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -36,8 +36,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/databricks/databricks-sdk-go v0.14.1 h1:s9x18c2i6XbJxem6zKdTrrwEUXQX/Nzn0iVM+qGlRus= -github.com/databricks/databricks-sdk-go v0.14.1/go.mod h1:0iuEtPIoD6oqw7OuFbPskhlEryt2FPH+Ies1UYiiDy8= +github.com/databricks/databricks-sdk-go v0.19.0 h1:Xh5A90/+8ehW7fTqoQbQK5xZu7a/akv3Xwv8UdWB4GU= +github.com/databricks/databricks-sdk-go v0.19.0/go.mod h1:Bt/3i3ry/rQdE6Y+psvkAENlp+LzJHaQK5PsLIstQb4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -93,8 +93,8 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= +github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -257,8 +257,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.131.0 h1:AcgWS2edQ4chVEt/SxgDKubVu/9/idCJy00tBGuGB4M= -google.golang.org/api v0.131.0/go.mod h1:7vtkbKv2REjJbxmHSkBTBQ5LUGvPdAqjjvt84XAfhpA= +google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0= +google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= @@ -267,8 +267,8 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130 h1:2FZP5XuJY9zQyGM5N0rtovnoXjiMUEIUMvw0m9wlpLc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -277,8 +277,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/internal/fs_mkdir_test.go b/internal/fs_mkdir_test.go index 83417c13..b743ebb7 100644 --- a/internal/fs_mkdir_test.go +++ b/internal/fs_mkdir_test.go @@ -112,6 +112,6 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { // assert run fails _, _, err = RequireErrorRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "hello")) // Different cloud providers return different errors. 
-	regex := regexp.MustCompile(`^Path is a file: .*$|^Cannot create directory .* because .* is an existing file\.$|^mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`)
+	regex := regexp.MustCompile(`(^|: )Path is a file: .*$|^Cannot create directory .* because .* is an existing file\.$|^mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`)
 	assert.Regexp(t, regex, err.Error())
 }
diff --git a/internal/sync_test.go b/internal/sync_test.go
index 66b5fd3c..bc1cbd91 100644
--- a/internal/sync_test.go
+++ b/internal/sync_test.go
@@ -159,7 +159,7 @@ func (a *syncTest) remoteFileContent(ctx context.Context, relativePath string, e
 	var res []byte
 
 	a.c.Eventually(func() bool {
-		err = apiClient.Do(ctx, http.MethodGet, urlPath, nil, &res)
+		err = apiClient.Do(ctx, http.MethodGet, urlPath, nil, nil, &res)
 		require.NoError(a.t, err)
 		actualContent := string(res)
 		return actualContent == expectedContent
diff --git a/libs/filer/files_client.go b/libs/filer/files_client.go
index 285338b6..17884d57 100644
--- a/libs/filer/files_client.go
+++ b/libs/filer/files_client.go
@@ -104,11 +104,8 @@ func (w *FilesClient) Write(ctx context.Context, name string, reader io.Reader,
 	overwrite := slices.Contains(mode, OverwriteIfExists)
 	urlPath = fmt.Sprintf("%s?overwrite=%t", urlPath, overwrite)
-	err = w.apiClient.Do(ctx, http.MethodPut, urlPath, reader, nil,
-		func(r *http.Request) error {
-			r.Header.Set("Content-Type", "application/octet-stream")
-			return nil
-		})
+	headers := map[string]string{"Content-Type": "application/octet-stream"}
+	err = w.apiClient.Do(ctx, http.MethodPut, urlPath, headers, reader, nil)
 
 	// Return early on success.
 	if err == nil {
@@ -136,7 +133,7 @@ func (w *FilesClient) Read(ctx context.Context, name string) (io.ReadCloser, err
 	}
 
 	var buf bytes.Buffer
-	err = w.apiClient.Do(ctx, http.MethodGet, urlPath, nil, &buf)
+	err = w.apiClient.Do(ctx, http.MethodGet, urlPath, nil, nil, &buf)
 
 	// Return early on success.
 	if err == nil {
@@ -168,7 +165,7 @@ func (w *FilesClient) Delete(ctx context.Context, name string, mode ...DeleteMod
 		return CannotDeleteRootError{}
 	}
 
-	err = w.apiClient.Do(ctx, http.MethodDelete, urlPath, nil, nil)
+	err = w.apiClient.Do(ctx, http.MethodDelete, urlPath, nil, nil, nil)
 
 	// Return early on success.
 	if err == nil {
@@ -210,11 +207,7 @@ func (w *FilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error
 		return nil, err
 	}
 
-	err = w.apiClient.Do(ctx, http.MethodHead, urlPath, nil, nil,
-		func(r *http.Request) error {
-			r.Header.Del("Content-Type")
-			return nil
-		})
+	err = w.apiClient.Do(ctx, http.MethodHead, urlPath, nil, nil, nil)
 
 	// If the HEAD request succeeds, the file exists.
 	if err == nil {
diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go
index ed4ad7a2..41e35d9d 100644
--- a/libs/filer/workspace_files_client.go
+++ b/libs/filer/workspace_files_client.go
@@ -115,7 +115,7 @@ func (w *WorkspaceFilesClient) Write(ctx context.Context, name string, reader io
 		return err
 	}
 
-	err = w.apiClient.Do(ctx, http.MethodPost, urlPath, body, nil)
+	err = w.apiClient.Do(ctx, http.MethodPost, urlPath, nil, body, nil)
 
 	// Return early on success.
 	if err == nil {

From 7a130a3e6e4302e835a19dc281816a31f37ee0d6 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Tue, 5 Sep 2023 11:58:45 +0200
Subject: [PATCH 055/310] Group permission related commands (#730)

## Changes

Before:

```
Usage:
  databricks instance-pools [command]

Available Commands:
  create                 Create a new instance pool.
  delete                 Delete an instance pool.
edit Edit an existing instance pool. get Get instance pool information. get-permission-levels Get instance pool permission levels. get-permissions Get instance pool permissions. list List instance pool info. set-permissions Set instance pool permissions. update-permissions Update instance pool permissions. ``` After: ``` Usage: databricks instance-pools [command] Available Commands create Create a new instance pool. delete Delete an instance pool. edit Edit an existing instance pool. get Get instance pool information. list List instance pool info. Permission Commands get-permission-levels Get instance pool permission levels. get-permissions Get instance pool permissions. set-permissions Set instance pool permissions. update-permissions Update instance pool permissions. ``` ## Tests Manual. --- cmd/cmd.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/cmd/cmd.go b/cmd/cmd.go index 04d7cc80..032fde5c 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1,6 +1,8 @@ package cmd import ( + "strings" + "github.com/databricks/cli/cmd/account" "github.com/databricks/cli/cmd/api" "github.com/databricks/cli/cmd/auth" @@ -14,6 +16,11 @@ import ( "github.com/spf13/cobra" ) +const ( + mainGroup = "main" + permissionsGroup = "permissions" +) + func New() *cobra.Command { cli := root.New() @@ -22,6 +29,31 @@ func New() *cobra.Command { // Add workspace subcommands. for _, cmd := range workspace.All() { + // Built-in groups for the workspace commands. + groups := []cobra.Group{ + { + ID: mainGroup, + Title: "Available Commands", + }, + { + ID: permissionsGroup, + Title: "Permission Commands", + }, + } + for i := range groups { + cmd.AddGroup(&groups[i]) + } + + // Order the permissions subcommands after the main commands. + for _, sub := range cmd.Commands() { + switch { + case strings.HasSuffix(sub.Name(), "-permissions"), strings.HasSuffix(sub.Name(), "-permission-levels"): + sub.GroupID = permissionsGroup + default: + sub.GroupID = mainGroup + } + } + cli.AddCommand(cmd) } From 2f2386ef5a505c7391e75096736fa3e99894d7a4 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 5 Sep 2023 11:58:56 +0200 Subject: [PATCH 056/310] Work on GitHub Action (#733) ## Changes * Run the build workflow on push to main to properly use the build cache Same as https://github.com/databricks/databricks-sdk-go/pull/601. ## Tests n/a --- .github/workflows/push.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 6194d490..f0fa2ee6 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -5,6 +5,14 @@ on: types: [opened, synchronize] merge_group: types: [checks_requested] + push: + # Always run on push to main. The build cache can only be reused + # if it was saved by a run from the repository's default branch. + # The run result will be identical to that from the merge queue + # because the commit is identical, yet we need to perform it to + # seed the build cache. + branches: + - main jobs: tests: From bbbeabf98ca8805086d2180c94f6ea29eb12a2ac Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 5 Sep 2023 13:08:25 +0200 Subject: [PATCH 057/310] Add support for ordering of input prompts (#662) ## Changes JSON schema properties are a map and thus unordered. This PR introduces a JSON schema extension field called `order` to allow template authors to define the order in which template variables should be resolved/prompted. ## Tests Unit tests. 
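As a usage sketch (the schema contents below are illustrative; the types and
`OrderedProperties` are the ones added in this PR):

```
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/jsonschema"
)

func main() {
	one := 1
	s := jsonschema.Schema{
		Properties: map[string]*jsonschema.Schema{
			// No order set: sorts alphabetically after all ordered fields.
			"project_description": {Type: jsonschema.StringType},
			// Explicit order: prompted before unordered fields.
			"project_name": {
				Type:      jsonschema.StringType,
				Extension: jsonschema.Extension{Order: &one},
			},
		},
	}

	// Prints "project_name" first, then "project_description".
	for _, p := range s.OrderedProperties() {
		fmt.Println(p.Name)
	}
}
```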
--------- Co-authored-by: Pieter Noordhuis --- libs/jsonschema/extension.go | 14 +++++++ libs/jsonschema/schema.go | 3 ++ libs/jsonschema/schema_order.go | 57 ++++++++++++++++++++++++++ libs/jsonschema/schema_order_test.go | 60 ++++++++++++++++++++++++++++ libs/template/config.go | 5 ++- 5 files changed, 138 insertions(+), 1 deletion(-) create mode 100644 libs/jsonschema/extension.go create mode 100644 libs/jsonschema/schema_order.go create mode 100644 libs/jsonschema/schema_order_test.go diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go new file mode 100644 index 00000000..bbbde695 --- /dev/null +++ b/libs/jsonschema/extension.go @@ -0,0 +1,14 @@ +package jsonschema + +// Extension defines our custom JSON schema extensions. +// +// JSON schema supports custom extensions through vocabularies: +// https://json-schema.org/understanding-json-schema/reference/schema.html#vocabularies. +// We don't (yet?) define a meta-schema for the extensions below. +// It's not a big issue because the reach/scope of these extensions is limited. +type Extension struct { + // Order defines the order of a field with respect to other fields. + // If not defined, the field is ordered alphabetically after all fields + // that do have an order defined. + Order *int `json:"order,omitempty"` +} diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index c0d1736c..87e9acd5 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -40,6 +40,9 @@ type Schema struct { // Default value for the property / object Default any `json:"default,omitempty"` + + // Extension embeds our custom JSON schema extensions. + Extension } type Type string diff --git a/libs/jsonschema/schema_order.go b/libs/jsonschema/schema_order.go new file mode 100644 index 00000000..3bc3e7d0 --- /dev/null +++ b/libs/jsonschema/schema_order.go @@ -0,0 +1,57 @@ +package jsonschema + +import ( + "slices" + "strings" +) + +// Property defines a single property of a struct schema. +// This type is not used in the schema itself but rather to +// return the pair of a property name and its schema. +type Property struct { + Name string + Schema *Schema +} + +// OrderedProperties returns the properties of the schema ordered according +// to the value of their `order` extension. If this extension is not set, the +// properties are ordered alphabetically. +func (s *Schema) OrderedProperties() []Property { + order := make(map[string]*int) + out := make([]Property, 0, len(s.Properties)) + for key, property := range s.Properties { + order[key] = property.Order + out = append(out, Property{ + Name: key, + Schema: property, + }) + } + + // Sort the properties by order and then by name. + slices.SortFunc(out, func(a, b Property) int { + oa := order[a.Name] + ob := order[b.Name] + cmp := 0 + switch { + case oa != nil && ob != nil: + // Compare the order values if both are set. + cmp = *oa - *ob + case oa == nil && ob != nil: + // If only one is set, the one that is set comes first. + cmp = 1 + case oa != nil && ob == nil: + // If only one is set, the one that is set comes first. + cmp = -1 + } + + // If we have a non-zero comparison, return it. + if cmp != 0 { + return cmp + } + + // If the order is the same, compare by name. 
+	return strings.Compare(a.Name, b.Name)
+	})
+
+	return out
+}
diff --git a/libs/jsonschema/schema_order_test.go b/libs/jsonschema/schema_order_test.go
new file mode 100644
index 00000000..56d4d635
--- /dev/null
+++ b/libs/jsonschema/schema_order_test.go
@@ -0,0 +1,60 @@
+package jsonschema
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestOrderedProperties(t *testing.T) {
+	newInt := func(i int) *int {
+		return &i
+	}
+
+	s := Schema{
+		Properties: map[string]*Schema{
+			"bbb": {
+				Type: StringType,
+			},
+			"ccc": {
+				Type: StringType,
+			},
+			"ddd": {
+				Type: StringType,
+			},
+			"zzz1": {
+				Type: StringType,
+				Extension: Extension{
+					Order: newInt(-1),
+				},
+			},
+			"zzz2": {
+				Type: StringType,
+				Extension: Extension{
+					Order: newInt(-2),
+				},
+			},
+			"aaa1": {
+				Type: StringType,
+				Extension: Extension{
+					Order: newInt(1),
+				},
+			},
+			"aaa2": {
+				Type: StringType,
+				Extension: Extension{
+					Order: newInt(2),
+				},
+			},
+		},
+	}
+
+	// Test that the properties are ordered by order and then by name.
+	properties := s.OrderedProperties()
+	names := make([]string, len(properties))
+	for i, property := range properties {
+		names[i] = property.Name
+	}
+
+	assert.Equal(t, []string{"zzz2", "zzz1", "aaa1", "aaa2", "bbb", "ccc", "ddd"}, names)
+}
diff --git a/libs/template/config.go b/libs/template/config.go
index 302a1361..8a1ed6c8 100644
--- a/libs/template/config.go
+++ b/libs/template/config.go
@@ -117,7 +117,10 @@ func (c *config) assignDefaultValues() error {
 
 // Prompts user for values for properties that do not have a value set yet
 func (c *config) promptForValues() error {
-	for name, property := range c.schema.Properties {
+	for _, p := range c.schema.OrderedProperties() {
+		name := p.Name
+		property := p.Schema
+
 		// Config already has a value assigned
 		if _, ok := c.values[name]; ok {
 			continue

From f62def3e77459cd0717d1ad04192c3162932930d Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Tue, 5 Sep 2023 13:10:37 +0200
Subject: [PATCH 058/310] Replace API call to test configuration with dummy
 authenticate call (#728)

## Changes

This reduces the latency of every workspace command by the duration of a
single API call to retrieve the current user (which can take up to a full
second).

Note: the better place to verify that a request can be authenticated is the
SDK itself.

## Tests

* Unit test to confirm that an empty `*http.Request` can be constructed
* Manually confirmed that the additional API call no longer happens

---
 cmd/root/auth.go      | 26 +++++++++++++-------------
 cmd/root/auth_test.go | 14 ++++++++++++++
 2 files changed, 27 insertions(+), 13 deletions(-)
 create mode 100644 cmd/root/auth_test.go

diff --git a/cmd/root/auth.go b/cmd/root/auth.go
index e56074ef..d4c9a31b 100644
--- a/cmd/root/auth.go
+++ b/cmd/root/auth.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"net/http"
 	"os"
 
 	"github.com/databricks/cli/bundle"
@@ -11,7 +12,6 @@ import (
 	"github.com/databricks/cli/libs/databrickscfg"
 	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/config"
-	"github.com/databricks/databricks-sdk-go/service/iam"
 	"github.com/manifoldco/promptui"
 	"github.com/spf13/cobra"
 )
 
 // Placeholders to use as unique keys in context.Context.
var workspaceClient int var accountClient int -var currentUser int func initProfileFlag(cmd *cobra.Command) { cmd.PersistentFlags().StringP("profile", "p", "", "~/.databrickscfg profile") @@ -94,8 +93,7 @@ TRY_AUTH: // or try picking a config profile dynamically if err != nil { return err } - // get current user identity also to verify validity of configuration - me, err := w.CurrentUser.Me(ctx) + err = w.Config.Authenticate(emptyHttpRequest(ctx)) if cmdio.IsInteractive(ctx) && errors.Is(err, config.ErrCannotConfigureAuth) { profile, err := askForWorkspaceProfile() if err != nil { @@ -107,7 +105,6 @@ TRY_AUTH: // or try picking a config profile dynamically if err != nil { return err } - ctx = context.WithValue(ctx, ¤tUser, me) ctx = context.WithValue(ctx, &workspaceClient, w) cmd.SetContext(ctx) return nil @@ -194,6 +191,17 @@ func askForAccountProfile() (string, error) { return profiles[i].Name, nil } +// To verify that a client is configured correctly, we pass an empty HTTP request +// to a client's `config.Authenticate` function. Note: this functionality +// should be supported by the SDK itself. +func emptyHttpRequest(ctx context.Context) *http.Request { + req, err := http.NewRequestWithContext(ctx, "", "", nil) + if err != nil { + panic(err) + } + return req +} + func WorkspaceClient(ctx context.Context) *databricks.WorkspaceClient { w, ok := ctx.Value(&workspaceClient).(*databricks.WorkspaceClient) if !ok { @@ -209,11 +217,3 @@ func AccountClient(ctx context.Context) *databricks.AccountClient { } return a } - -func Me(ctx context.Context) *iam.User { - me, ok := ctx.Value(¤tUser).(*iam.User) - if !ok { - panic("cannot get current user. Please report it as a bug") - } - return me -} diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go new file mode 100644 index 00000000..75d255b5 --- /dev/null +++ b/cmd/root/auth_test.go @@ -0,0 +1,14 @@ +package root + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEmptyHttpRequest(t *testing.T) { + ctx, _ := context.WithCancel(context.Background()) + req := emptyHttpRequest(ctx) + assert.Equal(t, req.Context(), ctx) +} From 947d5b1e5c45a523d3e66f33982361bae3f75c62 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Tue, 5 Sep 2023 04:20:55 -0700 Subject: [PATCH 059/310] Fix IsServicePrincipal() only working for workspace admins (#732) ## Changes The latest rendition of isServicePrincipal no longer worked for non-admin users as it used the "principals get" API. This new version relies on the property that service principals always have a UUID as their userName. This was tested with the eng-jaws principal (8b948b2e-d2b5-4b9e-8274-11b596f3b652). 
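For illustration, a minimal sketch of the heuristic from a caller's
perspective (the inputs below are examples only):

```
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/auth"
)

func main() {
	// A UUID-shaped user id is assumed to be a service principal.
	fmt.Println(auth.IsServicePrincipal("8b948b2e-d2b5-4b9e-8274-11b596f3b652")) // true

	// A human user name does not parse as a UUID.
	fmt.Println(auth.IsServicePrincipal("someone@example.com")) // false
}
```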
--- bundle/config/mutator/process_target_mode.go | 5 +---- libs/auth/service_principal.go | 21 ++++++++------------ libs/auth/service_principal_test.go | 19 ++++++++++++++++++ libs/template/helpers.go | 5 +---- 4 files changed, 29 insertions(+), 21 deletions(-) create mode 100644 libs/auth/service_principal_test.go diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index be93512b..06ae7b85 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -160,10 +160,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error { } return transformDevelopmentMode(b) case config.Production: - isPrincipal, err := auth.IsServicePrincipal(ctx, b.WorkspaceClient(), b.Config.Workspace.CurrentUser.Id) - if err != nil { - return err - } + isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.Id) return validateProductionMode(ctx, b, isPrincipal) case "": // No action diff --git a/libs/auth/service_principal.go b/libs/auth/service_principal.go index a6740b50..cb488d16 100644 --- a/libs/auth/service_principal.go +++ b/libs/auth/service_principal.go @@ -1,20 +1,15 @@ package auth import ( - "context" - - "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" + "github.com/google/uuid" ) // Determines whether a given user id is a service principal. -// This function uses a heuristic: if no user exists with this id, we assume -// it's a service principal. Unfortunately, the standard service principal API is too -// slow for our purposes. -func IsServicePrincipal(ctx context.Context, ws *databricks.WorkspaceClient, userId string) (bool, error) { - _, err := ws.Users.GetById(ctx, userId) - if apierr.IsMissing(err) { - return true, nil - } - return false, err +// This function uses a heuristic: if the user id is a UUID, then we assume +// it's a service principal. Unfortunately, the service principal listing API is too +// slow for our purposes. And the "users" and "service principals get" APIs +// only allow access by workspace admins. 
+func IsServicePrincipal(userId string) bool { + _, err := uuid.Parse(userId) + return err == nil } diff --git a/libs/auth/service_principal_test.go b/libs/auth/service_principal_test.go new file mode 100644 index 00000000..95e8ab5c --- /dev/null +++ b/libs/auth/service_principal_test.go @@ -0,0 +1,19 @@ +package auth + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsServicePrincipal_ValidUUID(t *testing.T) { + userId := "8b948b2e-d2b5-4b9e-8274-11b596f3b652" + isSP := IsServicePrincipal(userId) + assert.True(t, isSP, "Expected user ID to be recognized as a service principal") +} + +func TestIsServicePrincipal_InvalidUUID(t *testing.T) { + userId := "invalid" + isSP := IsServicePrincipal(userId) + assert.False(t, isSP, "Expected user ID to not be recognized as a service principal") +} diff --git a/libs/template/helpers.go b/libs/template/helpers.go index f947d9ba..29abbe21 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -104,10 +104,7 @@ func loadHelpers(ctx context.Context) template.FuncMap { return false, err } } - result, err := auth.IsServicePrincipal(ctx, w, user.Id) - if err != nil { - return false, err - } + result := auth.IsServicePrincipal(user.Id) is_service_principal = &result return result, nil }, From 8c2cc07f7b3649df0cebb539e8fec81fccc07ed5 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Tue, 5 Sep 2023 04:58:34 -0700 Subject: [PATCH 060/310] databricks bundle init template v1 (#686) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This adds a built-in "default-python" template to the CLI. This is based on the new default-template support of https://github.com/databricks/cli/pull/685. The goal here is to offer an experience where customers can simply type `databricks bundle init` to get a default template: ``` $ databricks bundle init Template to use [default-python]: default-python Unique name for this project [my_project]: my_project ✨ Successfully initialized template ``` The present template: - [x] Works well with VS Code - [x] Works well with the workspace - [x] Works well with DB Connect - [x] Uses minimal stubs rather than boiler-plate-heavy examples I'll have a followup with tests + DLT support. 
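As background for the template files below: helpers such as `workspace_host`,
`user_name`, and `is_service_principal` are registered as functions in a Go
`text/template` function map. The following is a self-contained, illustrative
sketch; the helper bodies are stand-ins, not the CLI's actual wiring:

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// Stand-in helpers; the CLI wires these up to the workspace client.
	helpers := template.FuncMap{
		"workspace_host":       func() string { return "https://example.cloud.databricks.com" },
		"user_name":            func() string { return "someone@example.com" },
		"is_service_principal": func() bool { return false },
	}

	text := "workspace:\n" +
		"  host: {{workspace_host}}\n" +
		"{{if not is_service_principal}}" +
		"run_as:\n" +
		"  user_name: {{user_name}}\n" +
		"{{end}}"

	tmpl := template.Must(template.New("databricks.yml.tmpl").Funcs(helpers).Parse(text))

	// Prints the rendered YAML, including run_as for human users.
	_ = tmpl.Execute(os.Stdout, nil)
}
```

Note that `text/template` invokes a niladic function mentioned by name, which
is why `{{if not is_service_principal}}` works without parentheses.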
--------- Co-authored-by: Andrew Nester Co-authored-by: PaulCornellDB Co-authored-by: Pieter Noordhuis --- cmd/bundle/init.go | 2 +- .../databricks_template_schema.json | 2 +- .../template/{{.project_name}}/.gitignore | 9 +++ .../.vscode/__builtins__.pyi | 3 + .../{{.project_name}}/.vscode/extensions.json | 7 ++ .../{{.project_name}}/.vscode/settings.json | 14 ++++ .../template/{{.project_name}}/README.md | 3 - .../template/{{.project_name}}/README.md.tmpl | 37 +++++++++++ .../{{.project_name}}/databricks.yml.tmpl | 52 +++++++++++++++ .../{{.project_name}}/fixtures/.gitkeep.tmpl | 27 ++++++++ .../template/{{.project_name}}/pytest.ini | 3 + .../resources/{{.project_name}}_job.yml.tmpl | 42 ++++++++++++ .../{{.project_name}}/scratch/README.md | 4 ++ .../scratch/exploration.ipynb | 50 ++++++++++++++ .../template/{{.project_name}}/setup.py.tmpl | 24 +++++++ .../{{.project_name}}/src/notebook.ipynb.tmpl | 65 +++++++++++++++++++ .../src/{{.project_name}}/__init__.py | 1 + .../src/{{.project_name}}/main.py.tmpl | 16 +++++ .../{{.project_name}}/tests/main_test.py.tmpl | 5 ++ 19 files changed, 361 insertions(+), 5 deletions(-) create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/.gitignore create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/.vscode/__builtins__.pyi create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/.vscode/extensions.json create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json delete mode 100644 libs/template/templates/default-python/template/{{.project_name}}/README.md create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/fixtures/.gitkeep.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/pytest.ini create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/scratch/README.md create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/__init__.py create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 2127a7bc..bf68e921 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -59,7 +59,7 @@ func newInitCommand() *cobra.Command { } else { return errors.New("please specify a template") - /* TODO: propose to use default-python (once template is ready) + /* TODO: propose to use default-python (once #708 is merged) var err error if !cmdio.IsOutTTY(ctx) || !cmdio.IsInTTY(ctx) { return errors.New("please specify a template") diff --git a/libs/template/templates/default-python/databricks_template_schema.json 
b/libs/template/templates/default-python/databricks_template_schema.json index b680c5fb..3220e9a6 100644 --- a/libs/template/templates/default-python/databricks_template_schema.json +++ b/libs/template/templates/default-python/databricks_template_schema.json @@ -3,7 +3,7 @@ "project_name": { "type": "string", "default": "my_project", - "description": "Name of the directory" + "description": "Unique name for this project" } } } diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.gitignore b/libs/template/templates/default-python/template/{{.project_name}}/.gitignore new file mode 100644 index 00000000..aa87f019 --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/.gitignore @@ -0,0 +1,9 @@ + +.databricks/ +build/ +dist/ +__pycache__/ +*.egg-info +.venv/ +scratch/** +!scratch/README.md diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/__builtins__.pyi b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/__builtins__.pyi new file mode 100644 index 00000000..0edd5181 --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/__builtins__.pyi @@ -0,0 +1,3 @@ +# Typings for Pylance in Visual Studio Code +# see https://github.com/microsoft/pyright/blob/main/docs/builtins.md +from databricks.sdk.runtime import * diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/extensions.json b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/extensions.json new file mode 100644 index 00000000..5d15eba3 --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/extensions.json @@ -0,0 +1,7 @@ +{ + "recommendations": [ + "databricks.databricks", + "ms-python.vscode-pylance", + "redhat.vscode-yaml" + ] +} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json new file mode 100644 index 00000000..16cb2c96 --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json @@ -0,0 +1,14 @@ +{ + "python.analysis.stubPath": ".vscode", + "databricks.python.envFile": "${workspaceFolder}/.env", + "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", + "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", + "python.testing.pytestArgs": [ + "." + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "files.exclude": { + "**/*.egg-info": true + }, +} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md b/libs/template/templates/default-python/template/{{.project_name}}/README.md deleted file mode 100644 index 3187b9ed..00000000 --- a/libs/template/templates/default-python/template/{{.project_name}}/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# {{.project_name}} - -The '{{.project_name}}' bundle was generated using the default-python template. 
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl
new file mode 100644
index 00000000..4c89435b
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl
@@ -0,0 +1,37 @@
+# {{.project_name}}
+
+The '{{.project_name}}' project was generated using the default-python template.
+
+## Getting started
+
+1. Install the Databricks CLI from https://docs.databricks.com/dev-tools/cli/databricks-cli.html
+
+2. Authenticate to your Databricks workspace:
+   ```
+   $ databricks configure
+   ```
+
+3. To deploy a development copy of this project, type:
+   ```
+   $ databricks bundle deploy --target dev
+   ```
+   (Note that "dev" is the default target, so the `--target` parameter
+   is optional here.)
+
+   This deploys everything that's defined for this project.
+   For example, the default template would deploy a job called
+   `[dev yourname] {{.project_name}}-job` to your workspace.
+   You can find that job by opening your workspace and clicking on **Workflows**.
+
+4. Similarly, to deploy a production copy, type:
+   ```
+   $ databricks bundle deploy --target prod
+   ```
+
+5. Optionally, install developer tools such as the Databricks extension for Visual Studio Code from
+   https://docs.databricks.com/dev-tools/vscode-ext.html. Or read the "getting started" documentation for
+   **Databricks Connect** for instructions on running the included Python code from a different IDE.
+
+6. For documentation on the Databricks asset bundles format used
+   for this project, and for CI/CD configuration, see
+   https://docs.databricks.com/dev-tools/bundles/index.html.
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl
new file mode 100644
index 00000000..48aef0ea
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl
@@ -0,0 +1,52 @@
+# This is a Databricks asset bundle definition for {{.project_name}}.
+# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation.
+bundle:
+  name: {{.project_name}}
+
+include:
+  - resources/*.yml
+
+targets:
+  # The 'dev' target, used for development purposes.
+  # Whenever a developer deploys using 'dev', they get their own copy.
+  dev:
+    # We use 'mode: development' to make sure everything deployed to this target gets a prefix
+    # like '[dev my_user_name]'. Setting this mode also disables any schedules and
+    # automatic triggers for jobs and enables the 'development' mode for Delta Live Tables pipelines.
+    mode: development
+    default: true
+    workspace:
+      host: {{workspace_host}}
+
+  # Optionally, there could be a 'staging' target here.
+  # (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/index.html.)
+  #
+  # staging:
+  #   workspace:
+  #     host: {{workspace_host}}
+
+  # The 'prod' target, used for production deployment.
+  prod:
+    # For production deployments, we only have a single copy, so we override the
+    # workspace.root_path default of
+    # /Users/${workspace.current_user.userName}/.bundle/${bundle.target}/${bundle.name}
+    # to a path that is not specific to the current user.
+    {{- /*
+    Explaining 'mode: production' isn't as pressing as explaining 'mode: development'.
+    As we already talked about the other mode above, users can just
+    look at documentation or ask the assistant about 'mode: production'.
+ # + # By making use of 'mode: production' we enable strict checks + # to make sure we have correctly configured this target. + */}} + mode: production + workspace: + host: {{workspace_host}} + root_path: /Shared/.bundle/prod/${bundle.name} + {{- if not is_service_principal}} + run_as: + # This runs as {{user_name}} in production. Alternatively, + # a service principal could be used here using service_principal_name + # (see Databricks documentation). + user_name: {{user_name}} + {{end -}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/fixtures/.gitkeep.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/fixtures/.gitkeep.tmpl new file mode 100644 index 00000000..361c681f --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/fixtures/.gitkeep.tmpl @@ -0,0 +1,27 @@ +# Fixtures +{{- /* +We don't want to have too many README.md files, since they +stand out so much. But we do need to have a file here to make +sure the folder is added to Git. +*/}} + +This folder is reserved for fixtures, such as CSV files. + +Below is an example of how to load fixtures as a data frame: + +``` +import pandas as pd +import os + +def get_absolute_path(*relative_parts): + if 'dbutils' in globals(): + base_dir = os.path.dirname(dbutils.notebook.entry_point.getDbutils().notebook().getContext().notebookPath().get()) # type: ignore + path = os.path.normpath(os.path.join(base_dir, *relative_parts)) + return path if path.startswith("/Workspace") else os.path.join("/Workspace", path) + else: + return os.path.join(*relative_parts) + +csv_file = get_absolute_path("..", "fixtures", "mycsv.csv") +df = pd.read_csv(csv_file) +display(df) +``` diff --git a/libs/template/templates/default-python/template/{{.project_name}}/pytest.ini b/libs/template/templates/default-python/template/{{.project_name}}/pytest.ini new file mode 100644 index 00000000..80432c22 --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +testpaths = tests +pythonpath = src diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl new file mode 100644 index 00000000..f8116cdf --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -0,0 +1,42 @@ +# The main job for {{.project_name}} +resources: + + jobs: + {{.project_name}}_job: + name: {{.project_name}}_job + + schedule: + quartz_cron_expression: '44 37 8 * * ?' 
+        timezone_id: Europe/Amsterdam
+
+      {{- if not is_service_principal}}
+      email_notifications:
+        on_failure:
+          - {{user_name}}
+      {{end -}}
+
+      tasks:
+        - task_key: notebook_task
+          job_cluster_key: job_cluster
+          notebook_task:
+            notebook_path: ../src/notebook.ipynb
+
+        - task_key: python_wheel_task
+          depends_on:
+            - task_key: notebook_task
+          job_cluster_key: job_cluster
+          python_wheel_task:
+            package_name: {{.project_name}}
+            entry_point: main
+          libraries:
+            - whl: ../dist/*.whl
+
+      job_clusters:
+        - job_cluster_key: job_cluster
+          new_cluster:
+            {{- /* we should always use an LTS version in our templates */}}
+            spark_version: 13.3.x-scala2.12
+            node_type_id: {{smallest_node_type}}
+            autoscale:
+              min_workers: 1
+              max_workers: 4
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/scratch/README.md b/libs/template/templates/default-python/template/{{.project_name}}/scratch/README.md
new file mode 100644
index 00000000..e6cfb81b
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/scratch/README.md
@@ -0,0 +1,4 @@
+# scratch
+
+This folder is reserved for personal, exploratory notebooks.
+By default these are not committed to Git, as 'scratch' is listed in .gitignore.
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb b/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb
new file mode 100644
index 00000000..2ee36c3c
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb
@@ -0,0 +1,50 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "application/vnd.databricks.v1+cell": {
+     "cellMetadata": {
+      "byteLimit": 2048000,
+      "rowLimit": 10000
+     },
+     "inputWidgets": {},
+     "nuid": "6bca260b-13d1-448f-8082-30b60a85c9ae",
+     "showTitle": false,
+     "title": ""
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import sys\n",
+    "sys.path.append('../src')\n",
+    "from project import main\n",
+    "\n",
+    "main.get_taxis().show(10)"
+   ]
+  }
+ ],
+ "metadata": {
+  "application/vnd.databricks.v1+notebook": {
+   "dashboards": [],
+   "language": "python",
+   "notebookMetadata": {
+    "pythonIndentUnit": 2
+   },
+   "notebookName": "ipynb-notebook",
+   "widgets": {}
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python",
+   "version": "3.11.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl
new file mode 100644
index 00000000..93f4e9ff
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl
@@ -0,0 +1,24 @@
+"""
+Setup script for {{.project_name}}.
+
+This script packages and distributes the associated wheel file(s).
+Source code is in ./src/. Run 'python setup.py sdist bdist_wheel' to build.
+"""
+from setuptools import setup, find_packages
+
+import sys
+sys.path.append('./src')
+
+import {{.project_name}}
+
+setup(
+    name="{{.project_name}}",
+    version={{.project_name}}.__version__,
+    url="https://databricks.com",
+    author="{{user_name}}",
+    description="wheel file based on {{.project_name}}/src",
+    packages=find_packages(where='./src'),
+    package_dir={'': 'src'},
+    entry_points={"console_scripts": ["main={{.project_name}}.main:main"]},
+    install_requires=["setuptools"],
+)
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl
new file mode 100644
index 00000000..26c74303
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl
@@ -0,0 +1,65 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "application/vnd.databricks.v1+cell": {
+     "cellMetadata": {},
+     "inputWidgets": {},
+     "nuid": "ee353e42-ff58-4955-9608-12865bd0950e",
+     "showTitle": false,
+     "title": ""
+    }
+   },
+   "source": [
+    "# Default notebook\n",
+    "\n",
+    "This default notebook is executed using Databricks Workflows as defined in resources/{{.project_name}}_job.yml."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "application/vnd.databricks.v1+cell": {
+     "cellMetadata": {
+      "byteLimit": 2048000,
+      "rowLimit": 10000
+     },
+     "inputWidgets": {},
+     "nuid": "6bca260b-13d1-448f-8082-30b60a85c9ae",
+     "showTitle": false,
+     "title": ""
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from {{.project_name}} import main\n",
+    "\n",
+    "main.get_taxis().show(10)\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "application/vnd.databricks.v1+notebook": {
+   "dashboards": [],
+   "language": "python",
+   "notebookMetadata": {
+    "pythonIndentUnit": 2
+   },
+   "notebookName": "notebook",
+   "widgets": {}
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python",
+   "version": "3.11.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/__init__.py b/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/__init__.py
new file mode 100644
index 00000000..f102a9ca
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.0.1"
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl
new file mode 100644
index 00000000..4fe5ac8f
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl
@@ -0,0 +1,16 @@
+{{- /*
+We use pyspark.sql rather than DatabricksSession.builder.getOrCreate()
+for compatibility with older runtimes. With a new runtime, it's
+equivalent to DatabricksSession.builder.getOrCreate().
+*/ -}}
+from pyspark.sql import SparkSession
+
+def get_taxis():
+    spark = SparkSession.builder.getOrCreate()
+    return spark.read.table("samples.nyctaxi.trips")
+
+def main():
+    get_taxis().show(5)
+
+if __name__ == '__main__':
+    main()
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl
new file mode 100644
index 00000000..92afccc6
--- /dev/null
+++ b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl
@@ -0,0 +1,5 @@
+from {{.project_name}} import main
+
+def test_main():
+    taxis = main.get_taxis()
+    assert taxis.count() > 5

From e533f9109a04295b50b2bc032f4f934e6bb25ead Mon Sep 17 00:00:00 2001
From: "Lennart Kats (databricks)"
Date: Tue, 5 Sep 2023 06:57:01 -0700
Subject: [PATCH 061/310] Show 'databricks bundle init' template in CLI prompt
 (#725)

~(this should be changed to target `main`)~

This reveals the template from https://github.com/databricks/cli/pull/686 in
CLI prompts once #686 and #708 are merged.

---------

Co-authored-by: Andrew Nester
Co-authored-by: PaulCornellDB
Co-authored-by: Pieter Noordhuis
---
 cmd/bundle/init.go | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go
index bf68e921..9a11eb25 100644
--- a/cmd/bundle/init.go
+++ b/cmd/bundle/init.go
@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/git"
 	"github.com/databricks/cli/libs/template"
 	"github.com/spf13/cobra"
@@ -57,9 +58,6 @@ func newInitCommand() *cobra.Command {
 	if len(args) > 0 {
 		templatePath = args[0]
 	} else {
-		return errors.New("please specify a template")
-
-		/* TODO: propose to use default-python (once #708 is merged)
 		var err error
 		if !cmdio.IsOutTTY(ctx) || !cmdio.IsInTTY(ctx) {
 			return errors.New("please specify a template")
@@ -68,7 +66,6 @@ func newInitCommand() *cobra.Command {
 		if err != nil {
 			return err
 		}
-		*/
 	}
 
 	if !isRepoUrl(templatePath) {

From 9194418ac16310bc24ac25f90845af338f4518bd Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Tue, 5 Sep 2023 16:25:26 +0200
Subject: [PATCH 062/310] Fix regex error check in mkdir integration test
 (#735)

## Changes

Fixes the test for all cloud providers after the Go SDK bump, which introduces
the `non retryable error` prefix to errors. The test passes now.

---
 internal/fs_mkdir_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/fs_mkdir_test.go b/internal/fs_mkdir_test.go
index b743ebb7..25117d53 100644
--- a/internal/fs_mkdir_test.go
+++ b/internal/fs_mkdir_test.go
@@ -112,6 +112,6 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) {
 	// assert run fails
 	_, _, err = RequireErrorRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "hello"))
 	// Different cloud providers return different errors.
- regex := regexp.MustCompile(`(^|: )Path is a file: .*$|^Cannot create directory .* because .* is an existing file\.$|^mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`) + regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`) assert.Regexp(t, regex, err.Error()) } From fabe8e88b8abca3993d40148c966df8d9e924318 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 6 Sep 2023 09:54:35 +0200 Subject: [PATCH 063/310] Include $PATH in set of environment variables to pass along. (#736) ## Changes This is necessary to ensure that our Terraform provider can use the same auxiliary programs (e.g. `az`, or `gcloud`) as the CLI. ## Tests Unit test and manual verification. --- bundle/deploy/terraform/init.go | 8 ++++++++ bundle/deploy/terraform/init_test.go | 2 ++ 2 files changed, 10 insertions(+) diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 6df7b8d4..878c4e8b 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -78,6 +78,14 @@ func inheritEnvVars(env map[string]string) error { env["HOME"] = home } + // Include $PATH in set of environment variables to pass along. + // This is necessary to ensure that our Terraform provider can use the + // same auxiliary programs (e.g. `az`, or `gcloud`) as the CLI. + path, ok := os.LookupEnv("PATH") + if ok { + env["PATH"] = path + } + // Include $TF_CLI_CONFIG_FILE to override terraform provider in development. configFile, ok := os.LookupEnv("TF_CLI_CONFIG_FILE") if ok { diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 5bb5929e..b9459387 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -277,6 +277,7 @@ func TestInheritEnvVars(t *testing.T) { env := map[string]string{} t.Setenv("HOME", "/home/testuser") + t.Setenv("PATH", "/foo:/bar") t.Setenv("TF_CLI_CONFIG_FILE", "/tmp/config.tfrc") err := inheritEnvVars(env) @@ -285,6 +286,7 @@ func TestInheritEnvVars(t *testing.T) { require.Equal(t, map[string]string{ "HOME": "/home/testuser", + "PATH": "/foo:/bar", "TF_CLI_CONFIG_FILE": "/tmp/config.tfrc", }, env) } From a41b9e8bf2aa0a25898e48cf25f24518d33a5c84 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 6 Sep 2023 10:41:47 +0200 Subject: [PATCH 064/310] Added description for version command (#737) ## Changes Added description for version command ## Tests ``` databricks help ... 
Additional Commands: account Databricks Account Commands api Perform Databricks API call auth Authentication related commands bundle Databricks Asset Bundles completion Generate the autocompletion script for the specified shell fs Filesystem related commands help Help about any command sync Synchronize a local directory to a workspace directory version Retrieve information about the current version of CLI ``` --------- Co-authored-by: Pieter Noordhuis --- cmd/version/version.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/version/version.go b/cmd/version/version.go index 17bb4b9a..653fbb89 100644 --- a/cmd/version/version.go +++ b/cmd/version/version.go @@ -8,9 +8,9 @@ import ( func New() *cobra.Command { cmd := &cobra.Command{ - Use: "version", - Args: cobra.NoArgs, - + Use: "version", + Args: cobra.NoArgs, + Short: "Retrieve information about the current version of this CLI", Annotations: map[string]string{ "template": "Databricks CLI v{{.Version}}\n", }, From f9e521b43e1e19b5ae52ca1c512f6690204e8b2a Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 6 Sep 2023 11:52:31 +0200 Subject: [PATCH 065/310] databricks bundle init template v2: optional stubs, DLT support (#700) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This follows up on https://github.com/databricks/cli/pull/686. This PR makes our stubs optional + it adds DLT stubs: ``` $ databricks bundle init Template to use [default-python]: default-python Unique name for this project [my_project]: my_project Include a stub (sample) notebook in 'my_project/src' [yes]: yes Include a stub (sample) DLT pipeline in 'my_project/src' [yes]: yes Include a stub (sample) Python package 'my_project/src' [yes]: yes ✨ Successfully initialized template ``` ## Tests Manual testing, matrix tests. 
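As a sketch of how these answers can drive what gets generated (the property
names below are illustrative; the real ones are defined in this template's
`databricks_template_schema.json`):

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// Answers as collected from the prompts above (illustrative names).
	config := map[string]any{
		"project_name":     "my_project",
		"include_notebook": "yes",
		"include_dlt":      "no",
	}

	// Template content can condition on the answers, so a "no" answer
	// leaves the corresponding stub out of the generated project.
	text := "name: {{.project_name}}_job\n" +
		"{{if eq .include_notebook \"yes\"}}" +
		"tasks:\n" +
		"  - task_key: notebook_task\n" +
		"{{end}}"

	tmpl := template.Must(template.New("job").Parse(text))
	_ = tmpl.Execute(os.Stdout, config)
}
```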
--------- Co-authored-by: Andrew Nester Co-authored-by: PaulCornellDB Co-authored-by: Pieter Noordhuis --- bundle/bundle.go | 4 + .../config/mutator/populate_current_user.go | 4 + bundle/deploy/terraform/apply.go | 4 + bundle/deploy/terraform/convert.go | 9 +- bundle/deploy/terraform/convert_test.go | 18 +-- bundle/deploy/terraform/write.go | 3 +- libs/template/helpers.go | 25 ++-- libs/template/renderer.go | 16 ++- libs/template/renderer_test.go | 96 +++++++++++++++ .../databricks_template_schema.json | 27 ++++- .../templates/default-python/defaults.json | 5 +- .../default-python/template/__preamble.tmpl | 38 ++++++ .../template/{{.project_name}}/README.md.tmpl | 15 ++- .../{{.project_name}}/resources/.gitkeep | 1 + .../resources/{{.project_name}}_job.yml.tmpl | 28 ++++- .../{{.project_name}}_pipeline.yml.tmpl | 12 ++ ...ploration.ipynb => exploration.ipynb.tmpl} | 8 +- .../src/dlt_pipeline.ipynb.tmpl | 112 ++++++++++++++++++ .../{{.project_name}}/src/notebook.ipynb.tmpl | 6 +- .../{{.project_name}}/tests/main_test.py.tmpl | 2 +- 20 files changed, 393 insertions(+), 40 deletions(-) create mode 100644 libs/template/templates/default-python/template/__preamble.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/resources/.gitkeep create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl rename libs/template/templates/default-python/template/{{.project_name}}/scratch/{exploration.ipynb => exploration.ipynb.tmpl} (84%) create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl diff --git a/bundle/bundle.go b/bundle/bundle.go index d69d5815..8175ce28 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -37,6 +37,10 @@ type Bundle struct { // Stores an initialized copy of this bundle's Terraform wrapper. Terraform *tfexec.Terraform + // Indicates that the Terraform definition based on this bundle is empty, + // i.e. that it would deploy no resources. + TerraformHasNoResources bool + // Stores the locker responsible for acquiring/releasing a deployment lock. 
Locker *locker.Locker diff --git a/bundle/config/mutator/populate_current_user.go b/bundle/config/mutator/populate_current_user.go index cbaa2d30..bba0457c 100644 --- a/bundle/config/mutator/populate_current_user.go +++ b/bundle/config/mutator/populate_current_user.go @@ -21,6 +21,10 @@ func (m *populateCurrentUser) Name() string { } func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error { + if b.Config.Workspace.CurrentUser != nil { + return nil + } + w := b.WorkspaceClient() me, err := w.CurrentUser.Me(ctx) if err != nil { diff --git a/bundle/deploy/terraform/apply.go b/bundle/deploy/terraform/apply.go index ab868f76..53cffbba 100644 --- a/bundle/deploy/terraform/apply.go +++ b/bundle/deploy/terraform/apply.go @@ -16,6 +16,10 @@ func (w *apply) Name() string { } func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error { + if b.TerraformHasNoResources { + cmdio.LogString(ctx, "Note: there are no resources to deploy for this bundle") + return nil + } tf := b.Terraform if tf == nil { return fmt.Errorf("terraform not initialized") diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index ac68bd35..41bde91d 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -49,12 +49,14 @@ func convPermission(ac resources.Permission) schema.ResourcePermissionsAccessCon // // NOTE: THIS IS CURRENTLY A HACK. WE NEED A BETTER WAY TO // CONVERT TO/FROM TERRAFORM COMPATIBLE FORMAT. -func BundleToTerraform(config *config.Root) *schema.Root { +func BundleToTerraform(config *config.Root) (*schema.Root, bool) { tfroot := schema.NewRoot() tfroot.Provider = schema.NewProviders() tfroot.Resource = schema.NewResources() + noResources := true for k, src := range config.Resources.Jobs { + noResources = false var dst schema.ResourceJob conv(src, &dst) @@ -100,6 +102,7 @@ func BundleToTerraform(config *config.Root) *schema.Root { } for k, src := range config.Resources.Pipelines { + noResources = false var dst schema.ResourcePipeline conv(src, &dst) @@ -127,6 +130,7 @@ func BundleToTerraform(config *config.Root) *schema.Root { } for k, src := range config.Resources.Models { + noResources = false var dst schema.ResourceMlflowModel conv(src, &dst) tfroot.Resource.MlflowModel[k] = &dst @@ -139,6 +143,7 @@ func BundleToTerraform(config *config.Root) *schema.Root { } for k, src := range config.Resources.Experiments { + noResources = false var dst schema.ResourceMlflowExperiment conv(src, &dst) tfroot.Resource.MlflowExperiment[k] = &dst @@ -150,7 +155,7 @@ func BundleToTerraform(config *config.Root) *schema.Root { } } - return tfroot + return tfroot, noResources } func TerraformToBundle(state *tfjson.State, config *config.Root) error { diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index c47824ec..4d912fbe 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -40,7 +40,7 @@ func TestConvertJob(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.Equal(t, "my job", out.Resource.Job["my_job"].Name) assert.Len(t, out.Resource.Job["my_job"].JobCluster, 1) assert.Equal(t, "https://github.com/foo/bar", out.Resource.Job["my_job"].GitSource.Url) @@ -65,7 +65,7 @@ func TestConvertJobPermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["job_my_job"].JobId) assert.Len(t, 
out.Resource.Permissions["job_my_job"].AccessControl, 1) @@ -101,7 +101,7 @@ func TestConvertJobTaskLibraries(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.Equal(t, "my job", out.Resource.Job["my_job"].Name) require.Len(t, out.Resource.Job["my_job"].Task, 1) require.Len(t, out.Resource.Job["my_job"].Task[0].Library, 1) @@ -135,7 +135,7 @@ func TestConvertPipeline(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.Equal(t, "my pipeline", out.Resource.Pipeline["my_pipeline"].Name) assert.Len(t, out.Resource.Pipeline["my_pipeline"].Library, 2) assert.Nil(t, out.Data) @@ -159,7 +159,7 @@ func TestConvertPipelinePermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["pipeline_my_pipeline"].PipelineId) assert.Len(t, out.Resource.Permissions["pipeline_my_pipeline"].AccessControl, 1) @@ -194,7 +194,7 @@ func TestConvertModel(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.Equal(t, "name", out.Resource.MlflowModel["my_model"].Name) assert.Equal(t, "description", out.Resource.MlflowModel["my_model"].Description) assert.Len(t, out.Resource.MlflowModel["my_model"].Tags, 2) @@ -223,7 +223,7 @@ func TestConvertModelPermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["mlflow_model_my_model"].RegisteredModelId) assert.Len(t, out.Resource.Permissions["mlflow_model_my_model"].AccessControl, 1) @@ -247,7 +247,7 @@ func TestConvertExperiment(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.Equal(t, "name", out.Resource.MlflowExperiment["my_experiment"].Name) assert.Nil(t, out.Data) } @@ -270,7 +270,7 @@ func TestConvertExperimentPermissions(t *testing.T) { }, } - out := BundleToTerraform(&config) + out, _ := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["mlflow_experiment_my_experiment"].ExperimentId) assert.Len(t, out.Resource.Permissions["mlflow_experiment_my_experiment"].AccessControl, 1) diff --git a/bundle/deploy/terraform/write.go b/bundle/deploy/terraform/write.go index b40a7053..0bf9ab24 100644 --- a/bundle/deploy/terraform/write.go +++ b/bundle/deploy/terraform/write.go @@ -21,7 +21,8 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - root := BundleToTerraform(&b.Config) + root, noResources := BundleToTerraform(&b.Config) + b.TerraformHasNoResources = noResources f, err := os.Create(filepath.Join(dir, "bundle.tf.json")) if err != nil { return err diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 29abbe21..31752270 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -26,9 +26,10 @@ type pair struct { v any } +var cachedUser *iam.User +var cachedIsServicePrincipal *bool + func loadHelpers(ctx context.Context) template.FuncMap { - var user *iam.User - var is_service_principal *bool w := root.WorkspaceClient(ctx) return template.FuncMap{ "fail": func(format string, args ...any) (any, error) { @@ -80,32 +81,32 @@ func loadHelpers(ctx context.Context) template.FuncMap { return w.Config.Host, nil }, "user_name": func() (string, error) { - if user == nil { + if cachedUser == nil { var err error - user, err = w.CurrentUser.Me(ctx) + cachedUser, err = w.CurrentUser.Me(ctx) if err != nil { return "", err 
} } - result := user.UserName + result := cachedUser.UserName if result == "" { - result = user.Id + result = cachedUser.Id } return result, nil }, "is_service_principal": func() (bool, error) { - if is_service_principal != nil { - return *is_service_principal, nil + if cachedIsServicePrincipal != nil { + return *cachedIsServicePrincipal, nil } - if user == nil { + if cachedUser == nil { var err error - user, err = w.CurrentUser.Me(ctx) + cachedUser, err = w.CurrentUser.Me(ctx) if err != nil { return false, err } } - result := auth.IsServicePrincipal(user.Id) - is_service_principal = &result + result := auth.IsServicePrincipal(cachedUser.Id) + cachedIsServicePrincipal = &result return result, nil }, } diff --git a/libs/template/renderer.go b/libs/template/renderer.go index f4bd99d2..f674ea0f 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -9,6 +9,7 @@ import ( "path" "path/filepath" "slices" + "sort" "strings" "text/template" @@ -214,17 +215,22 @@ func (r *renderer) walk() error { // Add skip function, which accumulates skip patterns relative to current // directory r.baseTemplate.Funcs(template.FuncMap{ - "skip": func(relPattern string) string { + "skip": func(relPattern string) (string, error) { // patterns are specified relative to current directory of the file // the {{skip}} function is called from. - pattern := path.Join(currentDirectory, relPattern) + patternRaw := path.Join(currentDirectory, relPattern) + pattern, err := r.executeTemplate(patternRaw) + if err != nil { + return "", err + } + if !slices.Contains(r.skipPatterns, pattern) { logger.Infof(r.ctx, "adding skip pattern: %s", pattern) r.skipPatterns = append(r.skipPatterns, pattern) } // return empty string will print nothing at function call site // when executing the template - return "" + return "", nil }, }) @@ -239,6 +245,10 @@ func (r *renderer) walk() error { if err != nil { return err } + // Sort by name to ensure deterministic ordering + sort.Slice(entries, func(i, j int) bool { + return entries[i].Name() < entries[j].Name() + }) for _, entry := range entries { if entry.IsDir() { // Add to slice, for BFS traversal diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index a2e5675e..21dd1e4f 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -12,7 +12,14 @@ import ( "testing" "text/template" + "github.com/databricks/cli/bundle" + bundleConfig "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/root" + "github.com/databricks/databricks-sdk-go" + workspaceConfig "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/service/iam" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -29,6 +36,95 @@ func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { assert.Equal(t, perm, info.Mode().Perm()) } +func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { + ctx := context.Background() + + templatePath, err := prepareBuiltinTemplates("default-python", tempDir) + require.NoError(t, err) + + w := &databricks.WorkspaceClient{ + Config: &workspaceConfig.Config{Host: "https://myhost.com"}, + } + + // Prepare helpers + cachedUser = &iam.User{UserName: "user@domain.com"} + cachedIsServicePrincipal = &isServicePrincipal + ctx = root.SetWorkspaceClient(ctx, w) + helpers := 
loadHelpers(ctx) + + renderer, err := newRenderer(ctx, settings, helpers, templatePath, "./testdata/template-in-path/library", tempDir) + require.NoError(t, err) + + // Evaluate template + err = renderer.walk() + require.NoError(t, err) + err = renderer.persistToDisk() + require.NoError(t, err) + b, err := bundle.Load(ctx, filepath.Join(tempDir, "template", "my_project")) + require.NoError(t, err) + + // Apply initialize / validation mutators + b.Config.Workspace.CurrentUser = &bundleConfig.User{User: cachedUser} + b.WorkspaceClient() + b.Config.Bundle.Terraform = &bundleConfig.Terraform{ + ExecPath: "sh", + } + err = bundle.Apply(ctx, b, bundle.Seq( + bundle.Seq(mutator.DefaultMutators()...), + mutator.SelectTarget(target), + phases.Initialize(), + )) + require.NoError(t, err) + + // Apply build mutator + if build { + err = bundle.Apply(ctx, b, phases.Build()) + require.NoError(t, err) + } +} + +func TestBuiltinTemplateValid(t *testing.T) { + // Test option combinations + options := []string{"yes", "no"} + isServicePrincipal := false + build := false + for _, includeNotebook := range options { + for _, includeDlt := range options { + for _, includePython := range options { + for _, isServicePrincipal := range []bool{true, false} { + config := map[string]any{ + "project_name": "my_project", + "include_notebook": includeNotebook, + "include_dlt": includeDlt, + "include_python": includePython, + } + tempDir := t.TempDir() + assertBuiltinTemplateValid(t, config, "dev", isServicePrincipal, build, tempDir) + } + } + } + } + + // Test prod mode + build + config := map[string]any{ + "project_name": "my_project", + "include_notebook": "yes", + "include_dlt": "yes", + "include_python": "yes", + } + isServicePrincipal = false + build = true + + // On Windows, we can't always remove the resulting temp dir since background + // processes might have it open, so we use 'defer' for a best-effort cleanup + tempDir, err := os.MkdirTemp("", "templates") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + assertBuiltinTemplateValid(t, config, "prod", isServicePrincipal, build, tempDir) + defer os.RemoveAll(tempDir) +} + func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { tmpDir := t.TempDir() diff --git a/libs/template/templates/default-python/databricks_template_schema.json b/libs/template/templates/default-python/databricks_template_schema.json index 3220e9a6..22c65f30 100644 --- a/libs/template/templates/default-python/databricks_template_schema.json +++ b/libs/template/templates/default-python/databricks_template_schema.json @@ -3,7 +3,32 @@ "project_name": { "type": "string", "default": "my_project", - "description": "Unique name for this project" + "description": "Unique name for this project", + "order": 1 + }, + "include_notebook": { + "todo": "use an enum here, see https://github.com/databricks/cli/pull/668", + "type": "string", + "default": "yes", + "pattern": "^(yes|no)$", + "description": "Include a stub (sample) notebook in 'my_project/src'", + "order": 2 + }, + "include_dlt": { + "todo": "use an enum here, see https://github.com/databricks/cli/pull/668", + "type": "string", + "default": "yes", + "pattern": "^(yes|no)$", + "description": "Include a stub (sample) DLT pipeline in 'my_project/src'", + "order": 3 + }, + "include_python": { + "todo": "use an enum here, see https://github.com/databricks/cli/pull/668", + "type": "string", + "default": "yes", + "pattern": "^(yes|no)$", + "description": "Include a stub (sample) Python package 'my_project/src'", + "order": 4 } } } 
diff --git a/libs/template/templates/default-python/defaults.json b/libs/template/templates/default-python/defaults.json
index 99ecd36d..510ec4a3 100644
--- a/libs/template/templates/default-python/defaults.json
+++ b/libs/template/templates/default-python/defaults.json
@@ -1,3 +1,6 @@
 {
-    "project_name": "my_project"
+    "project_name": "my_project",
+    "include_notebook": "yes",
+    "include_dlt": "yes",
+    "include_python": "yes"
 }
diff --git a/libs/template/templates/default-python/template/__preamble.tmpl b/libs/template/templates/default-python/template/__preamble.tmpl
new file mode 100644
index 00000000..c018f282
--- /dev/null
+++ b/libs/template/templates/default-python/template/__preamble.tmpl
@@ -0,0 +1,38 @@
+# Preamble
+
+This file only contains template directives; it is skipped for the actual output.
+
+{{skip "__preamble"}}
+
+{{ $value := .project_name }}
+{{with (regexp "^[A-Za-z0-9_]*$")}}
+  {{if not (.MatchString $value)}}
+    {{fail "Invalid project_name: %s. Must consist of letters and underscores only." $value}}
+  {{end}}
+{{end}}
+
+{{$notDLT := not (eq .include_dlt "yes")}}
+{{$notNotebook := not (eq .include_notebook "yes")}}
+{{$notPython := not (eq .include_python "yes")}}
+
+{{if $notPython}}
+  {{skip "{{.project_name}}/src/{{.project_name}}"}}
+  {{skip "{{.project_name}}/tests/test_main.py"}}
+  {{skip "{{.project_name}}/setup.py"}}
+  {{skip "{{.project_name}}/pytest.ini"}}
+{{end}}
+
+{{if $notDLT}}
+  {{skip "{{.project_name}}/src/dlt_pipeline.ipynb"}}
+  {{skip "{{.project_name}}/resources/{{.project_name}}_pipeline.yml"}}
+{{end}}
+
+{{if $notNotebook}}
+  {{skip "{{.project_name}}/src/notebook.iypnb"}}
+{{end}}
+
+{{if (and $notDLT $notNotebook $notPython)}}
+  {{skip "{{.project_name}}/resources/{{.project_name}}_job.yml"}}
+{{else}}
+  {{skip "{{.project_name}}/resources/.gitkeep"}}
+{{end}}
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl
index 4c89435b..7c8876e7 100644
--- a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl
+++ b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl
@@ -28,10 +28,17 @@ The '{{.project_name}}' project was generated by using the default-python templa
    $ databricks bundle deploy --target prod
    ```
 
-5. Optionally, install developer tools such as the Databricks extension for Visual Studio Code from
-   https://docs.databricks.com/dev-tools/vscode-ext.html. Or read the "getting started" documentation for
-   **Databricks Connect** for instructions on running the included Python code from a different IDE.
+5. To run a job or pipeline, use the "run" command:
+   ```
+   $ databricks bundle run {{.project_name}}_job
+   ```
 
-6. For documentation on the Databricks asset bundles format used
+6. Optionally, install developer tools such as the Databricks extension for Visual Studio Code from
+   https://docs.databricks.com/dev-tools/vscode-ext.html.
+{{- if (eq .include_python "yes") }} Or read the "getting started" documentation for
+   **Databricks Connect** for instructions on running the included Python code from a different IDE.
+{{- end}}
+
+7. For documentation on the Databricks asset bundles format used
    for this project, and for CI/CD configuration, see
    https://docs.databricks.com/dev-tools/bundles/index.html.
diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/.gitkeep b/libs/template/templates/default-python/template/{{.project_name}}/resources/.gitkeep new file mode 100644 index 00000000..3e09c14c --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/.gitkeep @@ -0,0 +1 @@ +This folder is reserved for Databricks Asset Bundles resource definitions. diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl index f8116cdf..1792f947 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -1,6 +1,5 @@ # The main job for {{.project_name}} resources: - jobs: {{.project_name}}_job: name: {{.project_name}}_job @@ -10,20 +9,41 @@ resources: timezone_id: Europe/Amsterdam {{- if not is_service_principal}} + email_notifications: on_failure: - {{user_name}} + + {{else}} + {{end -}} tasks: + {{- if eq .include_notebook "yes" }} - task_key: notebook_task job_cluster_key: job_cluster notebook_task: notebook_path: ../src/notebook.ipynb - - - task_key: python_wheel_task + {{end -}} + {{- if (eq .include_dlt "yes") }} + - task_key: refresh_pipeline + {{- if (eq .include_notebook "yes" )}} depends_on: - task_key: notebook_task + {{- end}} + pipeline_task: + {{- /* TODO: we should find a way that doesn't use magics for the below, like ./{{project_name}}_pipeline.yml */}} + pipeline_id: ${resources.pipelines.{{.project_name}}_pipeline.id} + {{end -}} + {{- if (eq .include_python "yes") }} + - task_key: main_task + {{- if (eq .include_dlt "yes") }} + depends_on: + - task_key: refresh_pipeline + {{- else if (eq .include_notebook "yes" )}} + depends_on: + - task_key: notebook_task + {{end}} job_cluster_key: job_cluster python_wheel_task: package_name: {{.project_name}} @@ -31,6 +51,8 @@ resources: libraries: - whl: ../dist/*.whl + {{else}} + {{end -}} job_clusters: - job_cluster_key: job_cluster new_cluster: diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl new file mode 100644 index 00000000..ffe400cb --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl @@ -0,0 +1,12 @@ +# The main pipeline for {{.project_name}} +resources: + pipelines: + {{.project_name}}_pipeline: + name: "{{.project_name}}_pipeline" + target: "{{.project_name}}_${bundle.environment}" + libraries: + - notebook: + path: ../src/dlt_pipeline.ipynb + + configuration: + "bundle.sourcePath": "/Workspace/${workspace.file_path}/src" diff --git a/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb b/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl similarity index 84% rename from libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb rename to libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl index 2ee36c3c..04bb261c 100644 --- 
a/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb +++ b/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl @@ -17,11 +17,15 @@ }, "outputs": [], "source": [ + {{- if (eq .include_python "yes") }} "import sys\n", "sys.path.append('../src')\n", - "from project import main\n", + "from {{.project_name}} import main\n", "\n", - "main.taxis.show(10)" + "main.get_taxis().show(10)" + {{else}} + "spark.range(10)" + {{end -}} ] } ], diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl new file mode 100644 index 00000000..74893238 --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl @@ -0,0 +1,112 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "9a626959-61c8-4bba-84d2-2a4ecab1f7ec", + "showTitle": false, + "title": "" + } + }, + "source": [ + "# DLT pipeline\n", + "\n", + "This Delta Live Tables (DLT) definition is executed using a pipeline defined in resources/{{.my_project}}_pipeline.yml." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "9198e987-5606-403d-9f6d-8f14e6a4017f", + "showTitle": false, + "title": "" + }, + "jupyter": { + {{- /* Collapse this cell by default. Just boring imports here! */}} + "source_hidden": true + } + }, + "outputs": [], + "source": [ + {{- if (eq .include_python "yes") }} + "# Import DLT and make sure 'my_project' is on the Python path\n", + "import dlt\n", + "from pyspark.sql.functions import expr\n", + "from pyspark.sql import SparkSession\n", + "spark = SparkSession.builder.getOrCreate()\n", + "import sys\n", + "try:\n", + " sys.path.append(spark.conf.get(\"bundle.sourcePath\"))\n", + "except:\n", + " pass\n", + "from my_project import main" + {{else}} + "# Import DLT\n", + "import dlt\n", + "from pyspark.sql.functions import expr\n", + "from pyspark.sql import SparkSession\n", + "spark = SparkSession.builder.getOrCreate()" + {{end -}} + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "3fc19dba-61fd-4a89-8f8c-24fee63bfb14", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + {{- if (eq .include_python "yes") }} + "@dlt.view\n", + "def taxi_raw():\n", + " return main.get_taxis()\n", + {{else}} + "\n", + "@dlt.view\n", + "def taxi_raw():\n", + " return spark.read.format(\"json\").load(\"/databricks-datasets/nyctaxi/sample/json/\")\n", + {{end -}} + "\n", + "@dlt.table\n", + "def filtered_taxis():\n", + " return dlt.read(\"taxi_raw\").filter(expr(\"fare_amount < 30\"))" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 2 + }, + "notebookName": "dlt_pipeline", + "widgets": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git 
a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl index 26c74303..8423ecf8 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl @@ -34,9 +34,13 @@ }, "outputs": [], "source": [ + {{- if (eq .include_python "yes") }} "from {{.project_name}} import main\n", "\n", - "main.get_taxis().show(10)\n" + "main.get_taxis().show(10)" + {{else}} + "spark.range(10)" + {{end -}} ] } ], diff --git a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl index 92afccc6..f1750046 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl @@ -2,4 +2,4 @@ from {{.project_name}} import main def test_main(): taxis = main.get_taxis() - assert taxis.count() == 5 + assert taxis.count() > 5 From c8f5990f47edf6429a88494e871084e49508835c Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 6 Sep 2023 13:46:21 +0200 Subject: [PATCH 066/310] Release v0.204.0 (#738) This release includes permission related commands for a subset of workspace services where they apply. These complement the `permissions` command and do not require specification of the object type to work with, as that is implied by the command they are nested under. CLI: * Group permission related commands ([#730](https://github.com/databricks/cli/pull/730)). Bundles: * Fixed artifact file uploading on Windows and wheel execution on DBR 13.3 ([#722](https://github.com/databricks/cli/pull/722)). * Make resource and artifact paths in bundle config relative to config folder ([#708](https://github.com/databricks/cli/pull/708)). * Add support for ordering of input prompts ([#662](https://github.com/databricks/cli/pull/662)). * Fix IsServicePrincipal() only working for workspace admins ([#732](https://github.com/databricks/cli/pull/732)). * databricks bundle init template v1 ([#686](https://github.com/databricks/cli/pull/686)). * databricks bundle init template v2: optional stubs, DLT support ([#700](https://github.com/databricks/cli/pull/700)). * Show 'databricks bundle init' template in CLI prompt ([#725](https://github.com/databricks/cli/pull/725)). * Include $PATH in set of environment variables to pass along. ([#736](https://github.com/databricks/cli/pull/736)). Internal: * Update Go SDK to v0.19.0 ([#729](https://github.com/databricks/cli/pull/729)). * Replace API call to test configuration with dummy authenticate call ([#728](https://github.com/databricks/cli/pull/728)). API Changes: * Changed `databricks account storage-credentials create` command to return . * Changed `databricks account storage-credentials get` command to return . * Changed `databricks account storage-credentials list` command to return . * Changed `databricks account storage-credentials update` command to return . * Changed `databricks connections create` command with new required argument order. * Changed `databricks connections update` command with new required argument order. * Changed `databricks volumes create` command with new required argument order. * Added `databricks artifact-allowlists` command group. 
* Added `databricks model-versions` command group. * Added `databricks registered-models` command group. * Added `databricks cluster-policies get-permission-levels` command. * Added `databricks cluster-policies get-permissions` command. * Added `databricks cluster-policies set-permissions` command. * Added `databricks cluster-policies update-permissions` command. * Added `databricks clusters get-permission-levels` command. * Added `databricks clusters get-permissions` command. * Added `databricks clusters set-permissions` command. * Added `databricks clusters update-permissions` command. * Added `databricks instance-pools get-permission-levels` command. * Added `databricks instance-pools get-permissions` command. * Added `databricks instance-pools set-permissions` command. * Added `databricks instance-pools update-permissions` command. * Added `databricks files` command group. * Changed `databricks permissions set` command to start returning . * Changed `databricks permissions update` command to start returning . * Added `databricks users get-permission-levels` command. * Added `databricks users get-permissions` command. * Added `databricks users set-permissions` command. * Added `databricks users update-permissions` command. * Added `databricks jobs get-permission-levels` command. * Added `databricks jobs get-permissions` command. * Added `databricks jobs set-permissions` command. * Added `databricks jobs update-permissions` command. * Changed `databricks experiments get-by-name` command to return . * Changed `databricks experiments get-experiment` command to return . * Added `databricks experiments delete-runs` command. * Added `databricks experiments get-permission-levels` command. * Added `databricks experiments get-permissions` command. * Added `databricks experiments restore-runs` command. * Added `databricks experiments set-permissions` command. * Added `databricks experiments update-permissions` command. * Added `databricks model-registry get-permission-levels` command. * Added `databricks model-registry get-permissions` command. * Added `databricks model-registry set-permissions` command. * Added `databricks model-registry update-permissions` command. * Added `databricks pipelines get-permission-levels` command. * Added `databricks pipelines get-permissions` command. * Added `databricks pipelines set-permissions` command. * Added `databricks pipelines update-permissions` command. * Added `databricks serving-endpoints get-permission-levels` command. * Added `databricks serving-endpoints get-permissions` command. * Added `databricks serving-endpoints set-permissions` command. * Added `databricks serving-endpoints update-permissions` command. * Added `databricks token-management get-permission-levels` command. * Added `databricks token-management get-permissions` command. * Added `databricks token-management set-permissions` command. * Added `databricks token-management update-permissions` command. * Changed `databricks dashboards create` command with new required argument order. * Added `databricks warehouses get-permission-levels` command. * Added `databricks warehouses get-permissions` command. * Added `databricks warehouses set-permissions` command. * Added `databricks warehouses update-permissions` command. * Added `databricks dashboard-widgets` command group. * Added `databricks query-visualizations` command group. * Added `databricks repos get-permission-levels` command. * Added `databricks repos get-permissions` command. * Added `databricks repos set-permissions` command. 
* Added `databricks repos update-permissions` command.
* Added `databricks secrets get-secret` command.
* Added `databricks workspace get-permission-levels` command.
* Added `databricks workspace get-permissions` command.
* Added `databricks workspace set-permissions` command.
* Added `databricks workspace update-permissions` command.

OpenAPI commit 09a7fa63d9ae243e5407941f200960ca14d48b07 (2023-09-04)
---
 CHANGELOG.md | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6fcbab8c..9835b0bc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,106 @@
 # Version changelog
 
+## 0.204.0
+
+This release includes permission related commands for a subset of workspace
+services where they apply. These complement the `permissions` command and
+do not require specification of the object type to work with, as that is
+implied by the command they are nested under.
+
+CLI:
+ * Group permission related commands ([#730](https://github.com/databricks/cli/pull/730)).
+
+Bundles:
+ * Fixed artifact file uploading on Windows and wheel execution on DBR 13.3 ([#722](https://github.com/databricks/cli/pull/722)).
+ * Make resource and artifact paths in bundle config relative to config folder ([#708](https://github.com/databricks/cli/pull/708)).
+ * Add support for ordering of input prompts ([#662](https://github.com/databricks/cli/pull/662)).
+ * Fix IsServicePrincipal() only working for workspace admins ([#732](https://github.com/databricks/cli/pull/732)).
+ * databricks bundle init template v1 ([#686](https://github.com/databricks/cli/pull/686)).
+ * databricks bundle init template v2: optional stubs, DLT support ([#700](https://github.com/databricks/cli/pull/700)).
+ * Show 'databricks bundle init' template in CLI prompt ([#725](https://github.com/databricks/cli/pull/725)).
+ * Include $PATH in set of environment variables to pass along. ([#736](https://github.com/databricks/cli/pull/736)).
+
+Internal:
+ * Update Go SDK to v0.19.0 ([#729](https://github.com/databricks/cli/pull/729)).
+ * Replace API call to test configuration with dummy authenticate call ([#728](https://github.com/databricks/cli/pull/728)).
+
+API Changes:
+ * Changed `databricks account storage-credentials create` command to return .
+ * Changed `databricks account storage-credentials get` command to return .
+ * Changed `databricks account storage-credentials list` command to return .
+ * Changed `databricks account storage-credentials update` command to return .
+ * Changed `databricks connections create` command with new required argument order.
+ * Changed `databricks connections update` command with new required argument order.
+ * Changed `databricks volumes create` command with new required argument order.
+ * Added `databricks artifact-allowlists` command group.
+ * Added `databricks model-versions` command group.
+ * Added `databricks registered-models` command group.
+ * Added `databricks cluster-policies get-permission-levels` command.
+ * Added `databricks cluster-policies get-permissions` command.
+ * Added `databricks cluster-policies set-permissions` command.
+ * Added `databricks cluster-policies update-permissions` command.
+ * Added `databricks clusters get-permission-levels` command.
+ * Added `databricks clusters get-permissions` command.
+ * Added `databricks clusters set-permissions` command.
+ * Added `databricks clusters update-permissions` command.
+ * Added `databricks instance-pools get-permission-levels` command.
+ * Added `databricks instance-pools get-permissions` command. + * Added `databricks instance-pools set-permissions` command. + * Added `databricks instance-pools update-permissions` command. + * Added `databricks files` command group. + * Changed `databricks permissions set` command to start returning . + * Changed `databricks permissions update` command to start returning . + * Added `databricks users get-permission-levels` command. + * Added `databricks users get-permissions` command. + * Added `databricks users set-permissions` command. + * Added `databricks users update-permissions` command. + * Added `databricks jobs get-permission-levels` command. + * Added `databricks jobs get-permissions` command. + * Added `databricks jobs set-permissions` command. + * Added `databricks jobs update-permissions` command. + * Changed `databricks experiments get-by-name` command to return . + * Changed `databricks experiments get-experiment` command to return . + * Added `databricks experiments delete-runs` command. + * Added `databricks experiments get-permission-levels` command. + * Added `databricks experiments get-permissions` command. + * Added `databricks experiments restore-runs` command. + * Added `databricks experiments set-permissions` command. + * Added `databricks experiments update-permissions` command. + * Added `databricks model-registry get-permission-levels` command. + * Added `databricks model-registry get-permissions` command. + * Added `databricks model-registry set-permissions` command. + * Added `databricks model-registry update-permissions` command. + * Added `databricks pipelines get-permission-levels` command. + * Added `databricks pipelines get-permissions` command. + * Added `databricks pipelines set-permissions` command. + * Added `databricks pipelines update-permissions` command. + * Added `databricks serving-endpoints get-permission-levels` command. + * Added `databricks serving-endpoints get-permissions` command. + * Added `databricks serving-endpoints set-permissions` command. + * Added `databricks serving-endpoints update-permissions` command. + * Added `databricks token-management get-permission-levels` command. + * Added `databricks token-management get-permissions` command. + * Added `databricks token-management set-permissions` command. + * Added `databricks token-management update-permissions` command. + * Changed `databricks dashboards create` command with new required argument order. + * Added `databricks warehouses get-permission-levels` command. + * Added `databricks warehouses get-permissions` command. + * Added `databricks warehouses set-permissions` command. + * Added `databricks warehouses update-permissions` command. + * Added `databricks dashboard-widgets` command group. + * Added `databricks query-visualizations` command group. + * Added `databricks repos get-permission-levels` command. + * Added `databricks repos get-permissions` command. + * Added `databricks repos set-permissions` command. + * Added `databricks repos update-permissions` command. + * Added `databricks secrets get-secret` command. + * Added `databricks workspace get-permission-levels` command. + * Added `databricks workspace get-permissions` command. + * Added `databricks workspace set-permissions` command. + * Added `databricks workspace update-permissions` command. 
+
+
+OpenAPI commit 09a7fa63d9ae243e5407941f200960ca14d48b07 (2023-09-04)
+
 ## 0.203.3
 
 Bundles:

From 3c79181148957592b53412f97f63fbbfd6aa00f5 Mon Sep 17 00:00:00 2001
From: "Lennart Kats (databricks)"
Date: Wed, 6 Sep 2023 20:18:15 +0200
Subject: [PATCH 067/310] Remove unused file (#742)

defaults.json was originally used in tests. It's no longer used and should be
removed.
---
 libs/template/templates/default-python/defaults.json | 6 ------
 1 file changed, 6 deletions(-)
 delete mode 100644 libs/template/templates/default-python/defaults.json

diff --git a/libs/template/templates/default-python/defaults.json b/libs/template/templates/default-python/defaults.json
deleted file mode 100644
index 510ec4a3..00000000
--- a/libs/template/templates/default-python/defaults.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "project_name": "my_project",
-    "include_notebook": "yes",
-    "include_dlt": "yes",
-    "include_python": "yes"
-}

From 50b2c0b83bde8bd645c1165ec6b70acf06284151 Mon Sep 17 00:00:00 2001
From: "Lennart Kats (databricks)"
Date: Thu, 7 Sep 2023 10:26:43 +0200
Subject: [PATCH 068/310] Fix notebook showing up in template when not selected (#743)

## Changes
This fixes a typo that caused the notebook.ipynb file to show up even if the
user answered "no" to the question about including a notebook.

## Tests
We have matrix validation tests for all the yes/no combinations and whether
they build + validate. There is no current test for the absence of files.
---
 libs/template/templates/default-python/template/__preamble.tmpl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/template/templates/default-python/template/__preamble.tmpl b/libs/template/templates/default-python/template/__preamble.tmpl
index c018f282..95c61333 100644
--- a/libs/template/templates/default-python/template/__preamble.tmpl
+++ b/libs/template/templates/default-python/template/__preamble.tmpl
@@ -28,7 +28,7 @@ This file only contains template directives; it is skipped for the actual output.
 {{end}}
 
 {{if $notNotebook}}
-  {{skip "{{.project_name}}/src/notebook.iypnb"}}
+  {{skip "{{.project_name}}/src/notebook.ipynb"}}
 {{end}}
 
 {{if (and $notDLT $notNotebook $notPython)}}

From c0ebfb8101700d6e6300954c2efcf2077c690e01 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Thu, 7 Sep 2023 14:48:59 +0200
Subject: [PATCH 069/310] Fix conversion of job parameters (#744)

## Changes
Another example of singular/plural conversion. The longer-term solution is to
do a full sweep of the type using reflection to make sure we cover all
fields.

## Tests
Unit test passes.
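To illustrate the singular/plural mismatch this fixes: the bundle
configuration models job parameters as a plural `parameters` slice, while the
Terraform schema expects repeated singular `parameter` blocks, so the
converter copies the slice element by element. A minimal sketch with
simplified stand-in types (the real structs carry more fields):

```
package main

import "fmt"

// Simplified stand-ins for the bundle (plural "parameters") and
// Terraform (repeated singular "parameter") types.
type JobParameterDefinition struct {
	Name    string
	Default string
}

type ResourceJobParameter struct {
	Name    string
	Default string
}

func main() {
	src := []JobParameterDefinition{
		{Name: "param1", Default: "default1"},
		{Name: "param2", Default: "default2"},
	}

	// Copy the plural slice into repeated singular blocks, one element at a time.
	var dst []ResourceJobParameter
	for _, v := range src {
		dst = append(dst, ResourceJobParameter{Name: v.Name, Default: v.Default})
	}

	fmt.Println(dst) // prints: [{param1 default1} {param2 default2}]
}
```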
---
 bundle/deploy/terraform/convert.go      |  6 ++++++
 bundle/deploy/terraform/convert_test.go | 13 +++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go
index 41bde91d..cd480c89 100644
--- a/bundle/deploy/terraform/convert.go
+++ b/bundle/deploy/terraform/convert.go
@@ -90,6 +90,12 @@ func BundleToTerraform(config *config.Root) (*schema.Root, bool) {
 				Tag:      git.GitTag,
 			}
 		}
+
+		for _, v := range src.Parameters {
+			var t schema.ResourceJobParameter
+			conv(v, &t)
+			dst.Parameter = append(dst.Parameter, t)
+		}
 	}
 
 	tfroot.Resource.Job[k] = &dst

diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go
index 4d912fbe..34a65d70 100644
--- a/bundle/deploy/terraform/convert_test.go
+++ b/bundle/deploy/terraform/convert_test.go
@@ -29,6 +29,16 @@ func TestConvertJob(t *testing.T) {
 			GitProvider: jobs.GitProviderGitHub,
 			GitUrl:      "https://github.com/foo/bar",
 		},
+		Parameters: []jobs.JobParameterDefinition{
+			{
+				Name:    "param1",
+				Default: "default1",
+			},
+			{
+				Name:    "param2",
+				Default: "default2",
+			},
+		},
 	},
 }
@@ -44,6 +54,9 @@ func TestConvertJob(t *testing.T) {
 	assert.Equal(t, "my job", out.Resource.Job["my_job"].Name)
 	assert.Len(t, out.Resource.Job["my_job"].JobCluster, 1)
 	assert.Equal(t, "https://github.com/foo/bar", out.Resource.Job["my_job"].GitSource.Url)
+	assert.Len(t, out.Resource.Job["my_job"].Parameter, 2)
+	assert.Equal(t, "param1", out.Resource.Job["my_job"].Parameter[0].Name)
+	assert.Equal(t, "param2", out.Resource.Job["my_job"].Parameter[1].Name)
 	assert.Nil(t, out.Data)
 }

From 10e08367495e0400b16ba68da5bb218e626ebcfb Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Thu, 7 Sep 2023 16:08:16 +0200
Subject: [PATCH 070/310] Added end-to-end test for deploying and running
 Python wheel task (#741)

## Changes
Added end-to-end test for deploying and running Python wheel task

## Tests
The test passed successfully in all environments; it takes about 9-10 minutes
to complete.

```
Deleted snapshot file at /var/folders/nt/xjv68qzs45319w4k36dhpylc0000gp/T/TestAccPythonWheelTaskDeployAndRun1845899209/002/.databricks/bundle/default/sync-snapshots/1f7cc766ffe038d6.json
Successfully deleted files!
2023/09/06 17:50:50 INFO Releasing deployment lock mutator=destroy mutator=seq mutator=seq mutator=deferred mutator=lock:release
--- PASS: TestAccPythonWheelTaskDeployAndRun (508.16s)
PASS
coverage: 77.9% of statements in ./...
ok      github.com/databricks/cli/internal/bundle      508.810s        coverage: 77.9% of statements in ./...
``` --------- Co-authored-by: Pieter Noordhuis --- bundle/deploy/terraform/init.go | 2 + .../databricks_template_schema.json | 17 +++++ .../template/databricks.yml.tmpl | 21 ++++++ .../python_wheel_task/template/setup.py.tmpl | 15 ++++ .../template/{{.project_name}}/__init__.py | 2 + .../template/{{.project_name}}/__main__.py | 16 +++++ internal/bundle/helpers.go | 70 +++++++++++++++++++ internal/bundle/python_wheel_test.go | 41 +++++++++++ internal/helpers.go | 13 +++- 9 files changed, 196 insertions(+), 1 deletion(-) create mode 100644 internal/bundle/bundles/python_wheel_task/databricks_template_schema.json create mode 100644 internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl create mode 100644 internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py create mode 100644 internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py create mode 100644 internal/bundle/helpers.go create mode 100644 internal/bundle/python_wheel_test.go diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 878c4e8b..60f0a6c4 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -8,6 +8,7 @@ import ( "path/filepath" "runtime" "strings" + "time" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" @@ -59,6 +60,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con Product: product.Terraform, Version: version.Must(version.NewVersion("1.5.5")), InstallDir: binDir, + Timeout: 1 * time.Minute, } execPath, err = installer.Install(ctx) if err != nil { diff --git a/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json b/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json new file mode 100644 index 00000000..b39a628c --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json @@ -0,0 +1,17 @@ +{ + "properties": { + "project_name": { + "type": "string", + "default": "my_test_code", + "description": "Unique name for this project" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" + } + } +} diff --git a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl b/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl new file mode 100644 index 00000000..4386879a --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl @@ -0,0 +1,21 @@ +bundle: + name: wheel-task + +resources: + jobs: + some_other_job: + name: "[${bundle.target}] Test Wheel Job" + tasks: + - task_key: TestTask + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + python_wheel_task: + package_name: my_test_code + entry_point: run + parameters: + - "one" + - "two" + libraries: + - whl: ./dist/*.whl diff --git a/internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl b/internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl new file mode 100644 index 00000000..b528657b --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl @@ -0,0 +1,15 @@ +from setuptools import setup, find_packages + +import {{.project_name}} + +setup( + name="{{.project_name}}", + version={{.project_name}}.__version__, + 
author={{.project_name}}.__author__, + url="https://databricks.com", + author_email="john.doe@databricks.com", + description="my example wheel", + packages=find_packages(include=["{{.project_name}}"]), + entry_points={"group1": "run={{.project_name}}.__main__:main"}, + install_requires=["setuptools"], +) diff --git a/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py b/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py new file mode 100644 index 00000000..909f1f32 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py @@ -0,0 +1,2 @@ +__version__ = "0.0.1" +__author__ = "Databricks" diff --git a/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py b/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py new file mode 100644 index 00000000..ea918ce2 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py @@ -0,0 +1,16 @@ +""" +The entry point of the Python Wheel +""" + +import sys + + +def main(): + # This method will print the provided arguments + print("Hello from my func") + print("Got arguments:") + print(sys.argv) + + +if __name__ == "__main__": + main() diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go new file mode 100644 index 00000000..3fd4eabc --- /dev/null +++ b/internal/bundle/helpers.go @@ -0,0 +1,70 @@ +package bundle + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/internal" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/cli/libs/template" +) + +func initTestTemplate(t *testing.T, templateName string, config map[string]any) (string, error) { + templateRoot := filepath.Join("bundles", templateName) + + bundleRoot := t.TempDir() + configFilePath, err := writeConfigFile(t, config) + if err != nil { + return "", err + } + + ctx := root.SetWorkspaceClient(context.Background(), nil) + cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "bundles") + ctx = cmdio.InContext(ctx, cmd) + + err = template.Materialize(ctx, configFilePath, templateRoot, bundleRoot) + return bundleRoot, err +} + +func writeConfigFile(t *testing.T, config map[string]any) (string, error) { + bytes, err := json.Marshal(config) + if err != nil { + return "", err + } + + dir := t.TempDir() + filepath := filepath.Join(dir, "config.json") + t.Log("Configuration for template: ", string(bytes)) + + err = os.WriteFile(filepath, bytes, 0644) + return filepath, err +} + +func deployBundle(t *testing.T, path string) error { + t.Setenv("BUNDLE_ROOT", path) + c := internal.NewCobraTestRunner(t, "bundle", "deploy", "--force-lock") + _, _, err := c.Run() + return err +} + +func runResource(t *testing.T, path string, key string) (string, error) { + ctx := context.Background() + ctx = cmdio.NewContext(ctx, cmdio.Default()) + + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "run", key) + stdout, _, err := c.Run() + return stdout.String(), err +} + +func destroyBundle(t *testing.T, path string) error { + t.Setenv("BUNDLE_ROOT", path) + c := internal.NewCobraTestRunner(t, "bundle", "destroy", "--auto-approve") + _, _, err := c.Run() + return err +} diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go new file mode 100644 index 00000000..52683edc --- /dev/null 
+++ b/internal/bundle/python_wheel_test.go
@@ -0,0 +1,41 @@
+package bundle
+
+import (
+	"testing"
+
+	"github.com/databricks/cli/internal"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAccPythonWheelTaskDeployAndRun(t *testing.T) {
+	env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV")
+	t.Log(env)
+
+	var nodeTypeId string
+	if env == "gcp" {
+		nodeTypeId = "n1-standard-4"
+	} else if env == "aws" {
+		nodeTypeId = "i3.xlarge"
+	} else {
+		nodeTypeId = "Standard_DS4_v2"
+	}
+
+	bundleRoot, err := initTestTemplate(t, "python_wheel_task", map[string]any{
+		"node_type_id":  nodeTypeId,
+		"spark_version": "13.2.x-snapshot-scala2.12",
+	})
+	require.NoError(t, err)
+
+	err = deployBundle(t, bundleRoot)
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		destroyBundle(t, bundleRoot)
+	})
+
+	out, err := runResource(t, bundleRoot, "some_other_job")
+	require.NoError(t, err)
+	require.Contains(t, out, "Hello from my func")
+	require.Contains(t, out, "Got arguments:")
+	require.Contains(t, out, "['python', 'one', 'two']")
+}
diff --git a/internal/helpers.go b/internal/helpers.go
index ddc00517..bf27fbb5 100644
--- a/internal/helpers.go
+++ b/internal/helpers.go
@@ -58,6 +58,8 @@ type cobraTestRunner struct {
 	stdout bytes.Buffer
 	stderr bytes.Buffer
 
+	ctx context.Context
+
 	// Line-by-line output.
 	// Background goroutines populate these channels by reading from stdout/stderr pipes.
 	stdoutLines <-chan string
@@ -128,7 +130,7 @@ func (t *cobraTestRunner) RunBackground() {
 	t.registerFlagCleanup(root)
 
 	errch := make(chan error)
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.ctx)
 
 	// Tee stdout/stderr to buffers.
 	stdoutR = io.TeeReader(stdoutR, &t.stdout)
@@ -234,6 +236,15 @@ func (c *cobraTestRunner) Eventually(condition func() bool, waitFor time.Duratio
 func NewCobraTestRunner(t *testing.T, args ...string) *cobraTestRunner {
 	return &cobraTestRunner{
 		T:    t,
+		ctx:  context.Background(),
+		args: args,
+	}
+}
+
+func NewCobraTestRunnerWithContext(t *testing.T, ctx context.Context, args ...string) *cobraTestRunner {
+	return &cobraTestRunner{
+		T:    t,
+		ctx:  ctx,
 		args: args,
 	}
 }

From 1a7bf4e4f127b9ef6dc3aa6ab88f8dd437174d9f Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Thu, 7 Sep 2023 16:36:06 +0200
Subject: [PATCH 071/310] Add schema and config validation to jsonschema
 package (#740)

## Changes
At a high level this PR adds new schema validation and moves functionality
that should be present in the jsonschema package, but resides in the template
package today, to the jsonschema package. This includes for example schema
validation, schema instance validation, to/from string conversion methods,
etc.

The list below outlines all the pieces that have been moved over, and the new
validation bits added.

This PR:
1. Adds casting of schema property default values to integers to the
jsonschema.Load method.
2. Adds validation for default value types for schema properties, checking
they are consistent with the type defined.
3. Introduces the LoadInstance and ValidateInstance methods to the json
schema package. These methods can be used to read and validate JSON documents
against the schema.
4. Replaces validation done for template inputs to use the newly defined JSON
schema validation functions.
5. Moves to/from string and isInteger utility methods to the json schema
package.

## Tests
Existing and new unit tests.
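One detail worth spelling out: `encoding/json` unmarshals untyped JSON
numbers as `float64`, which is why `LoadInstance` has to coerce
integer-typed properties after parsing. A minimal sketch of the problem and
the coercion (the real `toInteger` helper returns an error for values it
cannot convert):

```
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var instance map[string]any
	if err := json.Unmarshal([]byte(`{"int_val": 1}`), &instance); err != nil {
		panic(err)
	}

	// The default unmarshaler yields a float64, not an int.
	fmt.Printf("%T\n", instance["int_val"]) // prints: float64

	// Coerce integer-typed schema properties back to int64.
	if f, ok := instance["int_val"].(float64); ok {
		instance["int_val"] = int64(f)
	}
	fmt.Printf("%T\n", instance["int_val"]) // prints: int64
}
```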
--- libs/jsonschema/instance.go | 91 ++++++++++++ libs/jsonschema/instance_test.go | 129 +++++++++++++++++ libs/jsonschema/schema.go | 32 +++++ libs/jsonschema/schema_test.go | 39 ++++- .../instance-load/invalid-type-instance.json | 6 + .../instance-load/valid-instance.json | 6 + .../test-schema-no-additional-properties.json | 19 +++ .../test-schema-some-fields-required.json | 19 +++ .../instance-validate/test-schema.json | 18 +++ .../schema-invalid-default.json | 9 ++ .../schema-load-int/schema-valid.json | 9 ++ libs/{template => jsonschema}/utils.go | 28 ++-- libs/{template => jsonschema}/utils_test.go | 41 +++--- .../validate_type.go} | 20 ++- .../validate_type_test.go} | 41 +++--- libs/template/config.go | 95 ++----------- libs/template/config_test.go | 134 ++++++------------ .../config-test-schema/test-schema.json | 18 +++ 18 files changed, 512 insertions(+), 242 deletions(-) create mode 100644 libs/jsonschema/instance.go create mode 100644 libs/jsonschema/instance_test.go create mode 100644 libs/jsonschema/testdata/instance-load/invalid-type-instance.json create mode 100644 libs/jsonschema/testdata/instance-load/valid-instance.json create mode 100644 libs/jsonschema/testdata/instance-validate/test-schema-no-additional-properties.json create mode 100644 libs/jsonschema/testdata/instance-validate/test-schema-some-fields-required.json create mode 100644 libs/jsonschema/testdata/instance-validate/test-schema.json create mode 100644 libs/jsonschema/testdata/schema-load-int/schema-invalid-default.json create mode 100644 libs/jsonschema/testdata/schema-load-int/schema-valid.json rename libs/{template => jsonschema}/utils.go (80%) rename libs/{template => jsonschema}/utils_test.go (72%) rename libs/{template/validators.go => jsonschema/validate_type.go} (68%) rename libs/{template/validators_test.go => jsonschema/validate_type_test.go} (75%) create mode 100644 libs/template/testdata/config-test-schema/test-schema.json diff --git a/libs/jsonschema/instance.go b/libs/jsonschema/instance.go new file mode 100644 index 00000000..02ab9f28 --- /dev/null +++ b/libs/jsonschema/instance.go @@ -0,0 +1,91 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "os" +) + +// Load a JSON document and validate it against the JSON schema. Instance here +// refers to a JSON document. see: https://json-schema.org/draft/2020-12/json-schema-core.html#name-instance +func (s *Schema) LoadInstance(path string) (map[string]any, error) { + instance := make(map[string]any) + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + err = json.Unmarshal(b, &instance) + if err != nil { + return nil, err + } + + // The default JSON unmarshaler parses untyped number values as float64. + // We convert integer properties from float64 to int64 here. 
+	for name, v := range instance {
+		propertySchema, ok := s.Properties[name]
+		if !ok {
+			continue
+		}
+		if propertySchema.Type != IntegerType {
+			continue
+		}
+		integerValue, err := toInteger(v)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse property %s: %w", name, err)
+		}
+		instance[name] = integerValue
+	}
+	return instance, s.ValidateInstance(instance)
+}
+
+func (s *Schema) ValidateInstance(instance map[string]any) error {
+	if err := s.validateAdditionalProperties(instance); err != nil {
+		return err
+	}
+	if err := s.validateRequired(instance); err != nil {
+		return err
+	}
+	return s.validateTypes(instance)
+}
+
+// If additional properties is set to false, this function validates that the
+// instance only contains properties defined in the schema.
+func (s *Schema) validateAdditionalProperties(instance map[string]any) error {
+	// Note: AdditionalProperties has the type any.
+	if s.AdditionalProperties != false {
+		return nil
+	}
+	for k := range instance {
+		_, ok := s.Properties[k]
+		if !ok {
+			return fmt.Errorf("property %s is not defined in the schema", k)
+		}
+	}
+	return nil
+}
+
+// This function validates that all required properties in the schema have
+// values in the instance.
+func (s *Schema) validateRequired(instance map[string]any) error {
+	for _, name := range s.Required {
+		if _, ok := instance[name]; !ok {
+			return fmt.Errorf("no value provided for required property %s", name)
+		}
+	}
+	return nil
+}
+
+// Validates that the types of all input property values match the types
+// defined in the schema.
+func (s *Schema) validateTypes(instance map[string]any) error {
+	for k, v := range instance {
+		fieldInfo, ok := s.Properties[k]
+		if !ok {
+			continue
+		}
+		err := validateType(v, fieldInfo.Type)
+		if err != nil {
+			return fmt.Errorf("incorrect type for property %s: %w", k, err)
+		}
+	}
+	return nil
+}
diff --git a/libs/jsonschema/instance_test.go b/libs/jsonschema/instance_test.go
new file mode 100644
index 00000000..d5e0766d
--- /dev/null
+++ b/libs/jsonschema/instance_test.go
@@ -0,0 +1,129 @@
+package jsonschema
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestValidateInstanceAdditionalPropertiesPermitted(t *testing.T) {
+	instance := map[string]any{
+		"int_val":                1,
+		"float_val":              1.0,
+		"bool_val":               false,
+		"an_additional_property": "abc",
+	}
+
+	schema, err := Load("./testdata/instance-validate/test-schema.json")
+	require.NoError(t, err)
+
+	err = schema.validateAdditionalProperties(instance)
+	assert.NoError(t, err)
+
+	err = schema.ValidateInstance(instance)
+	assert.NoError(t, err)
+}
+
+func TestValidateInstanceAdditionalPropertiesForbidden(t *testing.T) {
+	instance := map[string]any{
+		"int_val":                1,
+		"float_val":              1.0,
+		"bool_val":               false,
+		"an_additional_property": "abc",
+	}
+
+	schema, err := Load("./testdata/instance-validate/test-schema-no-additional-properties.json")
+	require.NoError(t, err)
+
+	err = schema.validateAdditionalProperties(instance)
+	assert.EqualError(t, err, "property an_additional_property is not defined in the schema")
+
+	err = schema.ValidateInstance(instance)
+	assert.EqualError(t, err, "property an_additional_property is not defined in the schema")
+
+	instanceWOAdditionalProperties := map[string]any{
+		"int_val":   1,
+		"float_val": 1.0,
+		"bool_val":  false,
+	}
+
+	err = schema.validateAdditionalProperties(instanceWOAdditionalProperties)
+	assert.NoError(t, err)
+
+	err = schema.ValidateInstance(instanceWOAdditionalProperties)
+	assert.NoError(t, err)
+} + +func TestValidateInstanceTypes(t *testing.T) { + schema, err := Load("./testdata/instance-validate/test-schema.json") + require.NoError(t, err) + + validInstance := map[string]any{ + "int_val": 1, + "float_val": 1.0, + "bool_val": false, + } + + err = schema.validateTypes(validInstance) + assert.NoError(t, err) + + err = schema.ValidateInstance(validInstance) + assert.NoError(t, err) + + invalidInstance := map[string]any{ + "int_val": "abc", + "float_val": 1.0, + "bool_val": false, + } + + err = schema.validateTypes(invalidInstance) + assert.EqualError(t, err, "incorrect type for property int_val: expected type integer, but value is \"abc\"") + + err = schema.ValidateInstance(invalidInstance) + assert.EqualError(t, err, "incorrect type for property int_val: expected type integer, but value is \"abc\"") +} + +func TestValidateInstanceRequired(t *testing.T) { + schema, err := Load("./testdata/instance-validate/test-schema-some-fields-required.json") + require.NoError(t, err) + + validInstance := map[string]any{ + "int_val": 1, + "float_val": 1.0, + "bool_val": false, + } + err = schema.validateRequired(validInstance) + assert.NoError(t, err) + err = schema.ValidateInstance(validInstance) + assert.NoError(t, err) + + invalidInstance := map[string]any{ + "string_val": "abc", + "float_val": 1.0, + "bool_val": false, + } + err = schema.validateRequired(invalidInstance) + assert.EqualError(t, err, "no value provided for required property int_val") + err = schema.ValidateInstance(invalidInstance) + assert.EqualError(t, err, "no value provided for required property int_val") +} + +func TestLoadInstance(t *testing.T) { + schema, err := Load("./testdata/instance-validate/test-schema.json") + require.NoError(t, err) + + // Expect the instance to be loaded successfully. + instance, err := schema.LoadInstance("./testdata/instance-load/valid-instance.json") + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "bool_val": false, + "int_val": int64(1), + "string_val": "abc", + "float_val": 2.0, + }, instance) + + // Expect instance validation against the schema to fail. + _, err = schema.LoadInstance("./testdata/instance-load/invalid-type-instance.json") + assert.EqualError(t, err, "incorrect type for property string_val: expected type string, but value is 123") +} diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 87e9acd5..44c65ecc 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -58,6 +58,7 @@ const ( ) func (schema *Schema) validate() error { + // Validate property types are all valid JSON schema types. for _, v := range schema.Properties { switch v.Type { case NumberType, BooleanType, StringType, IntegerType: @@ -72,6 +73,17 @@ func (schema *Schema) validate() error { return fmt.Errorf("type %s is not a recognized json schema type", v.Type) } } + + // Validate default property values are consistent with types. + for name, property := range schema.Properties { + if property.Default == nil { + continue + } + if err := validateType(property.Default, property.Type); err != nil { + return fmt.Errorf("type validation for default value of property %s failed: %w", name, err) + } + } + return nil } @@ -85,5 +97,25 @@ func Load(path string) (*Schema, error) { if err != nil { return nil, err } + + // Convert the default values of top-level properties to integers. + // This is required because the default JSON unmarshaler parses numbers + // as floats when the Golang field it's being loaded to is untyped. 
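+	// (TestSchemaLoadIntegers below exercises this: a default of 1 in the
+	// schema file loads as int64(1).)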
+	//
+	// NOTE: properties can be recursively defined in a schema, but the current
+	// use cases only use the first layer of properties, so we skip converting
+	// any recursive properties.
+	for name, property := range schema.Properties {
+		if property.Type != IntegerType {
+			continue
+		}
+		if property.Default != nil {
+			property.Default, err = toInteger(property.Default)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse default value for property %s: %w", name, err)
+			}
+		}
+	}
+
 	return schema, schema.validate()
 }
diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go
index 76112492..5b92d846 100644
--- a/libs/jsonschema/schema_test.go
+++ b/libs/jsonschema/schema_test.go
@@ -6,7 +6,7 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-func TestJsonSchemaValidate(t *testing.T) {
+func TestSchemaValidateTypeNames(t *testing.T) {
 	var err error
 	toSchema := func(s string) *Schema {
 		return &Schema{
@@ -42,3 +42,40 @@
 	err = toSchema("foobar").validate()
 	assert.EqualError(t, err, "type foobar is not a recognized json schema type")
 }
+
+func TestSchemaLoadIntegers(t *testing.T) {
+	schema, err := Load("./testdata/schema-load-int/schema-valid.json")
+	assert.NoError(t, err)
+	assert.Equal(t, int64(1), schema.Properties["abc"].Default)
+}
+
+func TestSchemaLoadIntegersWithInvalidDefault(t *testing.T) {
+	_, err := Load("./testdata/schema-load-int/schema-invalid-default.json")
+	assert.EqualError(t, err, "failed to parse default value for property abc: expected integer value, got: 1.1")
+}
+
+func TestSchemaValidateDefaultType(t *testing.T) {
+	invalidSchema := &Schema{
+		Properties: map[string]*Schema{
+			"foo": {
+				Type:    "number",
+				Default: "abc",
+			},
+		},
+	}
+
+	err := invalidSchema.validate()
+	assert.EqualError(t, err, "type validation for default value of property foo failed: expected type float, but value is \"abc\"")
+
+	validSchema := &Schema{
+		Properties: map[string]*Schema{
+			"foo": {
+				Type:    "boolean",
+				Default: true,
+			},
+		},
+	}
+
+	err = validSchema.validate()
+	assert.NoError(t, err)
+}
diff --git a/libs/jsonschema/testdata/instance-load/invalid-type-instance.json b/libs/jsonschema/testdata/instance-load/invalid-type-instance.json
new file mode 100644
index 00000000..c55b6fcc
--- /dev/null
+++ b/libs/jsonschema/testdata/instance-load/invalid-type-instance.json
@@ -0,0 +1,6 @@
+{
+    "int_val": 1,
+    "bool_val": false,
+    "string_val": 123,
+    "float_val": 3.0
+}
diff --git a/libs/jsonschema/testdata/instance-load/valid-instance.json b/libs/jsonschema/testdata/instance-load/valid-instance.json
new file mode 100644
index 00000000..7d4dc818
--- /dev/null
+++ b/libs/jsonschema/testdata/instance-load/valid-instance.json
@@ -0,0 +1,6 @@
+{
+    "int_val": 1,
+    "bool_val": false,
+    "string_val": "abc",
+    "float_val": 2.0
+}
diff --git a/libs/jsonschema/testdata/instance-validate/test-schema-no-additional-properties.json b/libs/jsonschema/testdata/instance-validate/test-schema-no-additional-properties.json
new file mode 100644
index 00000000..98b19d5a
--- /dev/null
+++ b/libs/jsonschema/testdata/instance-validate/test-schema-no-additional-properties.json
@@ -0,0 +1,19 @@
+{
+    "properties": {
+        "int_val": {
+            "type": "integer",
+            "default": 123
+        },
+        "float_val": {
+            "type": "number"
+        },
+        "bool_val": {
+            "type": "boolean"
+        },
+        "string_val": {
+            "type": "string",
+            "default": "abc"
+        }
+    },
+    "additionalProperties": false
+}
diff --git 
a/libs/jsonschema/testdata/instance-validate/test-schema-some-fields-required.json b/libs/jsonschema/testdata/instance-validate/test-schema-some-fields-required.json new file mode 100644 index 00000000..46581103 --- /dev/null +++ b/libs/jsonschema/testdata/instance-validate/test-schema-some-fields-required.json @@ -0,0 +1,19 @@ +{ + "properties": { + "int_val": { + "type": "integer", + "default": 123 + }, + "float_val": { + "type": "number" + }, + "bool_val": { + "type": "boolean" + }, + "string_val": { + "type": "string", + "default": "abc" + } + }, + "required": ["int_val", "float_val", "bool_val"] +} diff --git a/libs/jsonschema/testdata/instance-validate/test-schema.json b/libs/jsonschema/testdata/instance-validate/test-schema.json new file mode 100644 index 00000000..41eb8251 --- /dev/null +++ b/libs/jsonschema/testdata/instance-validate/test-schema.json @@ -0,0 +1,18 @@ +{ + "properties": { + "int_val": { + "type": "integer", + "default": 123 + }, + "float_val": { + "type": "number" + }, + "bool_val": { + "type": "boolean" + }, + "string_val": { + "type": "string", + "default": "abc" + } + } +} diff --git a/libs/jsonschema/testdata/schema-load-int/schema-invalid-default.json b/libs/jsonschema/testdata/schema-load-int/schema-invalid-default.json new file mode 100644 index 00000000..1e709f62 --- /dev/null +++ b/libs/jsonschema/testdata/schema-load-int/schema-invalid-default.json @@ -0,0 +1,9 @@ +{ + "type": "object", + "properties": { + "abc": { + "type": "integer", + "default": 1.1 + } + } +} diff --git a/libs/jsonschema/testdata/schema-load-int/schema-valid.json b/libs/jsonschema/testdata/schema-load-int/schema-valid.json new file mode 100644 index 00000000..599ac04d --- /dev/null +++ b/libs/jsonschema/testdata/schema-load-int/schema-valid.json @@ -0,0 +1,9 @@ +{ + "type": "object", + "properties": { + "abc": { + "type": "integer", + "default": 1 + } + } +} diff --git a/libs/template/utils.go b/libs/jsonschema/utils.go similarity index 80% rename from libs/template/utils.go rename to libs/jsonschema/utils.go index ade6a573..21866965 100644 --- a/libs/template/utils.go +++ b/libs/jsonschema/utils.go @@ -1,11 +1,9 @@ -package template +package jsonschema import ( "errors" "fmt" "strconv" - - "github.com/databricks/cli/libs/jsonschema" ) // function to check whether a float value represents an integer @@ -40,41 +38,41 @@ func toInteger(v any) (int64, error) { } } -func toString(v any, T jsonschema.Type) (string, error) { +func ToString(v any, T Type) (string, error) { switch T { - case jsonschema.BooleanType: + case BooleanType: boolVal, ok := v.(bool) if !ok { return "", fmt.Errorf("expected bool, got: %#v", v) } return strconv.FormatBool(boolVal), nil - case jsonschema.StringType: + case StringType: strVal, ok := v.(string) if !ok { return "", fmt.Errorf("expected string, got: %#v", v) } return strVal, nil - case jsonschema.NumberType: + case NumberType: floatVal, ok := v.(float64) if !ok { return "", fmt.Errorf("expected float, got: %#v", v) } return strconv.FormatFloat(floatVal, 'f', -1, 64), nil - case jsonschema.IntegerType: + case IntegerType: intVal, err := toInteger(v) if err != nil { return "", err } return strconv.FormatInt(intVal, 10), nil - case jsonschema.ArrayType, jsonschema.ObjectType: + case ArrayType, ObjectType: return "", fmt.Errorf("cannot format object of type %s as a string. 
Value of object: %#v", T, v) default: return "", fmt.Errorf("unknown json schema type: %q", T) } } -func fromString(s string, T jsonschema.Type) (any, error) { - if T == jsonschema.StringType { +func FromString(s string, T Type) (any, error) { + if T == StringType { return s, nil } @@ -83,13 +81,13 @@ func fromString(s string, T jsonschema.Type) (any, error) { var err error switch T { - case jsonschema.BooleanType: + case BooleanType: v, err = strconv.ParseBool(s) - case jsonschema.NumberType: + case NumberType: v, err = strconv.ParseFloat(s, 32) - case jsonschema.IntegerType: + case IntegerType: v, err = strconv.ParseInt(s, 10, 64) - case jsonschema.ArrayType, jsonschema.ObjectType: + case ArrayType, ObjectType: return "", fmt.Errorf("cannot parse string as object of type %s. Value of string: %q", T, s) default: return "", fmt.Errorf("unknown json schema type: %q", T) diff --git a/libs/template/utils_test.go b/libs/jsonschema/utils_test.go similarity index 72% rename from libs/template/utils_test.go rename to libs/jsonschema/utils_test.go index 1e038aac..9686cf39 100644 --- a/libs/template/utils_test.go +++ b/libs/jsonschema/utils_test.go @@ -1,10 +1,9 @@ -package template +package jsonschema import ( "math" "testing" - "github.com/databricks/cli/libs/jsonschema" "github.com/stretchr/testify/assert" ) @@ -50,72 +49,72 @@ func TestTemplateToInteger(t *testing.T) { } func TestTemplateToString(t *testing.T) { - s, err := toString(true, jsonschema.BooleanType) + s, err := ToString(true, BooleanType) assert.NoError(t, err) assert.Equal(t, "true", s) - s, err = toString("abc", jsonschema.StringType) + s, err = ToString("abc", StringType) assert.NoError(t, err) assert.Equal(t, "abc", s) - s, err = toString(1.1, jsonschema.NumberType) + s, err = ToString(1.1, NumberType) assert.NoError(t, err) assert.Equal(t, "1.1", s) - s, err = toString(2, jsonschema.IntegerType) + s, err = ToString(2, IntegerType) assert.NoError(t, err) assert.Equal(t, "2", s) - _, err = toString([]string{}, jsonschema.ArrayType) + _, err = ToString([]string{}, ArrayType) assert.EqualError(t, err, "cannot format object of type array as a string. 
Value of object: []string{}") - _, err = toString("true", jsonschema.BooleanType) + _, err = ToString("true", BooleanType) assert.EqualError(t, err, "expected bool, got: \"true\"") - _, err = toString(123, jsonschema.StringType) + _, err = ToString(123, StringType) assert.EqualError(t, err, "expected string, got: 123") - _, err = toString(false, jsonschema.NumberType) + _, err = ToString(false, NumberType) assert.EqualError(t, err, "expected float, got: false") - _, err = toString("abc", jsonschema.IntegerType) + _, err = ToString("abc", IntegerType) assert.EqualError(t, err, "cannot convert \"abc\" to an integer") - _, err = toString("abc", "foobar") + _, err = ToString("abc", "foobar") assert.EqualError(t, err, "unknown json schema type: \"foobar\"") } func TestTemplateFromString(t *testing.T) { - v, err := fromString("true", jsonschema.BooleanType) + v, err := FromString("true", BooleanType) assert.NoError(t, err) assert.Equal(t, true, v) - v, err = fromString("abc", jsonschema.StringType) + v, err = FromString("abc", StringType) assert.NoError(t, err) assert.Equal(t, "abc", v) - v, err = fromString("1.1", jsonschema.NumberType) + v, err = FromString("1.1", NumberType) assert.NoError(t, err) // Floating point conversions are not perfect assert.True(t, (v.(float64)-1.1) < 0.000001) - v, err = fromString("12345", jsonschema.IntegerType) + v, err = FromString("12345", IntegerType) assert.NoError(t, err) assert.Equal(t, int64(12345), v) - v, err = fromString("123", jsonschema.NumberType) + v, err = FromString("123", NumberType) assert.NoError(t, err) assert.Equal(t, float64(123), v) - _, err = fromString("qrt", jsonschema.ArrayType) + _, err = FromString("qrt", ArrayType) assert.EqualError(t, err, "cannot parse string as object of type array. Value of string: \"qrt\"") - _, err = fromString("abc", jsonschema.IntegerType) + _, err = FromString("abc", IntegerType) assert.EqualError(t, err, "could not parse \"abc\" as a integer: strconv.ParseInt: parsing \"abc\": invalid syntax") - _, err = fromString("1.0", jsonschema.IntegerType) + _, err = FromString("1.0", IntegerType) assert.EqualError(t, err, "could not parse \"1.0\" as a integer: strconv.ParseInt: parsing \"1.0\": invalid syntax") - _, err = fromString("1.0", "foobar") + _, err = FromString("1.0", "foobar") assert.EqualError(t, err, "unknown json schema type: \"foobar\"") } diff --git a/libs/template/validators.go b/libs/jsonschema/validate_type.go similarity index 68% rename from libs/template/validators.go rename to libs/jsonschema/validate_type.go index 209700b6..125d6b20 100644 --- a/libs/template/validators.go +++ b/libs/jsonschema/validate_type.go @@ -1,17 +1,15 @@ -package template +package jsonschema import ( "fmt" "reflect" "slices" - - "github.com/databricks/cli/libs/jsonschema" ) -type validator func(v any) error +type validateTypeFunc func(v any) error -func validateType(v any, fieldType jsonschema.Type) error { - validateFunc, ok := validators[fieldType] +func validateType(v any, fieldType Type) error { + validateFunc, ok := validateTypeFuncs[fieldType] if !ok { return nil } @@ -50,9 +48,9 @@ func validateInteger(v any) error { return nil } -var validators map[jsonschema.Type]validator = map[jsonschema.Type]validator{ - jsonschema.StringType: validateString, - jsonschema.BooleanType: validateBoolean, - jsonschema.IntegerType: validateInteger, - jsonschema.NumberType: validateNumber, +var validateTypeFuncs map[Type]validateTypeFunc = map[Type]validateTypeFunc{ + StringType: validateString, + BooleanType: validateBoolean, + 
IntegerType: validateInteger, + NumberType: validateNumber, } diff --git a/libs/template/validators_test.go b/libs/jsonschema/validate_type_test.go similarity index 75% rename from libs/template/validators_test.go rename to libs/jsonschema/validate_type_test.go index f34f037a..36d9e575 100644 --- a/libs/template/validators_test.go +++ b/libs/jsonschema/validate_type_test.go @@ -1,9 +1,8 @@ -package template +package jsonschema import ( "testing" - "github.com/databricks/cli/libs/jsonschema" "github.com/stretchr/testify/assert" ) @@ -77,53 +76,53 @@ func TestValidatorInt(t *testing.T) { func TestTemplateValidateType(t *testing.T) { // assert validation passing - err := validateType(int(0), jsonschema.IntegerType) + err := validateType(int(0), IntegerType) assert.NoError(t, err) - err = validateType(int32(1), jsonschema.IntegerType) + err = validateType(int32(1), IntegerType) assert.NoError(t, err) - err = validateType(int64(1), jsonschema.IntegerType) + err = validateType(int64(1), IntegerType) assert.NoError(t, err) - err = validateType(float32(1.1), jsonschema.NumberType) + err = validateType(float32(1.1), NumberType) assert.NoError(t, err) - err = validateType(float64(1.2), jsonschema.NumberType) + err = validateType(float64(1.2), NumberType) assert.NoError(t, err) - err = validateType(false, jsonschema.BooleanType) + err = validateType(false, BooleanType) assert.NoError(t, err) - err = validateType("abc", jsonschema.StringType) + err = validateType("abc", StringType) assert.NoError(t, err) // assert validation failing for integers - err = validateType(float64(1.2), jsonschema.IntegerType) + err = validateType(float64(1.2), IntegerType) assert.ErrorContains(t, err, "expected type integer, but value is 1.2") - err = validateType(true, jsonschema.IntegerType) + err = validateType(true, IntegerType) assert.ErrorContains(t, err, "expected type integer, but value is true") - err = validateType("abc", jsonschema.IntegerType) + err = validateType("abc", IntegerType) assert.ErrorContains(t, err, "expected type integer, but value is \"abc\"") // assert validation failing for floats - err = validateType(true, jsonschema.NumberType) + err = validateType(true, NumberType) assert.ErrorContains(t, err, "expected type float, but value is true") - err = validateType("abc", jsonschema.NumberType) + err = validateType("abc", NumberType) assert.ErrorContains(t, err, "expected type float, but value is \"abc\"") - err = validateType(int(1), jsonschema.NumberType) + err = validateType(int(1), NumberType) assert.ErrorContains(t, err, "expected type float, but value is 1") // assert validation failing for boolean - err = validateType(int(1), jsonschema.BooleanType) + err = validateType(int(1), BooleanType) assert.ErrorContains(t, err, "expected type boolean, but value is 1") - err = validateType(float64(1), jsonschema.BooleanType) + err = validateType(float64(1), BooleanType) assert.ErrorContains(t, err, "expected type boolean, but value is 1") - err = validateType("abc", jsonschema.BooleanType) + err = validateType("abc", BooleanType) assert.ErrorContains(t, err, "expected type boolean, but value is \"abc\"") // assert validation failing for string - err = validateType(int(1), jsonschema.StringType) + err = validateType(int(1), StringType) assert.ErrorContains(t, err, "expected type string, but value is 1") - err = validateType(float64(1), jsonschema.StringType) + err = validateType(float64(1), StringType) assert.ErrorContains(t, err, "expected type string, but value is 1") - err = validateType(false, 
jsonschema.StringType) + err = validateType(false, StringType) assert.ErrorContains(t, err, "expected type string, but value is false") } diff --git a/libs/template/config.go b/libs/template/config.go index 8a1ed6c8..6f980f61 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -2,12 +2,11 @@ package template import ( "context" - "encoding/json" "fmt" - "os" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/jsonschema" + "golang.org/x/exp/maps" ) type config struct { @@ -26,6 +25,9 @@ func newConfig(ctx context.Context, schemaPath string) (*config, error) { return nil, err } + // Do not allow template input variables that are not defined in the schema. + schema.AdditionalProperties = false + // Return config return &config{ ctx: ctx, @@ -45,32 +47,10 @@ func validateSchema(schema *jsonschema.Schema) error { // Reads json file at path and assigns values from the file func (c *config) assignValuesFromFile(path string) error { - // Read the config file - configFromFile := make(map[string]any, 0) - b, err := os.ReadFile(path) + // Load the config file. + configFromFile, err := c.schema.LoadInstance(path) if err != nil { - return err - } - err = json.Unmarshal(b, &configFromFile) - if err != nil { - return err - } - - // Cast any integer properties, from float to integer. Required because - // the json unmarshaller treats all json numbers as floating point - for name, floatVal := range configFromFile { - property, ok := c.schema.Properties[name] - if !ok { - return fmt.Errorf("%s is not defined as an input parameter for the template", name) - } - if property.Type != jsonschema.IntegerType { - continue - } - v, err := toInteger(floatVal) - if err != nil { - return fmt.Errorf("failed to cast value %v of property %s from file %s to an integer: %w", floatVal, name, path, err) - } - configFromFile[name] = v + return fmt.Errorf("failed to load config from file %s: %w", path, err) } // Write configs from the file to the input map, not overwriting any existing @@ -91,26 +71,11 @@ func (c *config) assignDefaultValues() error { if _, ok := c.values[name]; ok { continue } - // No default value defined for the property if property.Default == nil { continue } - - // Assign default value if property is not an integer - if property.Type != jsonschema.IntegerType { - c.values[name] = property.Default - continue - } - - // Cast default value to int before assigning to an integer configuration. - // Required because untyped field Default will read all numbers as floats - // during unmarshalling - v, err := toInteger(property.Default) - if err != nil { - return fmt.Errorf("failed to cast default value %v of property %s to an integer: %w", property.Default, name, err) - } - c.values[name] = v + c.values[name] = property.Default } return nil } @@ -130,7 +95,7 @@ func (c *config) promptForValues() error { var defaultVal string var err error if property.Default != nil { - defaultVal, err = toString(property.Default, property.Type) + defaultVal, err = jsonschema.ToString(property.Default, property.Type) if err != nil { return err } @@ -143,7 +108,7 @@ func (c *config) promptForValues() error { } // Convert user input string back to a value - c.values[name], err = fromString(userInput, property.Type) + c.values[name], err = jsonschema.FromString(userInput, property.Type) if err != nil { return err } @@ -163,42 +128,10 @@ func (c *config) promptOrAssignDefaultValues() error { // Validates the configuration. 
If passes, the configuration is ready to be used // to initialize the template. func (c *config) validate() error { - validateFns := []func() error{ - c.validateValuesDefined, - c.validateValuesType, - } - - for _, fn := range validateFns { - err := fn() - if err != nil { - return err - } - } - return nil -} - -// Validates all input properties have a user defined value assigned to them -func (c *config) validateValuesDefined() error { - for k := range c.schema.Properties { - if _, ok := c.values[k]; ok { - continue - } - return fmt.Errorf("no value has been assigned to input parameter %s", k) - } - return nil -} - -// Validates the types of all input properties values match their types defined in the schema -func (c *config) validateValuesType() error { - for k, v := range c.values { - fieldInfo, ok := c.schema.Properties[k] - if !ok { - return fmt.Errorf("%s is not defined as an input parameter for the template", k) - } - err := validateType(v, fieldInfo.Type) - if err != nil { - return fmt.Errorf("incorrect type for %s. %w", k, err) - } + // All properties in the JSON schema should have a value defined. + c.schema.Required = maps.Keys(c.schema.Properties) + if err := c.schema.ValidateInstance(c.values); err != nil { + return fmt.Errorf("validation for template input parameters failed. %w", err) } return nil } diff --git a/libs/template/config_test.go b/libs/template/config_test.go index 33524246..bba22c75 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -1,7 +1,7 @@ package template import ( - "encoding/json" + "context" "testing" "github.com/databricks/cli/libs/jsonschema" @@ -9,36 +9,14 @@ import ( "github.com/stretchr/testify/require" ) -func testSchema(t *testing.T) *jsonschema.Schema { - schemaJson := `{ - "properties": { - "int_val": { - "type": "integer", - "default": 123 - }, - "float_val": { - "type": "number" - }, - "bool_val": { - "type": "boolean" - }, - "string_val": { - "type": "string", - "default": "abc" - } - } - }` - var jsonSchema jsonschema.Schema - err := json.Unmarshal([]byte(schemaJson), &jsonSchema) +func testConfig(t *testing.T) *config { + c, err := newConfig(context.Background(), "./testdata/config-test-schema/test-schema.json") require.NoError(t, err) - return &jsonSchema + return c } func TestTemplateConfigAssignValuesFromFile(t *testing.T) { - c := config{ - schema: testSchema(t), - values: make(map[string]any), - } + c := testConfig(t) err := c.assignValuesFromFile("./testdata/config-assign-from-file/config.json") assert.NoError(t, err) @@ -49,32 +27,17 @@ func TestTemplateConfigAssignValuesFromFile(t *testing.T) { assert.Equal(t, "hello", c.values["string_val"]) } -func TestTemplateConfigAssignValuesFromFileForUnknownField(t *testing.T) { - c := config{ - schema: testSchema(t), - values: make(map[string]any), - } - - err := c.assignValuesFromFile("./testdata/config-assign-from-file-unknown-property/config.json") - assert.EqualError(t, err, "unknown_prop is not defined as an input parameter for the template") -} - func TestTemplateConfigAssignValuesFromFileForInvalidIntegerValue(t *testing.T) { - c := config{ - schema: testSchema(t), - values: make(map[string]any), - } + c := testConfig(t) err := c.assignValuesFromFile("./testdata/config-assign-from-file-invalid-int/config.json") - assert.EqualError(t, err, "failed to cast value abc of property int_val from file ./testdata/config-assign-from-file-invalid-int/config.json to an integer: cannot convert \"abc\" to an integer") + assert.EqualError(t, err, "failed to load config 
from file ./testdata/config-assign-from-file-invalid-int/config.json: failed to parse property int_val: cannot convert \"abc\" to an integer") } func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *testing.T) { - c := config{ - schema: testSchema(t), - values: map[string]any{ - "string_val": "this-is-not-overwritten", - }, + c := testConfig(t) + c.values = map[string]any{ + "string_val": "this-is-not-overwritten", } err := c.assignValuesFromFile("./testdata/config-assign-from-file/config.json") @@ -87,10 +50,7 @@ func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *te } func TestTemplateConfigAssignDefaultValues(t *testing.T) { - c := config{ - schema: testSchema(t), - values: make(map[string]any), - } + c := testConfig(t) err := c.assignDefaultValues() assert.NoError(t, err) @@ -101,65 +61,55 @@ func TestTemplateConfigAssignDefaultValues(t *testing.T) { } func TestTemplateConfigValidateValuesDefined(t *testing.T) { - c := config{ - schema: testSchema(t), - values: map[string]any{ - "int_val": 1, - "float_val": 1.0, - "bool_val": false, - }, + c := testConfig(t) + c.values = map[string]any{ + "int_val": 1, + "float_val": 1.0, + "bool_val": false, } - err := c.validateValuesDefined() - assert.EqualError(t, err, "no value has been assigned to input parameter string_val") + err := c.validate() + assert.EqualError(t, err, "validation for template input parameters failed. no value provided for required property string_val") } func TestTemplateConfigValidateTypeForValidConfig(t *testing.T) { - c := &config{ - schema: testSchema(t), - values: map[string]any{ - "int_val": 1, - "float_val": 1.1, - "bool_val": true, - "string_val": "abcd", - }, + c := testConfig(t) + c.values = map[string]any{ + "int_val": 1, + "float_val": 1.1, + "bool_val": true, + "string_val": "abcd", } - err := c.validateValuesType() - assert.NoError(t, err) - - err = c.validate() + err := c.validate() assert.NoError(t, err) } func TestTemplateConfigValidateTypeForUnknownField(t *testing.T) { - c := &config{ - schema: testSchema(t), - values: map[string]any{ - "unknown_prop": 1, - }, + c := testConfig(t) + c.values = map[string]any{ + "unknown_prop": 1, + "int_val": 1, + "float_val": 1.1, + "bool_val": true, + "string_val": "abcd", } - err := c.validateValuesType() - assert.EqualError(t, err, "unknown_prop is not defined as an input parameter for the template") + err := c.validate() + assert.EqualError(t, err, "validation for template input parameters failed. property unknown_prop is not defined in the schema") } func TestTemplateConfigValidateTypeForInvalidType(t *testing.T) { - c := &config{ - schema: testSchema(t), - values: map[string]any{ - "int_val": "this-should-be-an-int", - "float_val": 1.1, - "bool_val": true, - "string_val": "abcd", - }, + c := testConfig(t) + c.values = map[string]any{ + "int_val": "this-should-be-an-int", + "float_val": 1.1, + "bool_val": true, + "string_val": "abcd", } - err := c.validateValuesType() - assert.EqualError(t, err, `incorrect type for int_val. expected type integer, but value is "this-should-be-an-int"`) - - err = c.validate() - assert.EqualError(t, err, `incorrect type for int_val. expected type integer, but value is "this-should-be-an-int"`) + err := c.validate() + assert.EqualError(t, err, "validation for template input parameters failed. 
incorrect type for property int_val: expected type integer, but value is \"this-should-be-an-int\"") } func TestTemplateValidateSchema(t *testing.T) { diff --git a/libs/template/testdata/config-test-schema/test-schema.json b/libs/template/testdata/config-test-schema/test-schema.json new file mode 100644 index 00000000..41eb8251 --- /dev/null +++ b/libs/template/testdata/config-test-schema/test-schema.json @@ -0,0 +1,18 @@ +{ + "properties": { + "int_val": { + "type": "integer", + "default": 123 + }, + "float_val": { + "type": "number" + }, + "bool_val": { + "type": "boolean" + }, + "string_val": { + "type": "string", + "default": "abc" + } + } +} From 5a14c7cb433a0b23c1703f56f68b5e65e0717714 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 7 Sep 2023 22:02:26 +0200 Subject: [PATCH 072/310] Generate unique name for a job in Python wheel test (#745) ## Changes Generate unique name for a job in Python wheel test --- .../bundles/python_wheel_task/databricks_template_schema.json | 4 ++++ .../bundles/python_wheel_task/template/databricks.yml.tmpl | 2 +- internal/bundle/python_wheel_test.go | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json b/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json index b39a628c..f7f4b634 100644 --- a/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json +++ b/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json @@ -12,6 +12,10 @@ "node_type_id": { "type": "string", "description": "Node type id for job cluster" + }, + "unique_id": { + "type": "string", + "description": "Unique ID for job name" } } } diff --git a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl b/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl index 4386879a..a3201e03 100644 --- a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl @@ -4,7 +4,7 @@ bundle: resources: jobs: some_other_job: - name: "[${bundle.target}] Test Wheel Job" + name: "[${bundle.target}] Test Wheel Job {{.unique_id}}" tasks: - task_key: TestTask new_cluster: diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index 52683edc..ee5d897d 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/databricks/cli/internal" + "github.com/google/uuid" "github.com/stretchr/testify/require" ) @@ -22,6 +23,7 @@ func TestAccPythonWheelTaskDeployAndRun(t *testing.T) { bundleRoot, err := initTestTemplate(t, "python_wheel_task", map[string]any{ "node_type_id": nodeTypeId, + "unique_id": uuid.New().String(), "spark_version": "13.2.x-snapshot-scala2.12", }) require.NoError(t, err) From 50eaf16307bae42a08edb506a9b9430de3eb0f1b Mon Sep 17 00:00:00 2001 From: Arpit Jasapara <87999496+arpitjasa-db@users.noreply.github.com> Date: Thu, 7 Sep 2023 14:54:31 -0700 Subject: [PATCH 073/310] Support Model Serving Endpoints in bundles (#682) ## Changes Add Model Serving Endpoints to Databricks Bundles ## Tests Unit tests and manual testing via https://github.com/databricks/bundle-examples-internal/pull/76 Screenshot 2023-08-28 at 7 46 23 PM Screenshot 2023-08-28 at 7 47 01 PM Signed-off-by: Arpit Jasapara --- bundle/config/mutator/process_target_mode.go | 6 ++ .../mutator/process_target_mode_test.go | 8 ++ bundle/config/resources.go | 21 ++++- 
.../resources/model_serving_endpoint.go | 24 ++++++ bundle/deploy/terraform/convert.go | 19 +++++ bundle/deploy/terraform/convert_test.go | 74 +++++++++++++++++ bundle/deploy/terraform/interpolate.go | 3 + bundle/schema/docs/bundle_descriptions.json | 81 +++++++++++++++++++ bundle/schema/openapi.go | 26 +++++- .../model_serving_endpoint/databricks.yml | 38 +++++++++ bundle/tests/model_serving_endpoint_test.go | 48 +++++++++++ 11 files changed, 342 insertions(+), 6 deletions(-) create mode 100644 bundle/config/resources/model_serving_endpoint.go create mode 100644 bundle/tests/model_serving_endpoint/databricks.yml create mode 100644 bundle/tests/model_serving_endpoint_test.go diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 06ae7b85..93149ad0 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -77,6 +77,12 @@ func transformDevelopmentMode(b *bundle.Bundle) error { r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: b.Config.Workspace.CurrentUser.DisplayName}) } + for i := range r.ModelServingEndpoints { + prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" + r.ModelServingEndpoints[i].Name = prefix + r.ModelServingEndpoints[i].Name + // (model serving doesn't yet support tags) + } + return nil } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 489632e1..4ea33c70 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -13,6 +13,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/databricks-sdk-go/service/serving" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -53,6 +54,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle { Models: map[string]*resources.MlflowModel{ "model1": {Model: &ml.Model{Name: "model1"}}, }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "servingendpoint1": {CreateServingEndpoint: &serving.CreateServingEndpoint{Name: "servingendpoint1"}}, + }, }, }, } @@ -69,6 +73,7 @@ func TestProcessTargetModeDevelopment(t *testing.T) { assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev lennart] experiment1", bundle.Config.Resources.Experiments["experiment1"].Name) assert.Equal(t, "[dev lennart] experiment2", bundle.Config.Resources.Experiments["experiment2"].Name) assert.Equal(t, "[dev lennart] model1", bundle.Config.Resources.Models["model1"].Name) + assert.Equal(t, "dev_lennart_servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "dev", bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key) assert.True(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) } @@ -82,6 +87,7 @@ func TestProcessTargetModeDefault(t *testing.T) { assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name) assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name) assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) + assert.Equal(t, "servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) } func TestProcessTargetModeProduction(t *testing.T) { @@ -109,6 +115,7 @@ func 
TestProcessTargetModeProduction(t *testing.T) { bundle.Config.Resources.Experiments["experiment1"].Permissions = permissions bundle.Config.Resources.Experiments["experiment2"].Permissions = permissions bundle.Config.Resources.Models["model1"].Permissions = permissions + bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions err = validateProductionMode(context.Background(), bundle, false) require.NoError(t, err) @@ -116,6 +123,7 @@ func TestProcessTargetModeProduction(t *testing.T) { assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name) assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name) assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) + assert.Equal(t, "servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) } func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 5d47b918..c239b510 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -11,8 +11,9 @@ type Resources struct { Jobs map[string]*resources.Job `json:"jobs,omitempty"` Pipelines map[string]*resources.Pipeline `json:"pipelines,omitempty"` - Models map[string]*resources.MlflowModel `json:"models,omitempty"` - Experiments map[string]*resources.MlflowExperiment `json:"experiments,omitempty"` + Models map[string]*resources.MlflowModel `json:"models,omitempty"` + Experiments map[string]*resources.MlflowExperiment `json:"experiments,omitempty"` + ModelServingEndpoints map[string]*resources.ModelServingEndpoint `json:"model_serving_endpoints,omitempty"` } type UniqueResourceIdTracker struct { @@ -93,6 +94,19 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker, tracker.Type[k] = "mlflow_experiment" tracker.ConfigPath[k] = r.Experiments[k].ConfigFilePath } + for k := range r.ModelServingEndpoints { + if _, ok := tracker.Type[k]; ok { + return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)", + k, + tracker.Type[k], + tracker.ConfigPath[k], + "model_serving_endpoint", + r.ModelServingEndpoints[k].ConfigFilePath, + ) + } + tracker.Type[k] = "model_serving_endpoint" + tracker.ConfigPath[k] = r.ModelServingEndpoints[k].ConfigFilePath + } return tracker, nil } @@ -112,6 +126,9 @@ func (r *Resources) SetConfigFilePath(path string) { for _, e := range r.Experiments { e.ConfigFilePath = path } + for _, e := range r.ModelServingEndpoints { + e.ConfigFilePath = path + } } // MergeJobClusters iterates over all jobs and merges their job clusters. diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go new file mode 100644 index 00000000..dccecaa6 --- /dev/null +++ b/bundle/config/resources/model_serving_endpoint.go @@ -0,0 +1,24 @@ +package resources + +import ( + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/service/serving" +) + +type ModelServingEndpoint struct { + // This represents the input args for terraform, and will get converted + // to a HCL representation for CRUD + *serving.CreateServingEndpoint + + // This represents the id (ie serving_endpoint_id) that can be used + // as a reference in other resources. This value is returned by terraform. + ID string + + // Local path where the bundle is defined. All bundle resources include + // this for interpolation purposes. 
+ paths.Paths + + // This is a resource agnostic implementation of permissions for ACLs. + // Implementation could be different based on the resource type. + Permissions []Permission `json:"permissions,omitempty"` +} diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index cd480c89..0956ea7b 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -161,6 +161,19 @@ func BundleToTerraform(config *config.Root) (*schema.Root, bool) { } } + for k, src := range config.Resources.ModelServingEndpoints { + noResources = false + var dst schema.ResourceModelServing + conv(src, &dst) + tfroot.Resource.ModelServing[k] = &dst + + // Configure permissions for this resource. + if rp := convPermissions(src.Permissions); rp != nil { + rp.ServingEndpointId = fmt.Sprintf("${databricks_model_serving.%s.serving_endpoint_id}", k) + tfroot.Resource.Permissions["model_serving_"+k] = rp + } + } + return tfroot, noResources } @@ -196,6 +209,12 @@ func TerraformToBundle(state *tfjson.State, config *config.Root) error { cur := config.Resources.Experiments[resource.Name] conv(tmp, &cur) config.Resources.Experiments[resource.Name] = cur + case "databricks_model_serving": + var tmp schema.ResourceModelServing + conv(resource.AttributeValues, &tmp) + cur := config.Resources.ModelServingEndpoints[resource.Name] + conv(tmp, &cur) + config.Resources.ModelServingEndpoints[resource.Name] = cur case "databricks_permissions": // Ignore; no need to pull these back into the configuration. default: diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 34a65d70..ad626606 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/databricks-sdk-go/service/serving" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -292,3 +293,76 @@ func TestConvertExperimentPermissions(t *testing.T) { assert.Equal(t, "CAN_READ", p.PermissionLevel) } + +func TestConvertModelServing(t *testing.T) { + var src = resources.ModelServingEndpoint{ + CreateServingEndpoint: &serving.CreateServingEndpoint{ + Name: "name", + Config: serving.EndpointCoreConfigInput{ + ServedModels: []serving.ServedModelInput{ + { + ModelName: "model_name", + ModelVersion: "1", + ScaleToZeroEnabled: true, + WorkloadSize: "Small", + }, + }, + TrafficConfig: &serving.TrafficConfig{ + Routes: []serving.Route{ + { + ServedModelName: "model_name-1", + TrafficPercentage: 100, + }, + }, + }, + }, + }, + } + + var config = config.Root{ + Resources: config.Resources{ + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "my_model_serving_endpoint": &src, + }, + }, + } + + out, _ := BundleToTerraform(&config) + resource := out.Resource.ModelServing["my_model_serving_endpoint"] + assert.Equal(t, "name", resource.Name) + assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName) + assert.Equal(t, "1", resource.Config.ServedModels[0].ModelVersion) + assert.Equal(t, true, resource.Config.ServedModels[0].ScaleToZeroEnabled) + assert.Equal(t, "Small", resource.Config.ServedModels[0].WorkloadSize) + assert.Equal(t, "model_name-1", resource.Config.TrafficConfig.Routes[0].ServedModelName) + assert.Equal(t, 100, resource.Config.TrafficConfig.Routes[0].TrafficPercentage) + 
assert.Nil(t, out.Data) +} + +func TestConvertModelServingPermissions(t *testing.T) { + var src = resources.ModelServingEndpoint{ + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + } + + var config = config.Root{ + Resources: config.Resources{ + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "my_model_serving_endpoint": &src, + }, + }, + } + + out, _ := BundleToTerraform(&config) + assert.NotEmpty(t, out.Resource.Permissions["model_serving_my_model_serving_endpoint"].ServingEndpointId) + assert.Len(t, out.Resource.Permissions["model_serving_my_model_serving_endpoint"].AccessControl, 1) + + p := out.Resource.Permissions["model_serving_my_model_serving_endpoint"].AccessControl[0] + assert.Equal(t, "jane@doe.com", p.UserName) + assert.Equal(t, "CAN_VIEW", p.PermissionLevel) + +} diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index dd1dcbb8..ea3c99aa 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -25,6 +25,9 @@ func interpolateTerraformResourceIdentifiers(path string, lookup map[string]stri case "experiments": path = strings.Join(append([]string{"databricks_mlflow_experiment"}, parts[2:]...), interpolation.Delimiter) return fmt.Sprintf("${%s}", path), nil + case "model_serving_endpoints": + path = strings.Join(append([]string{"databricks_model_serving"}, parts[2:]...), interpolation.Delimiter) + return fmt.Sprintf("${%s}", path), nil default: panic("TODO: " + parts[1]) } diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 84f0492f..ffdb5629 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -1441,6 +1441,87 @@ } } }, + "model_serving_endpoints": { + "description": "List of Model Serving Endpoints", + "additionalproperties": { + "description": "", + "properties": { + "name": { + "description": "The name of the model serving endpoint. This field is required and must be unique across a workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. NOTE: Changing this name will delete the existing endpoint and create a new endpoint with the update name." + }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, + "config": { + "description": "The model serving endpoint configuration.", + "properties": { + "description": "", + "properties": { + "served_models": { + "description": "Each block represents a served model for the endpoint to serve. A model serving endpoint can have up to 10 served models.", + "items": { + "description": "", + "properties" : { + "name": { + "description": "The name of a served model. It must be unique across an endpoint. If not specified, this field will default to modelname-modelversion. A served model name can consist of alphanumeric characters, dashes, and underscores." + }, + "model_name": { + "description": "The name of the model in Databricks Model Registry to be served." + }, + "model_version": { + "description": "The version of the model in Databricks Model Registry to be served." + }, + "workload_size": { + "description": "The workload size of the served model. 
The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency)." + }, + "scale_to_zero_enabled": { + "description": "Whether the compute resources for the served model should scale down to zero. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0." + } + } + } + }, + "traffic_config": { + "description": "A single block represents the traffic split configuration amongst the served models.", + "properties": { + "routes": { + "description": "Each block represents a route that defines traffic to each served model. Each served_models block needs to have a corresponding routes block.", + "items": { + "description": "", + "properties": { + "served_model_name": { + "description": "The name of the served model this route configures traffic for. This needs to match the name of a served_models block." + }, + "traffic_percentage": { + "description": "The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive." + } + } + } + } + } + } + } + } + } + } + } + }, "pipelines": { "description": "List of DLT pipelines", "additionalproperties": { diff --git a/bundle/schema/openapi.go b/bundle/schema/openapi.go index b0d67657..1a8b76ed 100644 --- a/bundle/schema/openapi.go +++ b/bundle/schema/openapi.go @@ -210,6 +210,19 @@ func (reader *OpenapiReader) modelsDocs() (*Docs, error) { return modelsDocs, nil } +func (reader *OpenapiReader) modelServingEndpointsDocs() (*Docs, error) { + modelServingEndpointsSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "serving.CreateServingEndpoint") + if err != nil { + return nil, err + } + modelServingEndpointsDocs := schemaToDocs(modelServingEndpointsSpecSchema) + modelServingEndpointsAllDocs := &Docs{ + Description: "List of Model Serving Endpoints", + AdditionalProperties: modelServingEndpointsDocs, + } + return modelServingEndpointsAllDocs, nil +} + func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) { jobsDocs, err := reader.jobsDocs() if err != nil { @@ -227,14 +240,19 @@ func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) { if err != nil { return nil, err } + modelServingEndpointsDocs, err := reader.modelServingEndpointsDocs() + if err != nil { + return nil, err + } return &Docs{ Description: "Collection of Databricks resources to deploy.", Properties: map[string]*Docs{ - "jobs": jobsDocs, - "pipelines": pipelinesDocs, - "experiments": experimentsDocs, - "models": modelsDocs, + "jobs": jobsDocs, + "pipelines": pipelinesDocs, + "experiments": experimentsDocs, + "models": modelsDocs, + "model_serving_endpoints": modelServingEndpointsDocs, }, }, nil } diff --git a/bundle/tests/model_serving_endpoint/databricks.yml b/bundle/tests/model_serving_endpoint/databricks.yml new file mode 100644 index 00000000..e4fb54a1 --- /dev/null +++ b/bundle/tests/model_serving_endpoint/databricks.yml @@ -0,0 +1,38 @@ +resources: + model_serving_endpoints: + my_model_serving_endpoint: + name: "my-endpoint" + config: + served_models: + - model_name: "model-name" + model_version: "1" + workload_size: "Small" + scale_to_zero_enabled: true + traffic_config: + routes: + - served_model_name: "model-name-1" + traffic_percentage: 100 + permissions: + - level: CAN_QUERY + group_name: users 
+ +targets: + development: + mode: development + resources: + model_serving_endpoints: + my_model_serving_endpoint: + name: "my-dev-endpoint" + + staging: + resources: + model_serving_endpoints: + my_model_serving_endpoint: + name: "my-staging-endpoint" + + production: + mode: production + resources: + model_serving_endpoints: + my_model_serving_endpoint: + name: "my-prod-endpoint" diff --git a/bundle/tests/model_serving_endpoint_test.go b/bundle/tests/model_serving_endpoint_test.go new file mode 100644 index 00000000..bfa1a31b --- /dev/null +++ b/bundle/tests/model_serving_endpoint_test.go @@ -0,0 +1,48 @@ +package config_tests + +import ( + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/stretchr/testify/assert" +) + +func assertExpected(t *testing.T, p *resources.ModelServingEndpoint) { + assert.Equal(t, "model_serving_endpoint/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) + assert.Equal(t, "model-name", p.Config.ServedModels[0].ModelName) + assert.Equal(t, "1", p.Config.ServedModels[0].ModelVersion) + assert.Equal(t, "model-name-1", p.Config.TrafficConfig.Routes[0].ServedModelName) + assert.Equal(t, 100, p.Config.TrafficConfig.Routes[0].TrafficPercentage) + assert.Equal(t, "users", p.Permissions[0].GroupName) + assert.Equal(t, "CAN_QUERY", p.Permissions[0].Level) +} + +func TestModelServingEndpointDevelopment(t *testing.T) { + b := loadTarget(t, "./model_serving_endpoint", "development") + assert.Len(t, b.Config.Resources.ModelServingEndpoints, 1) + assert.Equal(t, b.Config.Bundle.Mode, config.Development) + + p := b.Config.Resources.ModelServingEndpoints["my_model_serving_endpoint"] + assert.Equal(t, "my-dev-endpoint", p.Name) + assertExpected(t, p) +} + +func TestModelServingEndpointStaging(t *testing.T) { + b := loadTarget(t, "./model_serving_endpoint", "staging") + assert.Len(t, b.Config.Resources.ModelServingEndpoints, 1) + + p := b.Config.Resources.ModelServingEndpoints["my_model_serving_endpoint"] + assert.Equal(t, "my-staging-endpoint", p.Name) + assertExpected(t, p) +} + +func TestModelServingEndpointProduction(t *testing.T) { + b := loadTarget(t, "./model_serving_endpoint", "production") + assert.Len(t, b.Config.Resources.ModelServingEndpoints, 1) + + p := b.Config.Resources.ModelServingEndpoints["my_model_serving_endpoint"] + assert.Equal(t, "my-prod-endpoint", p.Name) + assertExpected(t, p) +} From 17d9f7dd2a340e485ac4c783f59e5909d9cbd76e Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 8 Sep 2023 11:19:55 +0200 Subject: [PATCH 074/310] Use unique bundle root path for Python E2E test (#748) ## Changes It helps to make sure jobs in the tests are deployed and executed uniquely and isolated ``` Bundle remote directory is /Users/61b77d30-bc10-4214-9650-29cf5db0e941/.bundle/4b630810-5edc-4d8f-85d1-0eb5baf7bb28 Deleted snapshot file at /var/folders/nt/xjv68qzs45319w4k36dhpylc0000gp/T/TestAccPythonWheelTaskDeployAndRun3933198431/001/.databricks/bundle/default/sync-snapshots/dd9db100465e3d91.json Successfully deleted files! --- PASS: TestAccPythonWheelTaskDeployAndRun (346.28s) PASS coverage: 93.5% of statements in ./... ok github.com/databricks/cli/internal/bundle 346.976s coverage: 93.5% of statements in ./... 
``` --- .../bundles/python_wheel_task/template/databricks.yml.tmpl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl b/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl index a3201e03..e715cdf1 100644 --- a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl @@ -1,6 +1,9 @@ bundle: name: wheel-task +workspace: + root_path: "~/.bundle/{{.unique_id}}" + resources: jobs: some_other_job: From e08f419ef68faad79d1976181ff517c63fc8fe0a Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 8 Sep 2023 11:52:45 +0200 Subject: [PATCH 075/310] Do not include empty output in job run output (#749) ## Changes Do not include empty output in job run output ## Tests Running a job from CLI, the result: ``` andrew.nester@HFW9Y94129 wheel % databricks bundle run some_other_job --output json Run URL: https://***/?o=6051921418418893#job/780620378804085/run/386695528477456 2023-09-08 11:33:24 "[default] My Wheel Job" TERMINATED SUCCESS { "task_outputs": [ { "TaskKey": "TestTask", "Output": { "result": "Hello from my func\nGot arguments v2:\n['python']\n" }, "EndTime": 1694165597474 } ] ``` --- bundle/run/output/job.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bundle/run/output/job.go b/bundle/run/output/job.go index 4bea4c7a..6199ac2f 100644 --- a/bundle/run/output/job.go +++ b/bundle/run/output/job.go @@ -60,7 +60,7 @@ func GetJobOutput(ctx context.Context, w *databricks.WorkspaceClient, runId int6 return nil, err } result := &JobOutput{ - TaskOutputs: make([]TaskOutput, len(jobRun.Tasks)), + TaskOutputs: make([]TaskOutput, 0), } for _, task := range jobRun.Tasks { jobRunOutput, err := w.Jobs.GetRunOutput(ctx, jobs.GetRunOutputRequest{ @@ -69,7 +69,11 @@ func GetJobOutput(ctx context.Context, w *databricks.WorkspaceClient, runId int6 if err != nil { return nil, err } - task := TaskOutput{TaskKey: task.TaskKey, Output: toRunOutput(jobRunOutput), EndTime: task.EndTime} + out := toRunOutput(jobRunOutput) + if out == nil { + continue + } + task := TaskOutput{TaskKey: task.TaskKey, Output: out, EndTime: task.EndTime} result.TaskOutputs = append(result.TaskOutputs, task) } return result, nil From e64463ba47c9b86874ad32b873103bdb003463f6 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 8 Sep 2023 11:53:57 +0200 Subject: [PATCH 076/310] Fixed marking libraries from DBFS as remote (#750) ## Changes Fixed marking libraries from DBFS as remote ## Tests Updated unit tests to catch the regression --- bundle/config/mutator/translate_paths_test.go | 4 ++-- bundle/libraries/libraries.go | 4 ++-- bundle/libraries/libraries_test.go | 1 + bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index e7ac5e8a..f7edee30 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -162,7 +162,7 @@ func TestTranslatePaths(t *testing.T) { MainClassName: "HelloWorldRemote", }, Libraries: []compute.Library{ - {Jar: "dbfs:///bundle/dist/task_remote.jar"}, + {Jar: "dbfs:/bundle/dist/task_remote.jar"}, }, }, }, @@ -243,7 +243,7 @@ func TestTranslatePaths(t *testing.T) { ) assert.Equal( t, - "dbfs:///bundle/dist/task_remote.jar", + "dbfs:/bundle/dist/task_remote.jar", 
bundle.Config.Resources.Jobs["job"].Tasks[6].Libraries[0].Jar, ) diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index d26768f9..076180f4 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -165,8 +165,8 @@ func isRemoteStorageScheme(path string) bool { return false } - // If the path starts with scheme:// format, it's a correct remote storage scheme - return strings.HasPrefix(path, url.Scheme+"://") + // If the path starts with scheme:/ format, it's a correct remote storage scheme + return strings.HasPrefix(path, url.Scheme+":/") } diff --git a/bundle/libraries/libraries_test.go b/bundle/libraries/libraries_test.go index 050efe74..7ff1609a 100644 --- a/bundle/libraries/libraries_test.go +++ b/bundle/libraries/libraries_test.go @@ -16,6 +16,7 @@ var testCases map[string]bool = map[string]bool{ "file://path/to/package": true, "C:\\path\\to\\package": true, "dbfs://path/to/package": false, + "dbfs:/path/to/package": false, "s3://path/to/package": false, "abfss://path/to/package": false, } diff --git a/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml b/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml index 54577d65..07f4957b 100644 --- a/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml +++ b/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml @@ -12,4 +12,4 @@ resources: package_name: "my_test_code" entry_point: "run" libraries: - - whl: dbfs://path/to/dist/mywheel.whl + - whl: dbfs:/path/to/dist/mywheel.whl From f7566b82648ab317a7ad6e875eb5eac40f09fcd6 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 8 Sep 2023 12:47:17 +0200 Subject: [PATCH 077/310] Close local Terraform state file when pushing to remote (#752) ## Changes Close local Terraform state file when pushing to remote Should help fix E2E test cleanup ``` testing.go:1225: TempDir RemoveAll cleanup: remove C:\Users\RUNNER~1\AppData\Local\Temp\TestAccPythonWheelTaskDeployAndRun1395546390\001\.databricks\bundle\default\terraform\terraform.tfstate: The process cannot access the file because it is being used by another process. ``` --- bundle/deploy/terraform/state_push.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bundle/deploy/terraform/state_push.go b/bundle/deploy/terraform/state_push.go index 0b4c5dbf..0cd69e52 100644 --- a/bundle/deploy/terraform/state_push.go +++ b/bundle/deploy/terraform/state_push.go @@ -32,6 +32,7 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { if err != nil { return err } + defer local.Close() // Upload state file from local cache directory to filer. 
 	log.Infof(ctx, "Writing local state file to remote state directory")
From 67af171a68209b49c9d20e2e9cbdcf22500eebc3 Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Fri, 8 Sep 2023 13:08:21 +0200
Subject: [PATCH 078/310] Process only Python wheel tasks which have local libraries used (#751)

## Changes
Process only Python wheel tasks which have local libraries used

## Tests
Updated unit test to catch the regression

---
 bundle/artifacts/whl/autodetect.go     |  4 ++--
 bundle/artifacts/whl/from_libraries.go |  2 +-
 bundle/libraries/libraries.go          | 14 ++++++++++++--
 bundle/python/transform.go             |  5 +++--
 bundle/python/transform_test.go        | 11 +++++++++++
 5 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go
index 41d80bb7..29031e86 100644
--- a/bundle/artifacts/whl/autodetect.go
+++ b/bundle/artifacts/whl/autodetect.go
@@ -27,9 +27,9 @@ func (m *detectPkg) Name() string {
 }
 
 func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error {
-	wheelTasks := libraries.FindAllWheelTasks(b)
+	wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
 	if len(wheelTasks) == 0 {
-		log.Infof(ctx, "No wheel tasks in databricks.yml config, skipping auto detect")
+		log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect")
 		return nil
 	}
 	cmdio.LogString(ctx, "artifacts.whl.AutoDetect: Detecting Python wheel project...")
diff --git a/bundle/artifacts/whl/from_libraries.go b/bundle/artifacts/whl/from_libraries.go
index 855e5b94..9d35f631 100644
--- a/bundle/artifacts/whl/from_libraries.go
+++ b/bundle/artifacts/whl/from_libraries.go
@@ -26,7 +26,7 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
 		return nil
 	}
 
-	tasks := libraries.FindAllWheelTasks(b)
+	tasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
 	for _, task := range tasks {
 		for _, lib := range task.Libraries {
 			matches, err := filepath.Glob(filepath.Join(b.Config.Path, lib.Whl))
diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go
index 076180f4..d9a257db 100644
--- a/bundle/libraries/libraries.go
+++ b/bundle/libraries/libraries.go
@@ -56,11 +56,11 @@ func findAllTasks(b *bundle.Bundle) []*jobs.Task {
 	return result
 }
 
-func FindAllWheelTasks(b *bundle.Bundle) []*jobs.Task {
+func FindAllWheelTasksWithLocalLibraries(b *bundle.Bundle) []*jobs.Task {
 	tasks := findAllTasks(b)
 	wheelTasks := make([]*jobs.Task, 0)
 	for _, task := range tasks {
-		if task.PythonWheelTask != nil {
+		if task.PythonWheelTask != nil && IsTaskWithLocalLibraries(task) {
 			wheelTasks = append(wheelTasks, task)
 		}
 	}
@@ -68,6 +68,16 @@ func FindAllWheelTasks(b *bundle.Bundle) []*jobs.Task {
 	return wheelTasks
 }
 
+func IsTaskWithLocalLibraries(task *jobs.Task) bool {
+	for _, l := range task.Libraries {
+		if isLocalLibrary(&l) {
+			return true
+		}
+	}
+
+	return false
+}
+
 func isMissingRequiredLibraries(task *jobs.Task) bool {
 	if task.Libraries != nil {
 		return false
diff --git a/bundle/python/transform.go b/bundle/python/transform.go
index 53db450b..3d744df9 100644
--- a/bundle/python/transform.go
+++ b/bundle/python/transform.go
@@ -7,6 +7,7 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/libraries"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 )
 
@@ -72,8 +73,8 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey {
 	for i := range tasks {
 		task := &tasks[i]
 
-		// Keep only Python wheel tasks
-		if 
task.PythonWheelTask == nil { + // Keep only Python wheel tasks with local libraries referenced + if task.PythonWheelTask == nil || !libraries.IsTaskWithLocalLibraries(task) { continue } diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index a9f57db8..99d3129d 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) @@ -82,11 +83,21 @@ func TestTransformFiltersWheelTasksOnly(t *testing.T) { { TaskKey: "key1", PythonWheelTask: &jobs.PythonWheelTask{}, + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, }, { TaskKey: "key2", NotebookTask: &jobs.NotebookTask{}, }, + { + TaskKey: "key3", + PythonWheelTask: &jobs.PythonWheelTask{}, + Libraries: []compute.Library{ + {Whl: "dbfs:/FileStore/dist/test.whl"}, + }, + }, }, }, }, From 368321d07ddb2357361f2a94e0ba7e25bc508d99 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 8 Sep 2023 13:24:51 +0200 Subject: [PATCH 079/310] Close python wheel directory file descriptor after read (#753) ## Changes Close python wheel directory file descriptor after read --- python/utils.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/utils.go b/python/utils.go index a8408fae..47d5462d 100644 --- a/python/utils.go +++ b/python/utils.go @@ -30,6 +30,8 @@ func FindFilesWithSuffixInPath(dir, suffix string) []string { log.Debugf(context.Background(), "open dir %s: %s", dir, err) return nil } + defer f.Close() + entries, err := f.ReadDir(0) if err != nil { log.Debugf(context.Background(), "read dir %s: %s", dir, err) From 7c96270db8c95a54e8d59893c15e370dff1f2f76 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 8 Sep 2023 14:07:22 +0200 Subject: [PATCH 080/310] Add enum support for bundle templates (#668) ## Changes This PR includes: 1. Adding enum field to the json schema struct 2. Adding prompting logic for enum values. See demo for how it looks 3. Validation rules, validating the default value and config values when an enum list is specified This will now enable template authors to use enums for input parameters. 
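To make the new behavior concrete, here is a minimal, hypothetical sketch (the `cloud` property and its values are invented for illustration; `ValidateInstance` is part of the diff below):

```go
schema := &jsonschema.Schema{
	Properties: map[string]*jsonschema.Schema{
		"cloud": {
			Type:    "string",
			Default: "aws",
			Enum:    []any{"aws", "azure", "gcp"},
		},
	},
}

// Instance values are now checked against the enum list as well:
// "gcp" validates cleanly, while e.g. "oci" would return an error.
err := schema.ValidateInstance(map[string]any{"cloud": "gcp"})
```

When a property carries an enum list, the prompting logic switches from free-form `cmdio.Ask` to `cmdio.AskSelect`, so users pick from the allowed values instead of typing one.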
## Tests Manually and new unit tests --- libs/cmdio/logger.go | 31 ++++++++++ libs/cmdio/logger_test.go | 9 +++ libs/jsonschema/instance.go | 34 +++++++++-- libs/jsonschema/instance_test.go | 26 ++++++++ libs/jsonschema/schema.go | 34 +++++++++++ libs/jsonschema/schema_test.go | 60 +++++++++++++++++++ .../instance-validate/test-schema-enum.json | 12 ++++ .../schema-load-int/schema-invalid-enum.json | 10 ++++ .../schema-load-int/schema-valid.json | 3 +- libs/jsonschema/utils.go | 12 ++++ libs/jsonschema/utils_test.go | 10 ++++ libs/template/config.go | 20 ++++++- libs/template/config_test.go | 27 +++++++++ 13 files changed, 278 insertions(+), 10 deletions(-) create mode 100644 libs/jsonschema/testdata/instance-validate/test-schema-enum.json create mode 100644 libs/jsonschema/testdata/schema-load-int/schema-invalid-enum.json diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go index 0663306e..7d760b99 100644 --- a/libs/cmdio/logger.go +++ b/libs/cmdio/logger.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/databricks/cli/libs/flags" + "github.com/manifoldco/promptui" ) // This is the interface for all io interactions with a user @@ -104,6 +105,36 @@ func AskYesOrNo(ctx context.Context, question string) (bool, error) { return false, nil } +func AskSelect(ctx context.Context, question string, choices []string) (string, error) { + logger, ok := FromContext(ctx) + if !ok { + logger = Default() + } + return logger.AskSelect(question, choices) +} + +func (l *Logger) AskSelect(question string, choices []string) (string, error) { + if l.Mode == flags.ModeJson { + return "", fmt.Errorf("question prompts are not supported in json mode") + } + + prompt := promptui.Select{ + Label: question, + Items: choices, + HideHelp: true, + Templates: &promptui.SelectTemplates{ + Label: "{{.}}: ", + Selected: fmt.Sprintf("%s: {{.}}", question), + }, + } + + _, ans, err := prompt.Run() + if err != nil { + return "", err + } + return ans, nil +} + func (l *Logger) Ask(question string, defaultVal string) (string, error) { if l.Mode == flags.ModeJson { return "", fmt.Errorf("question prompts are not supported in json mode") diff --git a/libs/cmdio/logger_test.go b/libs/cmdio/logger_test.go index da619046..c5c00d02 100644 --- a/libs/cmdio/logger_test.go +++ b/libs/cmdio/logger_test.go @@ -1,6 +1,7 @@ package cmdio import ( + "context" "testing" "github.com/databricks/cli/libs/flags" @@ -12,3 +13,11 @@ func TestAskFailedInJsonMode(t *testing.T) { _, err := l.Ask("What is your spirit animal?", "") assert.ErrorContains(t, err, "question prompts are not supported in json mode") } + +func TestAskChoiceFailsInJsonMode(t *testing.T) { + l := NewLogger(flags.ModeJson) + ctx := NewContext(context.Background(), l) + + _, err := AskSelect(ctx, "what is a question?", []string{"b", "c", "a"}) + assert.EqualError(t, err, "question prompts are not supported in json mode") +} diff --git a/libs/jsonschema/instance.go b/libs/jsonschema/instance.go index 02ab9f28..229a45b5 100644 --- a/libs/jsonschema/instance.go +++ b/libs/jsonschema/instance.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "slices" ) // Load a JSON document and validate it against the JSON schema. 
Instance here @@ -39,13 +40,18 @@ func (s *Schema) LoadInstance(path string) (map[string]any, error) { } func (s *Schema) ValidateInstance(instance map[string]any) error { - if err := s.validateAdditionalProperties(instance); err != nil { - return err + for _, fn := range []func(map[string]any) error{ + s.validateAdditionalProperties, + s.validateEnum, + s.validateRequired, + s.validateTypes, + } { + err := fn(instance) + if err != nil { + return err + } } - if err := s.validateRequired(instance); err != nil { - return err - } - return s.validateTypes(instance) + return nil } // If additional properties is set to false, this function validates instance only @@ -89,3 +95,19 @@ func (s *Schema) validateTypes(instance map[string]any) error { } return nil } + +func (s *Schema) validateEnum(instance map[string]any) error { + for k, v := range instance { + fieldInfo, ok := s.Properties[k] + if !ok { + continue + } + if fieldInfo.Enum == nil { + continue + } + if !slices.Contains(fieldInfo.Enum, v) { + return fmt.Errorf("expected value of property %s to be one of %v. Found: %v", k, fieldInfo.Enum, v) + } + } + return nil +} diff --git a/libs/jsonschema/instance_test.go b/libs/jsonschema/instance_test.go index d5e0766d..ffd10ca4 100644 --- a/libs/jsonschema/instance_test.go +++ b/libs/jsonschema/instance_test.go @@ -127,3 +127,29 @@ func TestLoadInstance(t *testing.T) { _, err = schema.LoadInstance("./testdata/instance-load/invalid-type-instance.json") assert.EqualError(t, err, "incorrect type for property string_val: expected type string, but value is 123") } + +func TestValidateInstanceEnum(t *testing.T) { + schema, err := Load("./testdata/instance-validate/test-schema-enum.json") + require.NoError(t, err) + + validInstance := map[string]any{ + "foo": "b", + "bar": int64(6), + } + assert.NoError(t, schema.validateEnum(validInstance)) + assert.NoError(t, schema.ValidateInstance(validInstance)) + + invalidStringInstance := map[string]any{ + "foo": "d", + "bar": int64(2), + } + assert.EqualError(t, schema.validateEnum(invalidStringInstance), "expected value of property foo to be one of [a b c]. Found: d") + assert.EqualError(t, schema.ValidateInstance(invalidStringInstance), "expected value of property foo to be one of [a b c]. Found: d") + + invalidIntInstance := map[string]any{ + "foo": "a", + "bar": int64(1), + } + assert.EqualError(t, schema.validateEnum(invalidIntInstance), "expected value of property bar to be one of [2 4 6]. Found: 1") + assert.EqualError(t, schema.ValidateInstance(invalidIntInstance), "expected value of property bar to be one of [2 4 6]. Found: 1") +} diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 44c65ecc..108102a6 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "slices" ) // defines schema for a json object @@ -41,6 +42,9 @@ type Schema struct { // Default value for the property / object Default any `json:"default,omitempty"` + // List of valid values for a JSON instance for this schema. + Enum []any `json:"enum,omitempty"` + // Extension embeds our custom JSON schema extensions. Extension } @@ -84,6 +88,30 @@ func (schema *Schema) validate() error { } } + // Validate enum field values for properties are consistent with types. 
+ for name, property := range schema.Properties { + if property.Enum == nil { + continue + } + for i, enum := range property.Enum { + err := validateType(enum, property.Type) + if err != nil { + return fmt.Errorf("type validation for enum at index %v failed for property %s: %w", i, name, err) + } + } + } + + // Validate default value is contained in the list of enums if both are defined. + for name, property := range schema.Properties { + if property.Default == nil || property.Enum == nil { + continue + } + // We expect the default value to be consistent with the list of enum + // values. + if !slices.Contains(property.Enum, property.Default) { + return fmt.Errorf("list of enum values for property %s does not contain default value %v: %v", name, property.Default, property.Enum) + } + } return nil } @@ -115,6 +143,12 @@ func Load(path string) (*Schema, error) { return nil, fmt.Errorf("failed to parse default value for property %s: %w", name, err) } } + for i, enum := range property.Enum { + property.Enum[i], err = toInteger(enum) + if err != nil { + return nil, fmt.Errorf("failed to parse enum value %v at index %v for property %s: %w", enum, i, name, err) + } + } } return schema, schema.validate() diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go index 5b92d846..db559ea8 100644 --- a/libs/jsonschema/schema_test.go +++ b/libs/jsonschema/schema_test.go @@ -47,6 +47,7 @@ func TestSchemaLoadIntegers(t *testing.T) { schema, err := Load("./testdata/schema-load-int/schema-valid.json") assert.NoError(t, err) assert.Equal(t, int64(1), schema.Properties["abc"].Default) + assert.Equal(t, []any{int64(1), int64(2), int64(3)}, schema.Properties["abc"].Enum) } func TestSchemaLoadIntegersWithInvalidDefault(t *testing.T) { @@ -54,6 +55,11 @@ func TestSchemaLoadIntegersWithInvalidDefault(t *testing.T) { assert.EqualError(t, err, "failed to parse default value for property abc: expected integer value, got: 1.1") } +func TestSchemaLoadIntegersWithInvalidEnums(t *testing.T) { + _, err := Load("./testdata/schema-load-int/schema-invalid-enum.json") + assert.EqualError(t, err, "failed to parse enum value 2.4 at index 1 for property abc: expected integer value, got: 2.4") +} + func TestSchemaValidateDefaultType(t *testing.T) { invalidSchema := &Schema{ Properties: map[string]*Schema{ @@ -79,3 +85,57 @@ func TestSchemaValidateDefaultType(t *testing.T) { err = validSchema.validate() assert.NoError(t, err) } + +func TestSchemaValidateEnumType(t *testing.T) { + invalidSchema := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "boolean", + Enum: []any{true, "false"}, + }, + }, + } + + err := invalidSchema.validate() + assert.EqualError(t, err, "type validation for enum at index 1 failed for property foo: expected type boolean, but value is \"false\"") + + validSchema := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "boolean", + Enum: []any{true, false}, + }, + }, + } + + err = validSchema.validate() + assert.NoError(t, err) +} + +func TestSchemaValidateErrorWhenDefaultValueIsNotInEnums(t *testing.T) { + invalidSchema := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Default: "abc", + Enum: []any{"def", "ghi"}, + }, + }, + } + + err := invalidSchema.validate() + assert.EqualError(t, err, "list of enum values for property foo does not contain default value abc: [def ghi]") + + validSchema := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Default: "abc", + Enum: []any{"def", "ghi", "abc"}, + }, + }, + } + + err = 
validSchema.validate() + assert.NoError(t, err) +} diff --git a/libs/jsonschema/testdata/instance-validate/test-schema-enum.json b/libs/jsonschema/testdata/instance-validate/test-schema-enum.json new file mode 100644 index 00000000..75ffd6eb --- /dev/null +++ b/libs/jsonschema/testdata/instance-validate/test-schema-enum.json @@ -0,0 +1,12 @@ +{ + "properties": { + "foo": { + "type": "string", + "enum": ["a", "b", "c"] + }, + "bar": { + "type": "integer", + "enum": [2,4,6] + } + } +} diff --git a/libs/jsonschema/testdata/schema-load-int/schema-invalid-enum.json b/libs/jsonschema/testdata/schema-load-int/schema-invalid-enum.json new file mode 100644 index 00000000..5bd2b3f2 --- /dev/null +++ b/libs/jsonschema/testdata/schema-load-int/schema-invalid-enum.json @@ -0,0 +1,10 @@ +{ + "type": "object", + "properties": { + "abc": { + "type": "integer", + "default": 1, + "enum": [1,2.4,3] + } + } +} diff --git a/libs/jsonschema/testdata/schema-load-int/schema-valid.json b/libs/jsonschema/testdata/schema-load-int/schema-valid.json index 599ac04d..a1167a6c 100644 --- a/libs/jsonschema/testdata/schema-load-int/schema-valid.json +++ b/libs/jsonschema/testdata/schema-load-int/schema-valid.json @@ -3,7 +3,8 @@ "properties": { "abc": { "type": "integer", - "default": 1 + "default": 1, + "enum": [1,2,3] } } } diff --git a/libs/jsonschema/utils.go b/libs/jsonschema/utils.go index 21866965..66db9603 100644 --- a/libs/jsonschema/utils.go +++ b/libs/jsonschema/utils.go @@ -71,6 +71,18 @@ func ToString(v any, T Type) (string, error) { } } +func ToStringSlice(arr []any, T Type) ([]string, error) { + res := []string{} + for _, v := range arr { + s, err := ToString(v, T) + if err != nil { + return nil, err + } + res = append(res, s) + } + return res, nil +} + func FromString(s string, T Type) (any, error) { if T == StringType { return s, nil diff --git a/libs/jsonschema/utils_test.go b/libs/jsonschema/utils_test.go index 9686cf39..29529aaa 100644 --- a/libs/jsonschema/utils_test.go +++ b/libs/jsonschema/utils_test.go @@ -118,3 +118,13 @@ func TestTemplateFromString(t *testing.T) { _, err = FromString("1.0", "foobar") assert.EqualError(t, err, "unknown json schema type: \"foobar\"") } + +func TestTemplateToStringSlice(t *testing.T) { + s, err := ToStringSlice([]any{"a", "b", "c"}, StringType) + assert.NoError(t, err) + assert.Equal(t, []string{"a", "b", "c"}, s) + + s, err = ToStringSlice([]any{1.1, 2.2, 3.3}, NumberType) + assert.NoError(t, err) + assert.Equal(t, []string{"1.1", "2.2", "3.3"}, s) +} diff --git a/libs/template/config.go b/libs/template/config.go index 6f980f61..21618ac9 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -102,9 +102,23 @@ func (c *config) promptForValues() error { } // Get user input by running the prompt - userInput, err := cmdio.Ask(c.ctx, property.Description, defaultVal) - if err != nil { - return err + var userInput string + if property.Enum != nil { + // convert list of enums to string slice + enums, err := jsonschema.ToStringSlice(property.Enum, property.Type) + if err != nil { + return err + } + userInput, err = cmdio.AskSelect(c.ctx, property.Description, enums) + if err != nil { + return err + } + } else { + userInput, err = cmdio.Ask(c.ctx, property.Description, defaultVal) + if err != nil { + return err + } + } // Convert user input string back to a value diff --git a/libs/template/config_test.go b/libs/template/config_test.go index bba22c75..1b1fc338 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -142,3 +142,30 @@ func 
TestTemplateValidateSchema(t *testing.T) {
 	err = validateSchema(toSchema("array"))
 	assert.EqualError(t, err, "property type array is not supported by bundle templates")
 }
+
+func TestTemplateEnumValidation(t *testing.T) {
+	schema := jsonschema.Schema{
+		Properties: map[string]*jsonschema.Schema{
+			"abc": {
+				Type: "integer",
+				Enum: []any{1, 2, 3, 4},
+			},
+		},
+	}
+
+	c := &config{
+		schema: &schema,
+		values: map[string]any{
+			"abc": 5,
+		},
+	}
+	assert.EqualError(t, c.validate(), "validation for template input parameters failed. expected value of property abc to be one of [1 2 3 4]. Found: 5")
+
+	c = &config{
+		schema: &schema,
+		values: map[string]any{
+			"abc": 4,
+		},
+	}
+	assert.NoError(t, c.validate())
+}
From 18a5b05d82ce589c4bfb26ea50c8b99d52fe3f72 Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Fri, 8 Sep 2023 15:45:21 +0200
Subject: [PATCH 081/310] Apply Python wheel trampoline if workspace library is used (#755)

## Changes
A workspace library will be detected by the trampoline in two cases:
- The user configured a local wheel file
- The user configured a remote wheel file from the workspace file system

In both of these cases we should correctly apply the Python trampoline.

## Tests
Added a regression test (also covered by Python e2e test)

---
 bundle/artifacts/artifacts_test.go |  2 ++
 bundle/config/artifact.go          |  8 ++++++--
 bundle/libraries/libraries.go      | 11 +++++++++++
 bundle/python/transform.go         | 11 +++++++++--
 bundle/python/transform_test.go    |  2 +-
 5 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go
index 4c0a18f3..bbae44ef 100644
--- a/bundle/artifacts/artifacts_test.go
+++ b/bundle/artifacts/artifacts_test.go
@@ -105,6 +105,7 @@ func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) {
 	b.WorkspaceClient().Workspace.WithImpl(MockWorkspaceService{})
 
 	artifact := &config.Artifact{
+		Type: "whl",
 		Files: []config.ArtifactFile{
 			{
 				Source: whlPath,
@@ -118,4 +119,5 @@
 	err := uploadArtifact(context.Background(), artifact, b)
 	require.NoError(t, err)
 	require.Regexp(t, regexp.MustCompile("/Users/test@databricks.com/whatever/.internal/[a-z0-9]+/test.whl"), artifact.Files[0].RemotePath)
+	require.Regexp(t, regexp.MustCompile("/Workspace/Users/test@databricks.com/whatever/.internal/[a-z0-9]+/test.whl"), artifact.Files[0].Libraries[0].Whl)
 }
diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go
index 1955e265..d7048a02 100644
--- a/bundle/config/artifact.go
+++ b/bundle/config/artifact.go
@@ -78,9 +78,13 @@ func (a *Artifact) NormalisePaths() {
 		remotePath := path.Join(wsfsBase, f.RemotePath)
 		for i := range f.Libraries {
 			lib := f.Libraries[i]
-			switch a.Type {
-			case ArtifactPythonWheel:
+			if lib.Whl != "" {
 				lib.Whl = remotePath
+				continue
+			}
+			if lib.Jar != "" {
+				lib.Jar = remotePath
+				continue
 			}
 		}
diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go
index d9a257db..8e2e504c 100644
--- a/bundle/libraries/libraries.go
+++ b/bundle/libraries/libraries.go
@@ -78,6 +78,17 @@ func IsTaskWithLocalLibraries(task *jobs.Task) bool {
 	return false
 }
 
+func IsTaskWithWorkspaceLibraries(task *jobs.Task) bool {
+	for _, l := range task.Libraries {
+		path := libPath(&l)
+		if isWorkspacePath(path) {
+			return true
+		}
+	}
+
+	return false
+}
+
 func isMissingRequiredLibraries(task *jobs.Task) bool {
 	if task.Libraries != nil {
 		return false
diff --git a/bundle/python/transform.go b/bundle/python/transform.go
index 3d744df9..d8eb33f5 100644
--- a/bundle/python/transform.go +++ b/bundle/python/transform.go @@ -73,8 +73,11 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey { for i := range tasks { task := &tasks[i] - // Keep only Python wheel tasks with local libraries referenced - if task.PythonWheelTask == nil || !libraries.IsTaskWithLocalLibraries(task) { + // Keep only Python wheel tasks with workspace libraries referenced. + // At this point of moment we don't have local paths in Libraries sections anymore + // Local paths have been replaced with the remote when the artifacts where uploaded + // in artifacts.UploadAll mutator. + if task.PythonWheelTask == nil || !needsTrampoline(task) { continue } @@ -87,6 +90,10 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey { return result } +func needsTrampoline(task *jobs.Task) bool { + return libraries.IsTaskWithWorkspaceLibraries(task) +} + func (t *pythonTrampoline) GetTemplateData(task *jobs.Task) (map[string]any, error) { params, err := t.generateParameters(task.PythonWheelTask) if err != nil { diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index 99d3129d..a7448f23 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -84,7 +84,7 @@ func TestTransformFiltersWheelTasksOnly(t *testing.T) { TaskKey: "key1", PythonWheelTask: &jobs.PythonWheelTask{}, Libraries: []compute.Library{ - {Whl: "./dist/test.whl"}, + {Whl: "/Workspace/Users/test@test.com/bundle/dist/test.whl"}, }, }, { From b5d033d1542b3e8f235457b8e1ed8ddb2ab54555 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 8 Sep 2023 17:37:55 +0200 Subject: [PATCH 082/310] List available targets when incorrect target passed (#756) ## Changes List available targets when incorrect target passed ## Tests ``` andrew.nester@HFW9Y94129 wheel % databricks bundle validate -t incorrect Error: incorrect: no such target. Available targets: prod, development ``` --- bundle/config/mutator/select_target.go | 4 +++- bundle/tests/suggest_target_test.go | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 bundle/tests/suggest_target_test.go diff --git a/bundle/config/mutator/select_target.go b/bundle/config/mutator/select_target.go index 3be1f2e1..2ad43112 100644 --- a/bundle/config/mutator/select_target.go +++ b/bundle/config/mutator/select_target.go @@ -3,8 +3,10 @@ package mutator import ( "context" "fmt" + "strings" "github.com/databricks/cli/bundle" + "golang.org/x/exp/maps" ) type selectTarget struct { @@ -30,7 +32,7 @@ func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) error { // Get specified target target, ok := b.Config.Targets[m.name] if !ok { - return fmt.Errorf("%s: no such target", m.name) + return fmt.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", ")) } // Merge specified target into root configuration structure. 
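One caveat worth noting for a possible follow-up: `maps.Keys` returns keys in nondeterministic order, so the suggested target list can differ between runs. A refinement along these lines (a sketch only, not part of this change; the helper name is made up) would sort the names first:

```go
// availableTargets returns a sorted, comma-separated list of target
// names for a stable error message. Assumes "sort" and "strings" are
// imported alongside golang.org/x/exp/maps.
func availableTargets(targets map[string]*config.Target) string {
	names := maps.Keys(targets)
	sort.Strings(names)
	return strings.Join(names, ", ")
}
```

Note that the test below asserts on individual target names rather than on the full joined string, which keeps it insensitive to the ordering.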
diff --git a/bundle/tests/suggest_target_test.go b/bundle/tests/suggest_target_test.go new file mode 100644 index 00000000..924d6a4e --- /dev/null +++ b/bundle/tests/suggest_target_test.go @@ -0,0 +1,17 @@ +package config_tests + +import ( + "path/filepath" + "testing" + + "github.com/databricks/cli/internal" + "github.com/stretchr/testify/require" +) + +func TestSuggestTargetIfWrongPassed(t *testing.T) { + t.Setenv("BUNDLE_ROOT", filepath.Join("target_overrides", "workspace")) + _, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") + require.ErrorContains(t, err, "Available targets:") + require.ErrorContains(t, err, "development") + require.ErrorContains(t, err, "staging") +} From d9a276b17de3b0771f231eab6de1c55be9d7725b Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Sat, 9 Sep 2023 23:55:43 +0200 Subject: [PATCH 083/310] Fix minor typos in default-python template (#754) Co-authored-by: Pieter Noordhuis --- .../template/{{.project_name}}/databricks.yml.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl index 48aef0ea..7fbf4da4 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl @@ -7,10 +7,10 @@ include: - resources/*.yml targets: - # The 'dev' target, used development purposes. + # The 'dev' target, used for development purposes. # Whenever a developer deploys using 'dev', they get their own copy. dev: - # We use 'mode: development' to make everything deployed to this target gets a prefix + # We use 'mode: development' to make sure everything deployed to this target gets a prefix # like '[dev my_user_name]'. Setting this mode also disables any schedules and # automatic triggers for jobs and enables the 'development' mode for Delta Live Tables pipelines. mode: development From 9e56bed5935df915d5f8cb6c2b673a3ed7c3d462 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 11 Sep 2023 09:36:44 +0200 Subject: [PATCH 084/310] Minor default template tweaks (#758) Minor template tweaks, mostly making the imports section for DLT notebooks a bit more elegant. Tested with DAB deployment + in-workspace UI. --- .../default-python/template/__preamble.tmpl | 2 +- .../template/{{.project_name}}/README.md.tmpl | 2 +- .../{{.project_name}}/fixtures/.gitkeep.tmpl | 2 +- .../{{.project_name}}_pipeline.yml.tmpl | 6 +++--- .../src/dlt_pipeline.ipynb.tmpl | 20 +++++-------------- .../{{.project_name}}/src/notebook.ipynb.tmpl | 2 +- 6 files changed, 12 insertions(+), 22 deletions(-) diff --git a/libs/template/templates/default-python/template/__preamble.tmpl b/libs/template/templates/default-python/template/__preamble.tmpl index 95c61333..a86d3bff 100644 --- a/libs/template/templates/default-python/template/__preamble.tmpl +++ b/libs/template/templates/default-python/template/__preamble.tmpl @@ -17,7 +17,7 @@ This file only template directives; it is skipped for the actual output. 
{{if $notPython}} {{skip "{{.project_name}}/src/{{.project_name}}"}} - {{skip "{{.project_name}}/tests/test_main.py"}} + {{skip "{{.project_name}}/tests/main_test.py"}} {{skip "{{.project_name}}/setup.py"}} {{skip "{{.project_name}}/pytest.ini"}} {{end}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl index 7c8876e7..1bcd7af4 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl @@ -20,7 +20,7 @@ The '{{.project_name}}' project was generated by using the default-python templa This deploys everything that's defined for this project. For example, the default template would deploy a job called - `[dev yourname] {{.project_name}}-job` to your workspace. + `[dev yourname] {{.project_name}}_job` to your workspace. You can find that job by opening your workpace and clicking on **Workflows**. 4. Similarly, to deploy a production copy, type: diff --git a/libs/template/templates/default-python/template/{{.project_name}}/fixtures/.gitkeep.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/fixtures/.gitkeep.tmpl index 361c681f..ee957030 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/fixtures/.gitkeep.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/fixtures/.gitkeep.tmpl @@ -17,7 +17,7 @@ def get_absolute_path(*relative_parts): if 'dbutils' in globals(): base_dir = os.path.dirname(dbutils.notebook.entry_point.getDbutils().notebook().getContext().notebookPath().get()) # type: ignore path = os.path.normpath(os.path.join(base_dir, *relative_parts)) - return path if path.startswith("/Workspace") else os.path.join("/Workspace", path) + return path if path.startswith("/Workspace") else "/Workspace" + path else: return os.path.join(*relative_parts) diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl index ffe400cb..4b8f74d1 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl @@ -2,11 +2,11 @@ resources: pipelines: {{.project_name}}_pipeline: - name: "{{.project_name}}_pipeline" - target: "{{.project_name}}_${bundle.environment}" + name: {{.project_name}}_pipeline + target: {{.project_name}}_${bundle.environment} libraries: - notebook: path: ../src/dlt_pipeline.ipynb configuration: - "bundle.sourcePath": "/Workspace/${workspace.file_path}/src" + bundle.sourcePath: /Workspace/${workspace.file_path}/src diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl index 74893238..4f50294f 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl @@ -14,7 +14,7 @@ "source": [ "# DLT pipeline\n", "\n", - "This Delta Live Tables (DLT) definition is executed using a pipeline defined in resources/{{.my_project}}_pipeline.yml." 
+ "This Delta Live Tables (DLT) definition is executed using a pipeline defined in resources/{{.project_name}}_pipeline.yml." ] }, { @@ -27,28 +27,18 @@ "nuid": "9198e987-5606-403d-9f6d-8f14e6a4017f", "showTitle": false, "title": "" - }, - "jupyter": { - {{- /* Collapse this cell by default. Just boring imports here! */}} - "source_hidden": true } }, "outputs": [], "source": [ {{- if (eq .include_python "yes") }} - "# Import DLT and make sure 'my_project' is on the Python path\n", + "# Import DLT and src/{{.project_name}}\n", "import dlt\n", - "from pyspark.sql.functions import expr\n", - "from pyspark.sql import SparkSession\n", - "spark = SparkSession.builder.getOrCreate()\n", "import sys\n", - "try:\n", - " sys.path.append(spark.conf.get(\"bundle.sourcePath\"))\n", - "except:\n", - " pass\n", - "from my_project import main" + "sys.path.append(spark.conf.get(\"bundle.sourcePath\", \".\"))\n", + "from pyspark.sql.functions import expr\n", + "from {{.project_name}} import main" {{else}} - "# Import DLT\n", "import dlt\n", "from pyspark.sql.functions import expr\n", "from pyspark.sql import SparkSession\n", diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl index 8423ecf8..0ab61db2 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl @@ -14,7 +14,7 @@ "source": [ "# Default notebook\n", "\n", - "This default notebook is executed using Databricks Workflows as defined in resources/{{.my_project}}_job.yml." + "This default notebook is executed using Databricks Workflows as defined in resources/{{.project_name}}_job.yml." ] }, { From 9a51f72f0b86d7fc57c35392cbeba4c5ccb15650 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 11 Sep 2023 10:16:22 +0200 Subject: [PATCH 085/310] Make bundle and sync fields optional (#757) ## Changes This PR: 1. Makes the bundle and sync properties optional in the generated schema. 2. Fixes schema generation that was broken due to a rogue "description" field in the bundle docs. ## Tests Tested manually. The generated schema no longer has "bundle" and "sync" marked as required. --- bundle/config/root.go | 4 ++-- bundle/schema/docs/bundle_descriptions.json | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/bundle/config/root.go b/bundle/config/root.go index 99ea33ad..0377f60a 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -52,7 +52,7 @@ type Root struct { // Bundle contains details about this bundle, such as its name, // version of the spec (TODO), default cluster, default warehouse, etc. - Bundle Bundle `json:"bundle"` + Bundle Bundle `json:"bundle,omitempty"` // Include specifies a list of patterns of file names to load and // merge into the this configuration. 
Only includes defined in the root @@ -80,7 +80,7 @@ type Root struct { Environments map[string]*Target `json:"environments,omitempty"` // Sync section specifies options for files synchronization - Sync Sync `json:"sync"` + Sync Sync `json:"sync,omitempty"` // RunAs section allows to define an execution identity for jobs and pipelines runs RunAs *jobs.JobRunAs `json:"run_as,omitempty"` diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index ffdb5629..98f3cf8d 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -1472,7 +1472,6 @@ "config": { "description": "The model serving endpoint configuration.", "properties": { - "description": "", "properties": { "served_models": { "description": "Each block represents a served model for the endpoint to serve. A model serving endpoint can have up to 10 served models.", From 4ccc70aeaca336ceac0aebb924e428dac38eb84f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 11 Sep 2023 10:18:43 +0200 Subject: [PATCH 086/310] Consolidate environment variable interaction (#747) ## Changes There are a couple places throughout the code base where interaction with environment variables takes place. Moreover, more than one of these would try to read a value from more than one environment variable as fallback (for backwards compatibility). This change consolidates those accesses. The majority of diffs in this change are mechanical (i.e. add an argument or replace a call). This change: * Moves common environment variable lookups for bundles to `bundles/env`. * Adds a `libs/env` package that wraps `os.LookupEnv` and `os.Getenv` and allows for overrides to take place in a `context.Context`. By scoping overrides to a `context.Context` we can avoid `t.Setenv` in testing and unlock parallel test execution for integration tests. * Updates call sites to pass through a `context.Context` where needed. * For bundles, introduces `DATABRICKS_BUNDLE_ROOT` as new primary variable instead of `BUNDLE_ROOT`. This was the last environment variable that did not use the `DATABRICKS_` prefix. ## Tests Unit tests pass. 
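To make the context-scoping idea concrete, a short sketch of the intended usage (`env.Lookup` appears in the diffs below; the `env.Set` helper returning a derived context is an assumption based on the override design, and the cluster ID is a made-up value):

```go
ctx := context.Background()

// Scope an override to this context instead of mutating the process
// environment with t.Setenv; parallel tests each get their own view.
// (Assumes env.Set returns a derived context carrying the override.)
ctx = env.Set(ctx, "DATABRICKS_CLUSTER_ID", "0000-000000-example0")

// Lookup consults the context overrides first and falls back to
// os.LookupEnv for the real process environment.
if v, ok := env.Lookup(ctx, "DATABRICKS_CLUSTER_ID"); ok {
	fmt.Println(v)
}
```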
--- bundle/bundle.go | 26 ++++---- bundle/bundle_test.go | 19 +++--- bundle/config/mutator/override_compute.go | 6 +- .../config/mutator/process_root_includes.go | 7 ++- .../mutator/process_root_includes_test.go | 16 +++-- bundle/config/mutator/set_variables.go | 8 +-- bundle/config/mutator/set_variables_test.go | 10 +-- bundle/config/mutator/trampoline.go | 6 +- bundle/config/mutator/trampoline_test.go | 2 +- bundle/deploy/files/sync.go | 4 +- bundle/deploy/terraform/dir.go | 6 +- bundle/deploy/terraform/init.go | 59 ++++++++--------- bundle/deploy/terraform/init_test.go | 22 +++---- bundle/deploy/terraform/plan.go | 2 +- bundle/deploy/terraform/state_pull.go | 2 +- bundle/deploy/terraform/state_push.go | 2 +- bundle/deploy/terraform/write.go | 2 +- bundle/env/env.go | 18 ++++++ bundle/env/env_test.go | 44 +++++++++++++ bundle/env/includes.go | 14 +++++ bundle/env/includes_test.go | 28 +++++++++ bundle/env/root.go | 16 +++++ bundle/env/root_test.go | 43 +++++++++++++ bundle/env/target.go | 17 +++++ bundle/env/target_test.go | 43 +++++++++++++ bundle/env/temp_dir.go | 13 ++++ bundle/env/temp_dir_test.go | 28 +++++++++ bundle/root.go | 20 +++--- bundle/root_test.go | 59 ++++++++++------- cmd/bundle/sync.go | 4 +- cmd/cmd.go | 5 +- cmd/configure/configure_test.go | 6 +- cmd/root/bundle.go | 16 ++--- cmd/root/io.go | 5 +- cmd/root/logger.go | 8 +-- cmd/root/progress_logger.go | 3 +- cmd/root/root.go | 6 +- cmd/root/user_agent_upstream.go | 6 +- cmd/sync/sync.go | 4 +- internal/helpers.go | 2 +- internal/testutil/env.go | 33 ++++++++++ libs/env/context.go | 63 +++++++++++++++++++ libs/env/context_test.go | 41 ++++++++++++ libs/env/pkg.go | 7 +++ main.go | 4 +- main_test.go | 3 +- 46 files changed, 594 insertions(+), 164 deletions(-) create mode 100644 bundle/env/env.go create mode 100644 bundle/env/env_test.go create mode 100644 bundle/env/includes.go create mode 100644 bundle/env/includes_test.go create mode 100644 bundle/env/root.go create mode 100644 bundle/env/root_test.go create mode 100644 bundle/env/target.go create mode 100644 bundle/env/target_test.go create mode 100644 bundle/env/temp_dir.go create mode 100644 bundle/env/temp_dir_test.go create mode 100644 internal/testutil/env.go create mode 100644 libs/env/context.go create mode 100644 libs/env/context_test.go create mode 100644 libs/env/pkg.go diff --git a/bundle/bundle.go b/bundle/bundle.go index 8175ce28..4fc60539 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -14,6 +14,7 @@ import ( "sync" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/folders" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" @@ -51,8 +52,6 @@ type Bundle struct { AutoApprove bool } -const ExtraIncludePathsKey string = "DATABRICKS_BUNDLE_INCLUDES" - func Load(ctx context.Context, path string) (*Bundle, error) { bundle := &Bundle{} stat, err := os.Stat(path) @@ -61,9 +60,9 @@ func Load(ctx context.Context, path string) (*Bundle, error) { } configFile, err := config.FileNames.FindInPath(path) if err != nil { - _, hasIncludePathEnv := os.LookupEnv(ExtraIncludePathsKey) - _, hasBundleRootEnv := os.LookupEnv(envBundleRoot) - if hasIncludePathEnv && hasBundleRootEnv && stat.IsDir() { + _, hasRootEnv := env.Root(ctx) + _, hasIncludesEnv := env.Includes(ctx) + if hasRootEnv && hasIncludesEnv && stat.IsDir() { log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path) bundle.Config = config.Root{ Path: path, @@ -86,7 +85,7 @@ func Load(ctx context.Context, 
path string) (*Bundle, error) { // MustLoad returns a bundle configuration. // It returns an error if a bundle was not found or could not be loaded. func MustLoad(ctx context.Context) (*Bundle, error) { - root, err := mustGetRoot() + root, err := mustGetRoot(ctx) if err != nil { return nil, err } @@ -98,7 +97,7 @@ func MustLoad(ctx context.Context) (*Bundle, error) { // It returns an error if a bundle was found but could not be loaded. // It returns a `nil` bundle if a bundle was not found. func TryLoad(ctx context.Context) (*Bundle, error) { - root, err := tryGetRoot() + root, err := tryGetRoot(ctx) if err != nil { return nil, err } @@ -124,13 +123,12 @@ func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient { // CacheDir returns directory to use for temporary files for this bundle. // Scoped to the bundle's target. -func (b *Bundle) CacheDir(paths ...string) (string, error) { +func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error) { if b.Config.Bundle.Target == "" { panic("target not set") } - cacheDirName, exists := os.LookupEnv("DATABRICKS_BUNDLE_TMP") - + cacheDirName, exists := env.TempDir(ctx) if !exists || cacheDirName == "" { cacheDirName = filepath.Join( // Anchor at bundle root directory. @@ -163,8 +161,8 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) { // This directory is used to store and automaticaly sync internal bundle files, such as, f.e // notebook trampoline files for Python wheel and etc. -func (b *Bundle) InternalDir() (string, error) { - cacheDir, err := b.CacheDir() +func (b *Bundle) InternalDir(ctx context.Context) (string, error) { + cacheDir, err := b.CacheDir(ctx) if err != nil { return "", err } @@ -181,8 +179,8 @@ func (b *Bundle) InternalDir() (string, error) { // GetSyncIncludePatterns returns a list of user defined includes // And also adds InternalDir folder to include list for sync command // so this folder is always synced -func (b *Bundle) GetSyncIncludePatterns() ([]string, error) { - internalDir, err := b.InternalDir() +func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { + internalDir, err := b.InternalDir(ctx) if err != nil { return nil, err } diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index 4a3e7f2c..43477efd 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/bundle/env" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -23,12 +24,13 @@ func TestLoadExists(t *testing.T) { } func TestBundleCacheDir(t *testing.T) { + ctx := context.Background() projectDir := t.TempDir() f1, err := os.Create(filepath.Join(projectDir, "databricks.yml")) require.NoError(t, err) f1.Close() - bundle, err := Load(context.Background(), projectDir) + bundle, err := Load(ctx, projectDir) require.NoError(t, err) // Artificially set target. 
@@ -38,7 +40,7 @@ func TestBundleCacheDir(t *testing.T) { // unset env variable in case it's set t.Setenv("DATABRICKS_BUNDLE_TMP", "") - cacheDir, err := bundle.CacheDir() + cacheDir, err := bundle.CacheDir(ctx) // format is /.databricks/bundle/ assert.NoError(t, err) @@ -46,13 +48,14 @@ func TestBundleCacheDir(t *testing.T) { } func TestBundleCacheDirOverride(t *testing.T) { + ctx := context.Background() projectDir := t.TempDir() bundleTmpDir := t.TempDir() f1, err := os.Create(filepath.Join(projectDir, "databricks.yml")) require.NoError(t, err) f1.Close() - bundle, err := Load(context.Background(), projectDir) + bundle, err := Load(ctx, projectDir) require.NoError(t, err) // Artificially set target. @@ -62,7 +65,7 @@ func TestBundleCacheDirOverride(t *testing.T) { // now we expect to use 'bundleTmpDir' instead of CWD/.databricks/bundle t.Setenv("DATABRICKS_BUNDLE_TMP", bundleTmpDir) - cacheDir, err := bundle.CacheDir() + cacheDir, err := bundle.CacheDir(ctx) // format is / assert.NoError(t, err) @@ -70,14 +73,14 @@ func TestBundleCacheDirOverride(t *testing.T) { } func TestBundleMustLoadSuccess(t *testing.T) { - t.Setenv(envBundleRoot, "./tests/basic") + t.Setenv(env.RootVariable, "./tests/basic") b, err := MustLoad(context.Background()) require.NoError(t, err) assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path)) } func TestBundleMustLoadFailureWithEnv(t *testing.T) { - t.Setenv(envBundleRoot, "./tests/doesntexist") + t.Setenv(env.RootVariable, "./tests/doesntexist") _, err := MustLoad(context.Background()) require.Error(t, err, "not a directory") } @@ -89,14 +92,14 @@ func TestBundleMustLoadFailureIfNotFound(t *testing.T) { } func TestBundleTryLoadSuccess(t *testing.T) { - t.Setenv(envBundleRoot, "./tests/basic") + t.Setenv(env.RootVariable, "./tests/basic") b, err := TryLoad(context.Background()) require.NoError(t, err) assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path)) } func TestBundleTryLoadFailureWithEnv(t *testing.T) { - t.Setenv(envBundleRoot, "./tests/doesntexist") + t.Setenv(env.RootVariable, "./tests/doesntexist") _, err := TryLoad(context.Background()) require.Error(t, err, "not a directory") } diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index ee2e2a82..21d95013 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -3,11 +3,11 @@ package mutator import ( "context" "fmt" - "os" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/env" ) type overrideCompute struct{} @@ -39,8 +39,8 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) error { } return nil } - if os.Getenv("DATABRICKS_CLUSTER_ID") != "" { - b.Config.Bundle.ComputeID = os.Getenv("DATABRICKS_CLUSTER_ID") + if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" { + b.Config.Bundle.ComputeID = v } if b.Config.Bundle.ComputeID == "" { diff --git a/bundle/config/mutator/process_root_includes.go b/bundle/config/mutator/process_root_includes.go index 98992872..5a5ab1b1 100644 --- a/bundle/config/mutator/process_root_includes.go +++ b/bundle/config/mutator/process_root_includes.go @@ -10,11 +10,12 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/env" ) // Get extra include paths from environment variable -func GetExtraIncludePaths() []string { - value, exists := 
os.LookupEnv(bundle.ExtraIncludePathsKey) +func getExtraIncludePaths(ctx context.Context) []string { + value, exists := env.Includes(ctx) if !exists { return nil } @@ -48,7 +49,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error var files []string // Converts extra include paths from environment variable to relative paths - for _, extraIncludePath := range GetExtraIncludePaths() { + for _, extraIncludePath := range getExtraIncludePaths(ctx) { if filepath.IsAbs(extraIncludePath) { rel, err := filepath.Rel(b.Config.Path, extraIncludePath) if err != nil { diff --git a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/mutator/process_root_includes_test.go index 1ce094bc..aec9b32d 100644 --- a/bundle/config/mutator/process_root_includes_test.go +++ b/bundle/config/mutator/process_root_includes_test.go @@ -2,16 +2,17 @@ package mutator_test import ( "context" - "fmt" "os" "path" "path/filepath" "runtime" + "strings" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/env" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -129,10 +130,7 @@ func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) { rootPath := t.TempDir() testYamlName := "extra_include_path.yml" touch(t, rootPath, testYamlName) - os.Setenv(bundle.ExtraIncludePathsKey, path.Join(rootPath, testYamlName)) - t.Cleanup(func() { - os.Unsetenv(bundle.ExtraIncludePathsKey) - }) + t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName)) bundle := &bundle.Bundle{ Config: config.Root{ @@ -149,7 +147,13 @@ func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) { rootPath := t.TempDir() testYamlName := "extra_include_path.yml" touch(t, rootPath, testYamlName) - t.Setenv(bundle.ExtraIncludePathsKey, fmt.Sprintf("%s%s%s", path.Join(rootPath, testYamlName), string(os.PathListSeparator), path.Join(rootPath, testYamlName))) + t.Setenv(env.IncludesVariable, strings.Join( + []string{ + path.Join(rootPath, testYamlName), + path.Join(rootPath, testYamlName), + }, + string(os.PathListSeparator), + )) bundle := &bundle.Bundle{ Config: config.Root{ diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index 427b6dce..4bf8ff82 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -3,10 +3,10 @@ package mutator import ( "context" "fmt" - "os" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/env" ) const bundleVarPrefix = "BUNDLE_VAR_" @@ -21,7 +21,7 @@ func (m *setVariables) Name() string { return "SetVariables" } -func setVariable(v *variable.Variable, name string) error { +func setVariable(ctx context.Context, v *variable.Variable, name string) error { // case: variable already has value initialized, so skip if v.HasValue() { return nil @@ -29,7 +29,7 @@ func setVariable(v *variable.Variable, name string) error { // case: read and set variable value from process environment envVarName := bundleVarPrefix + name - if val, ok := os.LookupEnv(envVarName); ok { + if val, ok := env.Lookup(ctx, envVarName); ok { err := v.Set(val) if err != nil { return fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %w`, val, name, envVarName, err) @@ -54,7 +54,7 @@ func setVariable(v *variable.Variable, name string) error { func (m *setVariables) Apply(ctx 
context.Context, b *bundle.Bundle) error { for name, variable := range b.Config.Variables { - err := setVariable(variable, name) + err := setVariable(ctx, variable, name) if err != nil { return err } diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index 91948aa4..323f1e86 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -21,7 +21,7 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { // set value for variable as an environment variable t.Setenv("BUNDLE_VAR_foo", "process-env") - err := setVariable(&variable, "foo") + err := setVariable(context.Background(), &variable, "foo") require.NoError(t, err) assert.Equal(t, *variable.Value, "process-env") } @@ -33,7 +33,7 @@ func TestSetVariableUsingDefaultValue(t *testing.T) { Default: &defaultVal, } - err := setVariable(&variable, "foo") + err := setVariable(context.Background(), &variable, "foo") require.NoError(t, err) assert.Equal(t, *variable.Value, "default") } @@ -49,7 +49,7 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { // since a value is already assigned to the variable, it would not be overridden // by the default value - err := setVariable(&variable, "foo") + err := setVariable(context.Background(), &variable, "foo") require.NoError(t, err) assert.Equal(t, *variable.Value, "assigned-value") } @@ -68,7 +68,7 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { // since a value is already assigned to the variable, it would not be overridden // by the value from environment - err := setVariable(&variable, "foo") + err := setVariable(context.Background(), &variable, "foo") require.NoError(t, err) assert.Equal(t, *variable.Value, "assigned-value") } @@ -79,7 +79,7 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) { } // fails because we could not resolve a value for the variable - err := setVariable(&variable, "foo") + err := setVariable(context.Background(), &variable, "foo") assert.ErrorContains(t, err, "no value assigned to required variable foo. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable")
}
diff --git a/bundle/config/mutator/trampoline.go b/bundle/config/mutator/trampoline.go
index 7c06c7fa..52d62c1b 100644
--- a/bundle/config/mutator/trampoline.go
+++ b/bundle/config/mutator/trampoline.go
@@ -43,7 +43,7 @@ func (m *trampoline) Name() string {
 func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) error {
 	tasks := m.functions.GetTasks(b)
 	for _, task := range tasks {
-		err := m.generateNotebookWrapper(b, task)
+		err := m.generateNotebookWrapper(ctx, b, task)
 		if err != nil {
 			return err
 		}
@@ -51,8 +51,8 @@ func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) error {
 	return nil
 }
 
-func (m *trampoline) generateNotebookWrapper(b *bundle.Bundle, task TaskWithJobKey) error {
-	internalDir, err := b.InternalDir()
+func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bundle, task TaskWithJobKey) error {
+	internalDir, err := b.InternalDir(ctx)
 	if err != nil {
 		return err
 	}
diff --git a/bundle/config/mutator/trampoline_test.go b/bundle/config/mutator/trampoline_test.go
index aec58618..a3e06b30 100644
--- a/bundle/config/mutator/trampoline_test.go
+++ b/bundle/config/mutator/trampoline_test.go
@@ -83,7 +83,7 @@ func TestGenerateTrampoline(t *testing.T) {
 	err := bundle.Apply(ctx, b, trampoline)
 	require.NoError(t, err)
 
-	dir, err := b.InternalDir()
+	dir, err := b.InternalDir(ctx)
 	require.NoError(t, err)
 	filename := filepath.Join(dir, "notebook_test_to_trampoline.py")
 
diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go
index 2dccd20a..ff3d78d0 100644
--- a/bundle/deploy/files/sync.go
+++ b/bundle/deploy/files/sync.go
@@ -9,12 +9,12 @@ import (
 )
 
 func getSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) {
-	cacheDir, err := b.CacheDir()
+	cacheDir, err := b.CacheDir(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get bundle cache directory: %w", err)
 	}
 
-	includes, err := b.GetSyncIncludePatterns()
+	includes, err := b.GetSyncIncludePatterns(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get list of sync includes: %w", err)
 	}
diff --git a/bundle/deploy/terraform/dir.go b/bundle/deploy/terraform/dir.go
index 9f83b8da..b7b086ce 100644
--- a/bundle/deploy/terraform/dir.go
+++ b/bundle/deploy/terraform/dir.go
@@ -1,11 +1,13 @@
 package terraform
 
 import (
+	"context"
+
 	"github.com/databricks/cli/bundle"
 )
 
 // Dir returns the Terraform working directory for a given bundle.
 // The working directory is ephemeral and nested under the bundle's cache directory.
-func Dir(b *bundle.Bundle) (string, error) { - return b.CacheDir("terraform") +func Dir(ctx context.Context, b *bundle.Bundle) (string, error) { + return b.CacheDir(ctx, "terraform") } diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 60f0a6c4..aa1dff74 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/log" "github.com/hashicorp/go-version" "github.com/hashicorp/hc-install/product" @@ -38,7 +39,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con return tf.ExecPath, nil } - binDir, err := b.CacheDir("bin") + binDir, err := b.CacheDir(context.Background(), "bin") if err != nil { return "", err } @@ -73,25 +74,25 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con } // This function inherits some environment variables for Terraform CLI. -func inheritEnvVars(env map[string]string) error { +func inheritEnvVars(ctx context.Context, environ map[string]string) error { // Include $HOME in set of environment variables to pass along. - home, ok := os.LookupEnv("HOME") + home, ok := env.Lookup(ctx, "HOME") if ok { - env["HOME"] = home + environ["HOME"] = home } // Include $PATH in set of environment variables to pass along. // This is necessary to ensure that our Terraform provider can use the // same auxiliary programs (e.g. `az`, or `gcloud`) as the CLI. - path, ok := os.LookupEnv("PATH") + path, ok := env.Lookup(ctx, "PATH") if ok { - env["PATH"] = path + environ["PATH"] = path } // Include $TF_CLI_CONFIG_FILE to override terraform provider in development. - configFile, ok := os.LookupEnv("TF_CLI_CONFIG_FILE") + configFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE") if ok { - env["TF_CLI_CONFIG_FILE"] = configFile + environ["TF_CLI_CONFIG_FILE"] = configFile } return nil @@ -105,40 +106,40 @@ func inheritEnvVars(env map[string]string) error { // the CLI and its dependencies do not have access to. // // see: os.TempDir for more context -func setTempDirEnvVars(env map[string]string, b *bundle.Bundle) error { +func setTempDirEnvVars(ctx context.Context, environ map[string]string, b *bundle.Bundle) error { switch runtime.GOOS { case "windows": - if v, ok := os.LookupEnv("TMP"); ok { - env["TMP"] = v - } else if v, ok := os.LookupEnv("TEMP"); ok { - env["TEMP"] = v - } else if v, ok := os.LookupEnv("USERPROFILE"); ok { - env["USERPROFILE"] = v + if v, ok := env.Lookup(ctx, "TMP"); ok { + environ["TMP"] = v + } else if v, ok := env.Lookup(ctx, "TEMP"); ok { + environ["TEMP"] = v + } else if v, ok := env.Lookup(ctx, "USERPROFILE"); ok { + environ["USERPROFILE"] = v } else { - tmpDir, err := b.CacheDir("tmp") + tmpDir, err := b.CacheDir(ctx, "tmp") if err != nil { return err } - env["TMP"] = tmpDir + environ["TMP"] = tmpDir } default: // If TMPDIR is not set, we let the process fall back to its default value. - if v, ok := os.LookupEnv("TMPDIR"); ok { - env["TMPDIR"] = v + if v, ok := env.Lookup(ctx, "TMPDIR"); ok { + environ["TMPDIR"] = v } } return nil } // This function passes through all proxy related environment variables. 
-func setProxyEnvVars(env map[string]string, b *bundle.Bundle) error { +func setProxyEnvVars(ctx context.Context, environ map[string]string, b *bundle.Bundle) error { for _, v := range []string{"http_proxy", "https_proxy", "no_proxy"} { // The case (upper or lower) is notoriously inconsistent for tools on Unix systems. // We therefore try to read both the upper and lower case versions of the variable. for _, v := range []string{strings.ToUpper(v), strings.ToLower(v)} { - if val, ok := os.LookupEnv(v); ok { + if val, ok := env.Lookup(ctx, v); ok { // Only set uppercase version of the variable. - env[strings.ToUpper(v)] = val + environ[strings.ToUpper(v)] = val } } } @@ -157,7 +158,7 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - workingDir, err := Dir(b) + workingDir, err := Dir(ctx, b) if err != nil { return err } @@ -167,31 +168,31 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - env, err := b.AuthEnv() + environ, err := b.AuthEnv() if err != nil { return err } - err = inheritEnvVars(env) + err = inheritEnvVars(ctx, environ) if err != nil { return err } // Set the temporary directory environment variables - err = setTempDirEnvVars(env, b) + err = setTempDirEnvVars(ctx, environ, b) if err != nil { return err } // Set the proxy related environment variables - err = setProxyEnvVars(env, b) + err = setProxyEnvVars(ctx, environ, b) if err != nil { return err } // Configure environment variables for auth for Terraform to use. - log.Debugf(ctx, "Environment variables for Terraform: %s", strings.Join(maps.Keys(env), ", ")) - err = tf.SetEnv(env) + log.Debugf(ctx, "Environment variables for Terraform: %s", strings.Join(maps.Keys(environ), ", ")) + err = tf.SetEnv(environ) if err != nil { return err } diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index b9459387..001e7a22 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -68,7 +68,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) { // compute env env := make(map[string]string, 0) - err := setTempDirEnvVars(env, b) + err := setTempDirEnvVars(context.Background(), env, b) require.NoError(t, err) // Assert that we pass through TMPDIR. @@ -96,7 +96,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) { // compute env env := make(map[string]string, 0) - err := setTempDirEnvVars(env, b) + err := setTempDirEnvVars(context.Background(), env, b) require.NoError(t, err) // Assert that we don't pass through TMPDIR. 
@@ -124,7 +124,7 @@ func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) { // compute env env := make(map[string]string, 0) - err := setTempDirEnvVars(env, b) + err := setTempDirEnvVars(context.Background(), env, b) require.NoError(t, err) // assert that we pass through the highest priority env var value @@ -154,7 +154,7 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) { // compute env env := make(map[string]string, 0) - err := setTempDirEnvVars(env, b) + err := setTempDirEnvVars(context.Background(), env, b) require.NoError(t, err) // assert that we pass through the highest priority env var value @@ -184,7 +184,7 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileSet(t *testing.T) { // compute env env := make(map[string]string, 0) - err := setTempDirEnvVars(env, b) + err := setTempDirEnvVars(context.Background(), env, b) require.NoError(t, err) // assert that we pass through the user profile @@ -214,11 +214,11 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) { // compute env env := make(map[string]string, 0) - err := setTempDirEnvVars(env, b) + err := setTempDirEnvVars(context.Background(), env, b) require.NoError(t, err) // assert TMP is set to b.CacheDir("tmp") - tmpDir, err := b.CacheDir("tmp") + tmpDir, err := b.CacheDir(context.Background(), "tmp") require.NoError(t, err) assert.Equal(t, map[string]string{ "TMP": tmpDir, @@ -248,7 +248,7 @@ func TestSetProxyEnvVars(t *testing.T) { // No proxy env vars set. clearEnv() env := make(map[string]string, 0) - err := setProxyEnvVars(env, b) + err := setProxyEnvVars(context.Background(), env, b) require.NoError(t, err) assert.Len(t, env, 0) @@ -258,7 +258,7 @@ func TestSetProxyEnvVars(t *testing.T) { t.Setenv("https_proxy", "foo") t.Setenv("no_proxy", "foo") env = make(map[string]string, 0) - err = setProxyEnvVars(env, b) + err = setProxyEnvVars(context.Background(), env, b) require.NoError(t, err) assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env)) @@ -268,7 +268,7 @@ func TestSetProxyEnvVars(t *testing.T) { t.Setenv("HTTPS_PROXY", "foo") t.Setenv("NO_PROXY", "foo") env = make(map[string]string, 0) - err = setProxyEnvVars(env, b) + err = setProxyEnvVars(context.Background(), env, b) require.NoError(t, err) assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env)) } @@ -280,7 +280,7 @@ func TestInheritEnvVars(t *testing.T) { t.Setenv("PATH", "/foo:/bar") t.Setenv("TF_CLI_CONFIG_FILE", "/tmp/config.tfrc") - err := inheritEnvVars(env) + err := inheritEnvVars(context.Background(), env) require.NoError(t, err) diff --git a/bundle/deploy/terraform/plan.go b/bundle/deploy/terraform/plan.go index a725b4aa..ff841148 100644 --- a/bundle/deploy/terraform/plan.go +++ b/bundle/deploy/terraform/plan.go @@ -40,7 +40,7 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) error { } // Persist computed plan - tfDir, err := Dir(b) + tfDir, err := Dir(ctx, b) if err != nil { return err } diff --git a/bundle/deploy/terraform/state_pull.go b/bundle/deploy/terraform/state_pull.go index e5a42d89..6dd12ccf 100644 --- a/bundle/deploy/terraform/state_pull.go +++ b/bundle/deploy/terraform/state_pull.go @@ -25,7 +25,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - dir, err := Dir(b) + dir, err := Dir(ctx, b) if err != nil { return err } diff --git a/bundle/deploy/terraform/state_push.go b/bundle/deploy/terraform/state_push.go index 0cd69e52..ae1d8b8b 100644 --- 
a/bundle/deploy/terraform/state_push.go +++ b/bundle/deploy/terraform/state_push.go @@ -22,7 +22,7 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - dir, err := Dir(b) + dir, err := Dir(ctx, b) if err != nil { return err } diff --git a/bundle/deploy/terraform/write.go b/bundle/deploy/terraform/write.go index 0bf9ab24..eca79ad2 100644 --- a/bundle/deploy/terraform/write.go +++ b/bundle/deploy/terraform/write.go @@ -16,7 +16,7 @@ func (w *write) Name() string { } func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { - dir, err := Dir(b) + dir, err := Dir(ctx, b) if err != nil { return err } diff --git a/bundle/env/env.go b/bundle/env/env.go new file mode 100644 index 00000000..ed2a13c7 --- /dev/null +++ b/bundle/env/env.go @@ -0,0 +1,18 @@ +package env + +import ( + "context" + + envlib "github.com/databricks/cli/libs/env" +) + +// Return the value of the first environment variable that is set. +func get(ctx context.Context, variables []string) (string, bool) { + for _, v := range variables { + value, ok := envlib.Lookup(ctx, v) + if ok { + return value, true + } + } + return "", false +} diff --git a/bundle/env/env_test.go b/bundle/env/env_test.go new file mode 100644 index 00000000..d900242e --- /dev/null +++ b/bundle/env/env_test.go @@ -0,0 +1,44 @@ +package env + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetWithRealEnvSingleVariable(t *testing.T) { + testutil.CleanupEnvironment(t) + t.Setenv("v1", "foo") + + v, ok := get(context.Background(), []string{"v1"}) + require.True(t, ok) + assert.Equal(t, "foo", v) + + // Not set. + v, ok = get(context.Background(), []string{"v2"}) + require.False(t, ok) + assert.Equal(t, "", v) +} + +func TestGetWithRealEnvMultipleVariables(t *testing.T) { + testutil.CleanupEnvironment(t) + t.Setenv("v1", "foo") + + for _, vars := range [][]string{ + {"v1", "v2", "v3"}, + {"v2", "v3", "v1"}, + {"v3", "v1", "v2"}, + } { + v, ok := get(context.Background(), vars) + require.True(t, ok) + assert.Equal(t, "foo", v) + } + + // Not set. + v, ok := get(context.Background(), []string{"v2", "v3", "v4"}) + require.False(t, ok) + assert.Equal(t, "", v) +} diff --git a/bundle/env/includes.go b/bundle/env/includes.go new file mode 100644 index 00000000..4ade0187 --- /dev/null +++ b/bundle/env/includes.go @@ -0,0 +1,14 @@ +package env + +import "context" + +// IncludesVariable names the environment variable that holds additional configuration paths to include +// during bundle configuration loading. Also see `bundle/config/mutator/process_root_includes.go`. +const IncludesVariable = "DATABRICKS_BUNDLE_INCLUDES" + +// Includes returns the bundle Includes environment variable. 
+func Includes(ctx context.Context) (string, bool) { + return get(ctx, []string{ + IncludesVariable, + }) +} diff --git a/bundle/env/includes_test.go b/bundle/env/includes_test.go new file mode 100644 index 00000000..d9366a59 --- /dev/null +++ b/bundle/env/includes_test.go @@ -0,0 +1,28 @@ +package env + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" +) + +func TestIncludes(t *testing.T) { + ctx := context.Background() + + testutil.CleanupEnvironment(t) + + t.Run("set", func(t *testing.T) { + t.Setenv("DATABRICKS_BUNDLE_INCLUDES", "foo") + includes, ok := Includes(ctx) + assert.True(t, ok) + assert.Equal(t, "foo", includes) + }) + + t.Run("not set", func(t *testing.T) { + includes, ok := Includes(ctx) + assert.False(t, ok) + assert.Equal(t, "", includes) + }) +} diff --git a/bundle/env/root.go b/bundle/env/root.go new file mode 100644 index 00000000..e3c2a38a --- /dev/null +++ b/bundle/env/root.go @@ -0,0 +1,16 @@ +package env + +import "context" + +// RootVariable names the environment variable that holds the bundle root path. +const RootVariable = "DATABRICKS_BUNDLE_ROOT" + +// Root returns the bundle root environment variable. +func Root(ctx context.Context) (string, bool) { + return get(ctx, []string{ + RootVariable, + + // Primary variable name for the bundle root until v0.204.0. + "BUNDLE_ROOT", + }) +} diff --git a/bundle/env/root_test.go b/bundle/env/root_test.go new file mode 100644 index 00000000..fc2d6e20 --- /dev/null +++ b/bundle/env/root_test.go @@ -0,0 +1,43 @@ +package env + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" +) + +func TestRoot(t *testing.T) { + ctx := context.Background() + + testutil.CleanupEnvironment(t) + + t.Run("first", func(t *testing.T) { + t.Setenv("DATABRICKS_BUNDLE_ROOT", "foo") + root, ok := Root(ctx) + assert.True(t, ok) + assert.Equal(t, "foo", root) + }) + + t.Run("second", func(t *testing.T) { + t.Setenv("BUNDLE_ROOT", "foo") + root, ok := Root(ctx) + assert.True(t, ok) + assert.Equal(t, "foo", root) + }) + + t.Run("both set", func(t *testing.T) { + t.Setenv("DATABRICKS_BUNDLE_ROOT", "first") + t.Setenv("BUNDLE_ROOT", "second") + root, ok := Root(ctx) + assert.True(t, ok) + assert.Equal(t, "first", root) + }) + + t.Run("not set", func(t *testing.T) { + root, ok := Root(ctx) + assert.False(t, ok) + assert.Equal(t, "", root) + }) +} diff --git a/bundle/env/target.go b/bundle/env/target.go new file mode 100644 index 00000000..ac3b4887 --- /dev/null +++ b/bundle/env/target.go @@ -0,0 +1,17 @@ +package env + +import "context" + +// TargetVariable names the environment variable that holds the bundle target to use. +const TargetVariable = "DATABRICKS_BUNDLE_TARGET" + +// Target returns the bundle target environment variable. +func Target(ctx context.Context) (string, bool) { + return get(ctx, []string{ + TargetVariable, + + // Primary variable name for the bundle target until v0.203.2. + // See https://github.com/databricks/cli/pull/670. 
+ "DATABRICKS_BUNDLE_ENV", + }) +} diff --git a/bundle/env/target_test.go b/bundle/env/target_test.go new file mode 100644 index 00000000..0c15bf91 --- /dev/null +++ b/bundle/env/target_test.go @@ -0,0 +1,43 @@ +package env + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" +) + +func TestTarget(t *testing.T) { + ctx := context.Background() + + testutil.CleanupEnvironment(t) + + t.Run("first", func(t *testing.T) { + t.Setenv("DATABRICKS_BUNDLE_TARGET", "foo") + target, ok := Target(ctx) + assert.True(t, ok) + assert.Equal(t, "foo", target) + }) + + t.Run("second", func(t *testing.T) { + t.Setenv("DATABRICKS_BUNDLE_ENV", "foo") + target, ok := Target(ctx) + assert.True(t, ok) + assert.Equal(t, "foo", target) + }) + + t.Run("both set", func(t *testing.T) { + t.Setenv("DATABRICKS_BUNDLE_TARGET", "first") + t.Setenv("DATABRICKS_BUNDLE_ENV", "second") + target, ok := Target(ctx) + assert.True(t, ok) + assert.Equal(t, "first", target) + }) + + t.Run("not set", func(t *testing.T) { + target, ok := Target(ctx) + assert.False(t, ok) + assert.Equal(t, "", target) + }) +} diff --git a/bundle/env/temp_dir.go b/bundle/env/temp_dir.go new file mode 100644 index 00000000..b9133907 --- /dev/null +++ b/bundle/env/temp_dir.go @@ -0,0 +1,13 @@ +package env + +import "context" + +// TempDirVariable names the environment variable that holds the temporary directory to use. +const TempDirVariable = "DATABRICKS_BUNDLE_TMP" + +// TempDir returns the temporary directory to use. +func TempDir(ctx context.Context) (string, bool) { + return get(ctx, []string{ + TempDirVariable, + }) +} diff --git a/bundle/env/temp_dir_test.go b/bundle/env/temp_dir_test.go new file mode 100644 index 00000000..7659bac6 --- /dev/null +++ b/bundle/env/temp_dir_test.go @@ -0,0 +1,28 @@ +package env + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" +) + +func TestTempDir(t *testing.T) { + ctx := context.Background() + + testutil.CleanupEnvironment(t) + + t.Run("set", func(t *testing.T) { + t.Setenv("DATABRICKS_BUNDLE_TMP", "foo") + tempDir, ok := TempDir(ctx) + assert.True(t, ok) + assert.Equal(t, "foo", tempDir) + }) + + t.Run("not set", func(t *testing.T) { + tempDir, ok := TempDir(ctx) + assert.False(t, ok) + assert.Equal(t, "", tempDir) + }) +} diff --git a/bundle/root.go b/bundle/root.go index 46f63e13..7518bf5f 100644 --- a/bundle/root.go +++ b/bundle/root.go @@ -1,21 +1,21 @@ package bundle import ( + "context" "fmt" "os" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/folders" ) -const envBundleRoot = "BUNDLE_ROOT" - -// getRootEnv returns the value of the `BUNDLE_ROOT` environment variable +// getRootEnv returns the value of the bundle root environment variable // if it set and is a directory. If the environment variable is set but // is not a directory, it returns an error. If the environment variable is // not set, it returns an empty string. 
-func getRootEnv() (string, error) { - path, ok := os.LookupEnv(envBundleRoot) +func getRootEnv(ctx context.Context) (string, error) { + path, ok := env.Root(ctx) if !ok { return "", nil } @@ -24,7 +24,7 @@ func getRootEnv() (string, error) { err = fmt.Errorf("not a directory") } if err != nil { - return "", fmt.Errorf(`invalid bundle root %s="%s": %w`, envBundleRoot, path, err) + return "", fmt.Errorf(`invalid bundle root %s="%s": %w`, env.RootVariable, path, err) } return path, nil } @@ -48,8 +48,8 @@ func getRootWithTraversal() (string, error) { } // mustGetRoot returns a bundle root or an error if one cannot be found. -func mustGetRoot() (string, error) { - path, err := getRootEnv() +func mustGetRoot(ctx context.Context) (string, error) { + path, err := getRootEnv(ctx) if path != "" || err != nil { return path, err } @@ -57,9 +57,9 @@ func mustGetRoot() (string, error) { } // tryGetRoot returns a bundle root or an empty string if one cannot be found. -func tryGetRoot() (string, error) { +func tryGetRoot(ctx context.Context) (string, error) { // Note: an invalid value in the environment variable is still an error. - path, err := getRootEnv() + path, err := getRootEnv(ctx) if path != "" || err != nil { return path, err } diff --git a/bundle/root_test.go b/bundle/root_test.go index 0c4c46aa..88113546 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/env" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -32,49 +33,55 @@ func chdir(t *testing.T, dir string) string { } func TestRootFromEnv(t *testing.T) { + ctx := context.Background() dir := t.TempDir() - t.Setenv(envBundleRoot, dir) + t.Setenv(env.RootVariable, dir) // It should pull the root from the environment variable. - root, err := mustGetRoot() + root, err := mustGetRoot(ctx) require.NoError(t, err) require.Equal(t, root, dir) } func TestRootFromEnvDoesntExist(t *testing.T) { + ctx := context.Background() dir := t.TempDir() - t.Setenv(envBundleRoot, filepath.Join(dir, "doesntexist")) + t.Setenv(env.RootVariable, filepath.Join(dir, "doesntexist")) // It should pull the root from the environment variable. - _, err := mustGetRoot() + _, err := mustGetRoot(ctx) require.Errorf(t, err, "invalid bundle root") } func TestRootFromEnvIsFile(t *testing.T) { + ctx := context.Background() dir := t.TempDir() f, err := os.Create(filepath.Join(dir, "invalid")) require.NoError(t, err) f.Close() - t.Setenv(envBundleRoot, f.Name()) + t.Setenv(env.RootVariable, f.Name()) // It should pull the root from the environment variable. - _, err = mustGetRoot() + _, err = mustGetRoot(ctx) require.Errorf(t, err, "invalid bundle root") } func TestRootIfEnvIsEmpty(t *testing.T) { + ctx := context.Background() dir := "" - t.Setenv(envBundleRoot, dir) + t.Setenv(env.RootVariable, dir) // It should pull the root from the environment variable. - _, err := mustGetRoot() + _, err := mustGetRoot(ctx) require.Errorf(t, err, "invalid bundle root") } func TestRootLookup(t *testing.T) { + ctx := context.Background() + // Have to set then unset to allow the testing package to revert it to its original value. - t.Setenv(envBundleRoot, "") - os.Unsetenv(envBundleRoot) + t.Setenv(env.RootVariable, "") + os.Unsetenv(env.RootVariable) chdir(t, t.TempDir()) @@ -89,27 +96,30 @@ func TestRootLookup(t *testing.T) { // It should find the project root from $PWD. 
wd := chdir(t, "./a/b/c") - root, err := mustGetRoot() + root, err := mustGetRoot(ctx) require.NoError(t, err) require.Equal(t, wd, root) } func TestRootLookupError(t *testing.T) { + ctx := context.Background() + // Have to set then unset to allow the testing package to revert it to its original value. - t.Setenv(envBundleRoot, "") - os.Unsetenv(envBundleRoot) + t.Setenv(env.RootVariable, "") + os.Unsetenv(env.RootVariable) // It can't find a project root from a temporary directory. _ = chdir(t, t.TempDir()) - _, err := mustGetRoot() + _, err := mustGetRoot(ctx) require.ErrorContains(t, err, "unable to locate bundle root") } func TestLoadYamlWhenIncludesEnvPresent(t *testing.T) { + ctx := context.Background() chdir(t, filepath.Join(".", "tests", "basic")) - t.Setenv(ExtraIncludePathsKey, "test") + t.Setenv(env.IncludesVariable, "test") - bundle, err := MustLoad(context.Background()) + bundle, err := MustLoad(ctx) assert.NoError(t, err) assert.Equal(t, "basic", bundle.Config.Bundle.Name) @@ -119,30 +129,33 @@ func TestLoadYamlWhenIncludesEnvPresent(t *testing.T) { } func TestLoadDefautlBundleWhenNoYamlAndRootAndIncludesEnvPresent(t *testing.T) { + ctx := context.Background() dir := t.TempDir() chdir(t, dir) - t.Setenv(envBundleRoot, dir) - t.Setenv(ExtraIncludePathsKey, "test") + t.Setenv(env.RootVariable, dir) + t.Setenv(env.IncludesVariable, "test") - bundle, err := MustLoad(context.Background()) + bundle, err := MustLoad(ctx) assert.NoError(t, err) assert.Equal(t, dir, bundle.Config.Path) } func TestErrorIfNoYamlNoRootEnvAndIncludesEnvPresent(t *testing.T) { + ctx := context.Background() dir := t.TempDir() chdir(t, dir) - t.Setenv(ExtraIncludePathsKey, "test") + t.Setenv(env.IncludesVariable, "test") - _, err := MustLoad(context.Background()) + _, err := MustLoad(ctx) assert.Error(t, err) } func TestErrorIfNoYamlNoIncludesEnvAndRootEnvPresent(t *testing.T) { + ctx := context.Background() dir := t.TempDir() chdir(t, dir) - t.Setenv(envBundleRoot, dir) + t.Setenv(env.RootVariable, dir) - _, err := MustLoad(context.Background()) + _, err := MustLoad(ctx) assert.Error(t, err) } diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index be45626a..6d6a6f5a 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -18,12 +18,12 @@ type syncFlags struct { } func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) { - cacheDir, err := b.CacheDir() + cacheDir, err := b.CacheDir(cmd.Context()) if err != nil { return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) } - includes, err := b.GetSyncIncludePatterns() + includes, err := b.GetSyncIncludePatterns(cmd.Context()) if err != nil { return nil, fmt.Errorf("cannot get list of sync includes: %w", err) } diff --git a/cmd/cmd.go b/cmd/cmd.go index 032fde5c..6dd0f6e2 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "strings" "github.com/databricks/cli/cmd/account" @@ -21,8 +22,8 @@ const ( permissionsGroup = "permissions" ) -func New() *cobra.Command { - cli := root.New() +func New(ctx context.Context) *cobra.Command { + cli := root.New(ctx) // Add account subcommand. 
cli.AddCommand(account.New()) diff --git a/cmd/configure/configure_test.go b/cmd/configure/configure_test.go index e1ebe916..cf0505ed 100644 --- a/cmd/configure/configure_test.go +++ b/cmd/configure/configure_test.go @@ -54,7 +54,7 @@ func TestDefaultConfigureNoInteractive(t *testing.T) { }) os.Stdin = inp - cmd := cmd.New() + cmd := cmd.New(ctx) cmd.SetArgs([]string{"configure", "--token", "--host", "https://host"}) err := cmd.ExecuteContext(ctx) @@ -87,7 +87,7 @@ func TestConfigFileFromEnvNoInteractive(t *testing.T) { t.Cleanup(func() { os.Stdin = oldStdin }) os.Stdin = inp - cmd := cmd.New() + cmd := cmd.New(ctx) cmd.SetArgs([]string{"configure", "--token", "--host", "https://host"}) err := cmd.ExecuteContext(ctx) @@ -116,7 +116,7 @@ func TestCustomProfileConfigureNoInteractive(t *testing.T) { t.Cleanup(func() { os.Stdin = oldStdin }) os.Stdin = inp - cmd := cmd.New() + cmd := cmd.New(ctx) cmd.SetArgs([]string{"configure", "--token", "--host", "https://host", "--profile", "CUSTOM"}) err := cmd.ExecuteContext(ctx) diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index 10cce67a..3f9d90db 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -2,17 +2,15 @@ package root import ( "context" - "os" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/env" + envlib "github.com/databricks/cli/libs/env" "github.com/spf13/cobra" "golang.org/x/exp/maps" ) -const envName = "DATABRICKS_BUNDLE_ENV" -const targetName = "DATABRICKS_BUNDLE_TARGET" - // getTarget returns the name of the target to operate in. func getTarget(cmd *cobra.Command) (value string) { // The command line flag takes precedence. @@ -33,13 +31,7 @@ func getTarget(cmd *cobra.Command) (value string) { } // If it's not set, use the environment variable. - target := os.Getenv(targetName) - // If target env is not set with a new variable, try to check for old variable name - // TODO: remove when environments section is not supported anymore - if target == "" { - target = os.Getenv(envName) - } - + target, _ := env.Target(cmd.Context()) return target } @@ -54,7 +46,7 @@ func getProfile(cmd *cobra.Command) (value string) { } // If it's not set, use the environment variable. - return os.Getenv("DATABRICKS_CONFIG_PROFILE") + return envlib.Get(cmd.Context(), "DATABRICKS_CONFIG_PROFILE") } // loadBundle loads the bundle configuration and applies default mutators. diff --git a/cmd/root/io.go b/cmd/root/io.go index 380c01b1..23c7d6c6 100644 --- a/cmd/root/io.go +++ b/cmd/root/io.go @@ -1,9 +1,8 @@ package root import ( - "os" - "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" ) @@ -21,7 +20,7 @@ func initOutputFlag(cmd *cobra.Command) *outputFlag { // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. - if v, ok := os.LookupEnv(envOutputFormat); ok { + if v, ok := env.Lookup(cmd.Context(), envOutputFormat); ok { f.output.Set(v) } diff --git a/cmd/root/logger.go b/cmd/root/logger.go index ddfae445..dca07ca4 100644 --- a/cmd/root/logger.go +++ b/cmd/root/logger.go @@ -5,9 +5,9 @@ import ( "fmt" "io" "log/slog" - "os" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/log" "github.com/fatih/color" @@ -126,13 +126,13 @@ func initLogFlags(cmd *cobra.Command) *logFlags { // Configure defaults from environment, if applicable. 
// If the provided value is invalid it is ignored. - if v, ok := os.LookupEnv(envLogFile); ok { + if v, ok := env.Lookup(cmd.Context(), envLogFile); ok { f.file.Set(v) } - if v, ok := os.LookupEnv(envLogLevel); ok { + if v, ok := env.Lookup(cmd.Context(), envLogLevel); ok { f.level.Set(v) } - if v, ok := os.LookupEnv(envLogFormat); ok { + if v, ok := env.Lookup(cmd.Context(), envLogFormat); ok { f.output.Set(v) } diff --git a/cmd/root/progress_logger.go b/cmd/root/progress_logger.go index bdf52558..328b9947 100644 --- a/cmd/root/progress_logger.go +++ b/cmd/root/progress_logger.go @@ -6,6 +6,7 @@ import ( "os" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" "golang.org/x/term" @@ -51,7 +52,7 @@ func initProgressLoggerFlag(cmd *cobra.Command, logFlags *logFlags) *progressLog // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. - if v, ok := os.LookupEnv(envProgressFormat); ok { + if v, ok := env.Lookup(cmd.Context(), envProgressFormat); ok { f.Set(v) } diff --git a/cmd/root/root.go b/cmd/root/root.go index c71cf9ea..38eb42cc 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -14,7 +14,7 @@ import ( "github.com/spf13/cobra" ) -func New() *cobra.Command { +func New(ctx context.Context) *cobra.Command { cmd := &cobra.Command{ Use: "databricks", Short: "Databricks CLI", @@ -30,6 +30,10 @@ func New() *cobra.Command { SilenceErrors: true, } + // Pass the context along through the command during initialization. + // It will be overwritten when the command is executed. + cmd.SetContext(ctx) + // Initialize flags logFlags := initLogFlags(cmd) progressLoggerFlag := initProgressLoggerFlag(cmd, logFlags) diff --git a/cmd/root/user_agent_upstream.go b/cmd/root/user_agent_upstream.go index 3e173bda..f580b426 100644 --- a/cmd/root/user_agent_upstream.go +++ b/cmd/root/user_agent_upstream.go @@ -2,8 +2,8 @@ package root import ( "context" - "os" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/useragent" ) @@ -16,7 +16,7 @@ const upstreamKey = "upstream" const upstreamVersionKey = "upstream-version" func withUpstreamInUserAgent(ctx context.Context) context.Context { - value := os.Getenv(upstreamEnvVar) + value := env.Get(ctx, upstreamEnvVar) if value == "" { return ctx } @@ -24,7 +24,7 @@ func withUpstreamInUserAgent(ctx context.Context) context.Context { ctx = useragent.InContext(ctx, upstreamKey, value) // Include upstream version as well, if set. 
-	value = os.Getenv(upstreamVersionEnvVar)
+	value = env.Get(ctx, upstreamVersionEnvVar)
 	if value == "" {
 		return ctx
 	}
 }
diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go
index 4a62123b..5fdfb169 100644
--- a/cmd/sync/sync.go
+++ b/cmd/sync/sync.go
@@ -30,12 +30,12 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b *
 		return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle")
 	}
 
-	cacheDir, err := b.CacheDir()
+	cacheDir, err := b.CacheDir(cmd.Context())
 	if err != nil {
 		return nil, fmt.Errorf("cannot get bundle cache directory: %w", err)
 	}
 
-	includes, err := b.GetSyncIncludePatterns()
+	includes, err := b.GetSyncIncludePatterns(cmd.Context())
 	if err != nil {
 		return nil, fmt.Errorf("cannot get list of sync includes: %w", err)
 	}
diff --git a/internal/helpers.go b/internal/helpers.go
index bf27fbb5..68c00019 100644
--- a/internal/helpers.go
+++ b/internal/helpers.go
@@ -118,7 +118,7 @@ func (t *cobraTestRunner) RunBackground() {
 	var stdoutW, stderrW io.WriteCloser
 	stdoutR, stdoutW = io.Pipe()
 	stderrR, stderrW = io.Pipe()
-	root := cmd.New()
+	root := cmd.New(context.Background())
 	root.SetOut(stdoutW)
 	root.SetErr(stderrW)
 	root.SetArgs(t.args)
diff --git a/internal/testutil/env.go b/internal/testutil/env.go
new file mode 100644
index 00000000..05ffaf00
--- /dev/null
+++ b/internal/testutil/env.go
@@ -0,0 +1,33 @@
+package testutil
+
+import (
+	"os"
+	"strings"
+	"testing"
+)
+
+// CleanupEnvironment sets up a pristine environment containing only $PATH and $HOME.
+// The original environment is restored upon test completion.
+// Note: use of this function is incompatible with parallel execution.
+func CleanupEnvironment(t *testing.T) {
+	// Restore environment when test finishes.
+	environ := os.Environ()
+	t.Cleanup(func() {
+		// Restore original environment.
+		for _, kv := range environ {
+			kvs := strings.SplitN(kv, "=", 2)
+			os.Setenv(kvs[0], kvs[1])
+		}
+	})
+
+	path := os.Getenv("PATH")
+	pwd := os.Getenv("PWD")
+	os.Clearenv()
+
+	// We use t.Setenv instead of os.Setenv because the former actively
+	// prevents a test being run with t.Parallel. Modifying the environment
+	// within a test is not compatible with running tests in parallel
+	// because of isolation; the environment is scoped to the process.
+	t.Setenv("PATH", path)
+	t.Setenv("HOME", pwd)
+}
diff --git a/libs/env/context.go b/libs/env/context.go
new file mode 100644
index 00000000..cf04c1ec
--- /dev/null
+++ b/libs/env/context.go
@@ -0,0 +1,63 @@
+package env
+
+import (
+	"context"
+	"os"
+)
+
+var envContextKey int
+
+func copyMap(m map[string]string) map[string]string {
+	out := make(map[string]string, len(m))
+	for k, v := range m {
+		out[k] = v
+	}
+	return out
+}
+
+func getMap(ctx context.Context) map[string]string {
+	if ctx == nil {
+		return nil
+	}
+	m, ok := ctx.Value(&envContextKey).(map[string]string)
+	if !ok {
+		return nil
+	}
+	return m
+}
+
+func setMap(ctx context.Context, m map[string]string) context.Context {
+	return context.WithValue(ctx, &envContextKey, m)
+}
+
+// Lookup key in the context or the environment.
+// Context has precedence.
+func Lookup(ctx context.Context, key string) (string, bool) {
+	m := getMap(ctx)
+
+	// Return if the key is set in the context.
+	v, ok := m[key]
+	if ok {
+		return v, true
+	}
+
+	// Fall back to the environment.
+	return os.LookupEnv(key)
+}
+
+// Get key from the context or the environment.
+// Context has precedence. 
+func Get(ctx context.Context, key string) string {
+	v, _ := Lookup(ctx, key)
+	return v
+}
+
+// Set key on the context.
+//
+// Note: this does NOT mutate the process's actual environment variables.
+// It is only visible to other code that uses this package.
+func Set(ctx context.Context, key, value string) context.Context {
+	m := copyMap(getMap(ctx))
+	m[key] = value
+	return setMap(ctx, m)
+}
diff --git a/libs/env/context_test.go b/libs/env/context_test.go
new file mode 100644
index 00000000..9ff19459
--- /dev/null
+++ b/libs/env/context_test.go
@@ -0,0 +1,41 @@
+package env
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/internal/testutil"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestContext(t *testing.T) {
+	testutil.CleanupEnvironment(t)
+	t.Setenv("FOO", "bar")
+
+	ctx0 := context.Background()
+
+	// Get
+	assert.Equal(t, "bar", Get(ctx0, "FOO"))
+	assert.Equal(t, "", Get(ctx0, "dontexist"))
+
+	// Lookup
+	v, ok := Lookup(ctx0, "FOO")
+	assert.True(t, ok)
+	assert.Equal(t, "bar", v)
+	v, ok = Lookup(ctx0, "dontexist")
+	assert.False(t, ok)
+	assert.Equal(t, "", v)
+
+	// Set and get new context.
+	// Verify that the previous context remains unchanged.
+	ctx1 := Set(ctx0, "FOO", "baz")
+	assert.Equal(t, "baz", Get(ctx1, "FOO"))
+	assert.Equal(t, "bar", Get(ctx0, "FOO"))
+
+	// Set and get new context.
+	// Verify that the previous contexts remain unchanged.
+	ctx2 := Set(ctx1, "FOO", "qux")
+	assert.Equal(t, "qux", Get(ctx2, "FOO"))
+	assert.Equal(t, "baz", Get(ctx1, "FOO"))
+	assert.Equal(t, "bar", Get(ctx0, "FOO"))
+}
diff --git a/libs/env/pkg.go b/libs/env/pkg.go
new file mode 100644
index 00000000..e0be7e22
--- /dev/null
+++ b/libs/env/pkg.go
@@ -0,0 +1,7 @@
+package env
+
+// The env package provides functions for working with environment variables
+// and allowing for overrides via the context.Context. This is useful for
+// testing where tainting a process's environment is at odds with parallelism.
+// Use of a context.Context to store variable overrides means tests can be
+// parallelized without worrying about environment variable interference.
diff --git a/main.go b/main.go
index a4b8aabd..8c8516d9 100644
--- a/main.go
+++ b/main.go
@@ -1,10 +1,12 @@
 package main
 
 import (
+	"context"
+
 	"github.com/databricks/cli/cmd"
 	"github.com/databricks/cli/cmd/root"
 )
 
 func main() {
-	root.Execute(cmd.New())
+	root.Execute(cmd.New(context.Background()))
 }
diff --git a/main_test.go b/main_test.go
index 6a5d1944..34ecdca0 100644
--- a/main_test.go
+++ b/main_test.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"testing"
 
 	"github.com/databricks/cli/cmd"
@@ -15,7 +16,7 @@ func TestCommandsDontUseUnderscoreInName(t *testing.T) {
 	// This test lives in the main package because this is where
 	// all commands are imported.
 	//
-	queue := []*cobra.Command{cmd.New()}
+	queue := []*cobra.Command{cmd.New(context.Background())}
 	for len(queue) > 0 {
 		cmd := queue[0]
 		assert.NotContains(t, cmd.Name(), "_")

From c836194d89b376ba335501dbbb5995518b3dbd21 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Mon, 11 Sep 2023 10:18:52 +0200
Subject: [PATCH 087/310] Update Go SDK to v0.19.1 (#759)

## Changes

This includes token reuse for Azure CLI based auth.

See: https://github.com/databricks/databricks-sdk-go/releases/tag/v0.19.1

## Tests

Confirmed manually that Azure CLI tokens are acquired only once.
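To make the effect concrete, here is a minimal sketch of repeated authentication against a single client, assuming a profile configured for Azure CLI auth; the profile name, request, and loop are illustrative and not part of this change:

```go
package main

import (
	"context"
	"net/http"

	"github.com/databricks/databricks-sdk-go"
)

func main() {
	// Assumed profile name; any profile backed by Azure CLI auth works.
	w, err := databricks.NewWorkspaceClient(&databricks.Config{Profile: "azure-cli-profile"})
	if err != nil {
		panic(err)
	}
	req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, "/", http.NoBody)
	for i := 0; i < 3; i++ {
		// With v0.19.1, only the first call shells out to `az`;
		// subsequent calls reuse the cached token.
		if err := w.Config.Authenticate(req); err != nil {
			panic(err)
		}
	}
}
```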
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7e24b0db..14c85e67 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.19.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.19.1 // Apache 2.0 github.com/fatih/color v1.15.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.3.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 83bb01b6..20c985b0 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/databricks/databricks-sdk-go v0.19.0 h1:Xh5A90/+8ehW7fTqoQbQK5xZu7a/akv3Xwv8UdWB4GU= -github.com/databricks/databricks-sdk-go v0.19.0/go.mod h1:Bt/3i3ry/rQdE6Y+psvkAENlp+LzJHaQK5PsLIstQb4= +github.com/databricks/databricks-sdk-go v0.19.1 h1:hP7xZb+Hd8n0grnEcf2FOMn6lWox7vp5KAan3D2hnzM= +github.com/databricks/databricks-sdk-go v0.19.1/go.mod h1:Bt/3i3ry/rQdE6Y+psvkAENlp+LzJHaQK5PsLIstQb4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From a4e94e1b3662249599f43009be4b2683e31d943b Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 11 Sep 2023 10:59:48 +0200 Subject: [PATCH 088/310] Fix author in setup.py (#761) Fix author in setup.py showing --- .../default-python/template/{{.project_name}}/setup.py.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl index 93f4e9ff..efd59882 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl @@ -15,7 +15,7 @@ setup( name="{{.project_name}}", version={{.project_name}}.__version__, url="https://databricks.com", - author="{{.user_name}}", + author="{{user_name}}", description="my test wheel", packages=find_packages(where='./src'), package_dir={'': 'src'}, From ad84abf41588eeab78484caff876546ecf2d4199 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 11 Sep 2023 12:22:05 +0200 Subject: [PATCH 089/310] Fix temporary directory cleanup for init repository downloading (#760) ## Changes This PR fixes a bug where the temp directory created to download the template would not be cleaned up. ## Tests Tested manually. The exact process is described in a comment below. 
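The fix relies on a standard Go pattern: create a uniquely named temporary directory with `os.MkdirTemp` and schedule its removal with `defer os.RemoveAll`. A minimal standalone sketch (the prefix string is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.MkdirTemp replaces the '*' with a random string, so concurrent
	// invocations never collide on the same directory.
	repoDir, err := os.MkdirTemp("", "template-repo-*") // illustrative prefix
	if err != nil {
		panic(err)
	}
	// Deferred removal guarantees cleanup on every return path.
	defer os.RemoveAll(repoDir)

	fmt.Println("cloning into", repoDir)
}
```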
--- cmd/bundle/init.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 9a11eb25..3038cb7a 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -74,22 +74,21 @@ func newInitCommand() *cobra.Command { return template.Materialize(ctx, configFile, templatePath, outputDir) } - // Download the template in a temporary directory - tmpDir := os.TempDir() - templateURL := templatePath - repoDir := filepath.Join(tmpDir, repoName(templateURL)) - err := os.MkdirAll(repoDir, 0755) + // Create a temporary directory with the name of the repository. The '*' + // character is replaced by a random string in the generated temporary directory. + repoDir, err := os.MkdirTemp("", repoName(templatePath)+"-*") if err != nil { return err } // TODO: Add automated test that the downloaded git repo is cleaned up. - err = git.Clone(ctx, templateURL, "", repoDir) + // Clone the repository in the temporary directory + err = git.Clone(ctx, templatePath, "", repoDir) if err != nil { return err } - defer os.RemoveAll(templateDir) + // Clean up downloaded repository once the template is materialized. + defer os.RemoveAll(repoDir) return template.Materialize(ctx, configFile, filepath.Join(repoDir, templateDir), outputDir) } - return cmd } From 44726d6444dc0a29942445fb99f3aba3a9378693 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 11 Sep 2023 13:57:21 +0200 Subject: [PATCH 090/310] Release v0.204.1 (#763) Bundles: * Fix conversion of job parameters ([#744](https://github.com/databricks/cli/pull/744)). * Add schema and config validation to jsonschema package ([#740](https://github.com/databricks/cli/pull/740)). * Support Model Serving Endpoints in bundles ([#682](https://github.com/databricks/cli/pull/682)). * Do not include empty output in job run output ([#749](https://github.com/databricks/cli/pull/749)). * Fixed marking libraries from DBFS as remote ([#750](https://github.com/databricks/cli/pull/750)). * Process only Python wheel tasks which have local libraries used ([#751](https://github.com/databricks/cli/pull/751)). * Add enum support for bundle templates ([#668](https://github.com/databricks/cli/pull/668)). * Apply Python wheel trampoline if workspace library is used ([#755](https://github.com/databricks/cli/pull/755)). * List available targets when incorrect target passed ([#756](https://github.com/databricks/cli/pull/756)). * Make bundle and sync fields optional ([#757](https://github.com/databricks/cli/pull/757)). * Consolidate environment variable interaction ([#747](https://github.com/databricks/cli/pull/747)). Internal: * Update Go SDK to v0.19.1 ([#759](https://github.com/databricks/cli/pull/759)). --- CHANGELOG.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9835b0bc..ba0dbcdc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Version changelog +## 0.204.1 + +Bundles: + * Fix conversion of job parameters ([#744](https://github.com/databricks/cli/pull/744)). + * Add schema and config validation to jsonschema package ([#740](https://github.com/databricks/cli/pull/740)). + * Support Model Serving Endpoints in bundles ([#682](https://github.com/databricks/cli/pull/682)). + * Do not include empty output in job run output ([#749](https://github.com/databricks/cli/pull/749)). + * Fixed marking libraries from DBFS as remote ([#750](https://github.com/databricks/cli/pull/750)). 
+ * Process only Python wheel tasks which have local libraries used ([#751](https://github.com/databricks/cli/pull/751)). + * Add enum support for bundle templates ([#668](https://github.com/databricks/cli/pull/668)). + * Apply Python wheel trampoline if workspace library is used ([#755](https://github.com/databricks/cli/pull/755)). + * List available targets when incorrect target passed ([#756](https://github.com/databricks/cli/pull/756)). + * Make bundle and sync fields optional ([#757](https://github.com/databricks/cli/pull/757)). + * Consolidate environment variable interaction ([#747](https://github.com/databricks/cli/pull/747)). + +Internal: + * Update Go SDK to v0.19.1 ([#759](https://github.com/databricks/cli/pull/759)). + + + ## 0.204.0 This release includes permission related commands for a subset of workspace From 373f441eb2e5a8a07905882caa465c039ba05511 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 11 Sep 2023 17:23:25 +0200 Subject: [PATCH 091/310] Use clearer error message when no interpolation value is found. (#764) ## Changes This PR makes the error message clearer for when interpolation fails. ## Tests Existing unit test and manually --- bundle/config/interpolation/interpolation.go | 2 +- bundle/config/interpolation/interpolation_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bundle/config/interpolation/interpolation.go b/bundle/config/interpolation/interpolation.go index bf5bd169..8ba0b8b1 100644 --- a/bundle/config/interpolation/interpolation.go +++ b/bundle/config/interpolation/interpolation.go @@ -184,7 +184,7 @@ func (a *accumulator) Resolve(path string, seenPaths []string, fns ...LookupFunc // fetch the string node to resolve field, ok := a.strings[path] if !ok { - return fmt.Errorf("could not resolve reference %s", path) + return fmt.Errorf("no value found for interpolation reference: ${%s}", path) } // return early if the string field has no variables to interpolate diff --git a/bundle/config/interpolation/interpolation_test.go b/bundle/config/interpolation/interpolation_test.go index 83254c9b..cccb6dc7 100644 --- a/bundle/config/interpolation/interpolation_test.go +++ b/bundle/config/interpolation/interpolation_test.go @@ -247,5 +247,5 @@ func TestInterpolationInvalidVariableReference(t *testing.T) { } err := expand(&config) - assert.ErrorContains(t, err, "could not resolve reference vars.foo") + assert.ErrorContains(t, err, "no value found for interpolation reference: ${vars.foo}") } From 0cb05d1dedc4cdb591b884248b8871f73950c9bd Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 11 Sep 2023 17:32:24 +0200 Subject: [PATCH 092/310] Prompt once for a client profile (#727) ## Changes The previous implementation ran the risk of infinite looping for the account client due to a mismatch in determining what constitutes an account client between the CLI and SDK (see [here](https://github.com/databricks/cli/blob/83443bae8d8ad4df3758f4192c6bbe613faae9c4/libs/databrickscfg/profiles.go#L61) and [here](https://github.com/databricks/databricks-sdk-go/blob/0fdc5165e57a4e7af6ec97b47595c6dddf37b10b/config/config.go#L160)). Ultimately, this code must never infinite loop. If a user is prompted and selects a profile that cannot be used, they should receive that feedback immediately and try again, instead of being prompted again. Related to #726. 
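For clarity, here is the shape of the fix reduced to a self-contained sketch. All types and helpers below are hypothetical stand-ins for the CLI's real ones; the point is that there is exactly one prompt and one retry, never a loop:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the CLI's real configuration and client types.
type config struct{ Profile string }
type client struct{ cfg config }

var errCannotConfigureAuth = errors.New("cannot configure auth")

func newClient(cfg config) (*client, error) {
	if cfg.Profile == "" {
		return nil, errCannotConfigureAuth
	}
	return &client{cfg}, nil
}

// askForProfile stands in for the interactive profile picker.
func askForProfile(ctx context.Context) (string, error) { return "DEFAULT", nil }

// clientOrPrompt attempts once with the given config. On a recoverable
// error in interactive mode it prompts for a profile and attempts exactly
// once more; any error from the second attempt is returned as-is.
func clientOrPrompt(ctx context.Context, cfg config, interactive bool) (*client, error) {
	c, err := newClient(cfg)
	if err == nil || !errors.Is(err, errCannotConfigureAuth) || !interactive {
		return c, err
	}
	profile, err := askForProfile(ctx)
	if err != nil {
		return nil, err
	}
	return newClient(config{Profile: profile})
}

func main() {
	c, err := clientOrPrompt(context.Background(), config{}, true)
	fmt.Println(c.cfg.Profile, err)
}
```

Contrast this with the removed `TRY_AUTH` label and `goto`, which re-entered the same code path and could loop if the selected profile still failed.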
## Tests --- cmd/root/auth.go | 146 +++++++++++++++++++++++++------------ cmd/root/auth_test.go | 164 ++++++++++++++++++++++++++++++++++++++++++ libs/cmdio/io.go | 7 ++ libs/cmdio/testing.go | 46 ++++++++++++ 4 files changed, 318 insertions(+), 45 deletions(-) create mode 100644 libs/cmdio/testing.go diff --git a/cmd/root/auth.go b/cmd/root/auth.go index d4c9a31b..de5648c6 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -25,13 +25,57 @@ func initProfileFlag(cmd *cobra.Command) { cmd.RegisterFlagCompletionFunc("profile", databrickscfg.ProfileCompletion) } +func profileFlagValue(cmd *cobra.Command) (string, bool) { + profileFlag := cmd.Flag("profile") + if profileFlag == nil { + return "", false + } + value := profileFlag.Value.String() + return value, value != "" +} + +// Helper function to create an account client or prompt once if the given configuration is not valid. +func accountClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt bool) (*databricks.AccountClient, error) { + a, err := databricks.NewAccountClient((*databricks.Config)(cfg)) + if err == nil { + err = a.Config.Authenticate(emptyHttpRequest(ctx)) + } + + prompt := false + if allowPrompt && err != nil && cmdio.IsInteractive(ctx) { + // Prompt to select a profile if the current configuration is not an account client. + prompt = prompt || errors.Is(err, databricks.ErrNotAccountClient) + // Prompt to select a profile if the current configuration doesn't resolve to a credential provider. + prompt = prompt || errors.Is(err, config.ErrCannotConfigureAuth) + } + + if !prompt { + // If we are not prompting, we can return early. + return a, err + } + + // Try picking a profile dynamically if the current configuration is not valid. + profile, err := askForAccountProfile(ctx) + if err != nil { + return nil, err + } + a, err = databricks.NewAccountClient(&databricks.Config{Profile: profile}) + if err == nil { + err = a.Config.Authenticate(emptyHttpRequest(ctx)) + if err != nil { + return nil, err + } + } + return a, nil +} + func MustAccountClient(cmd *cobra.Command, args []string) error { cfg := &config.Config{} - // command-line flag can specify the profile in use - profileFlag := cmd.Flag("profile") - if profileFlag != nil { - cfg.Profile = profileFlag.Value.String() + // The command-line profile flag takes precedence over DATABRICKS_CONFIG_PROFILE. + profile, hasProfileFlag := profileFlagValue(cmd) + if hasProfileFlag { + cfg.Profile = profile } if cfg.Profile == "" { @@ -48,16 +92,8 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { } } -TRY_AUTH: // or try picking a config profile dynamically - a, err := databricks.NewAccountClient((*databricks.Config)(cfg)) - if cmdio.IsInteractive(cmd.Context()) && errors.Is(err, databricks.ErrNotAccountClient) { - profile, err := askForAccountProfile() - if err != nil { - return err - } - cfg = &config.Config{Profile: profile} - goto TRY_AUTH - } + allowPrompt := !hasProfileFlag + a, err := accountClientOrPrompt(cmd.Context(), cfg, allowPrompt) if err != nil { return err } @@ -66,13 +102,48 @@ TRY_AUTH: // or try picking a config profile dynamically return nil } +// Helper function to create a workspace client or prompt once if the given configuration is not valid. 
+func workspaceClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt bool) (*databricks.WorkspaceClient, error) { + w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg)) + if err == nil { + err = w.Config.Authenticate(emptyHttpRequest(ctx)) + } + + prompt := false + if allowPrompt && err != nil && cmdio.IsInteractive(ctx) { + // Prompt to select a profile if the current configuration is not a workspace client. + prompt = prompt || errors.Is(err, databricks.ErrNotWorkspaceClient) + // Prompt to select a profile if the current configuration doesn't resolve to a credential provider. + prompt = prompt || errors.Is(err, config.ErrCannotConfigureAuth) + } + + if !prompt { + // If we are not prompting, we can return early. + return w, err + } + + // Try picking a profile dynamically if the current configuration is not valid. + profile, err := askForWorkspaceProfile(ctx) + if err != nil { + return nil, err + } + w, err = databricks.NewWorkspaceClient(&databricks.Config{Profile: profile}) + if err == nil { + err = w.Config.Authenticate(emptyHttpRequest(ctx)) + if err != nil { + return nil, err + } + } + return w, nil +} + func MustWorkspaceClient(cmd *cobra.Command, args []string) error { cfg := &config.Config{} - // command-line flag takes precedence over environment variable - profileFlag := cmd.Flag("profile") - if profileFlag != nil { - cfg.Profile = profileFlag.Value.String() + // The command-line profile flag takes precedence over DATABRICKS_CONFIG_PROFILE. + profile, hasProfileFlag := profileFlagValue(cmd) + if hasProfileFlag { + cfg.Profile = profile } // try configuring a bundle @@ -87,24 +158,13 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { cfg = currentBundle.WorkspaceClient().Config } -TRY_AUTH: // or try picking a config profile dynamically + allowPrompt := !hasProfileFlag + w, err := workspaceClientOrPrompt(cmd.Context(), cfg, allowPrompt) + if err != nil { + return err + } + ctx := cmd.Context() - w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg)) - if err != nil { - return err - } - err = w.Config.Authenticate(emptyHttpRequest(ctx)) - if cmdio.IsInteractive(ctx) && errors.Is(err, config.ErrCannotConfigureAuth) { - profile, err := askForWorkspaceProfile() - if err != nil { - return err - } - cfg = &config.Config{Profile: profile} - goto TRY_AUTH - } - if err != nil { - return err - } ctx = context.WithValue(ctx, &workspaceClient, w) cmd.SetContext(ctx) return nil @@ -121,7 +181,7 @@ func transformLoadError(path string, err error) error { return err } -func askForWorkspaceProfile() (string, error) { +func askForWorkspaceProfile(ctx context.Context) (string, error) { path, err := databrickscfg.GetPath() if err != nil { return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) @@ -136,7 +196,7 @@ func askForWorkspaceProfile() (string, error) { case 1: return profiles[0].Name, nil } - i, _, err := (&promptui.Select{ + i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ Label: fmt.Sprintf("Workspace profiles defined in %s", file), Items: profiles, Searcher: profiles.SearchCaseInsensitive, @@ -147,16 +207,14 @@ func askForWorkspaceProfile() (string, error) { Inactive: `{{.Name}}`, Selected: `{{ "Using workspace profile" | faint }}: {{ .Name | bold }}`, }, - Stdin: os.Stdin, - Stdout: os.Stderr, - }).Run() + }) if err != nil { return "", err } return profiles[i].Name, nil } -func askForAccountProfile() (string, error) { +func askForAccountProfile(ctx context.Context) (string, error) { path, err := 
databrickscfg.GetPath() if err != nil { return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) @@ -171,7 +229,7 @@ func askForAccountProfile() (string, error) { case 1: return profiles[0].Name, nil } - i, _, err := (&promptui.Select{ + i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ Label: fmt.Sprintf("Account profiles defined in %s", file), Items: profiles, Searcher: profiles.SearchCaseInsensitive, @@ -182,9 +240,7 @@ func askForAccountProfile() (string, error) { Inactive: `{{.Name}}`, Selected: `{{ "Using account profile" | faint }}: {{ .Name | bold }}`, }, - Stdin: os.Stdin, - Stdout: os.Stderr, - }).Run() + }) if err != nil { return "", err } diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 75d255b5..70a52d50 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -2,9 +2,15 @@ package root import ( "context" + "os" + "path/filepath" "testing" + "time" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestEmptyHttpRequest(t *testing.T) { @@ -12,3 +18,161 @@ func TestEmptyHttpRequest(t *testing.T) { req := emptyHttpRequest(ctx) assert.Equal(t, req.Context(), ctx) } + +type promptFn func(ctx context.Context, cfg *config.Config, retry bool) (any, error) + +var accountPromptFn = func(ctx context.Context, cfg *config.Config, retry bool) (any, error) { + return accountClientOrPrompt(ctx, cfg, retry) +} + +var workspacePromptFn = func(ctx context.Context, cfg *config.Config, retry bool) (any, error) { + return workspaceClientOrPrompt(ctx, cfg, retry) +} + +func expectPrompts(t *testing.T, fn promptFn, config *config.Config) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + // Channel to pass errors from the prompting function back to the test. + errch := make(chan error, 1) + + ctx, io := cmdio.SetupTest(ctx) + go func() { + defer close(errch) + defer cancel() + _, err := fn(ctx, config, true) + errch <- err + }() + + // Expect a prompt + line, _, err := io.Stderr.ReadLine() + if assert.NoError(t, err, "Expected to read a line from stderr") { + assert.Contains(t, string(line), "Search:") + } else { + // If there was an error reading from stderr, the prompting function must have terminated early. 
+ assert.NoError(t, <-errch) + } +} + +func expectReturns(t *testing.T, fn promptFn, config *config.Config) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + ctx, _ = cmdio.SetupTest(ctx) + client, err := fn(ctx, config, true) + require.NoError(t, err) + require.NotNil(t, client) +} + +func TestAccountClientOrPrompt(t *testing.T) { + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + err := os.WriteFile( + configFile, + []byte(` + [account-1111] + host = https://accounts.azuredatabricks.net/ + account_id = 1111 + token = foobar + + [account-1112] + host = https://accounts.azuredatabricks.net/ + account_id = 1112 + token = foobar + `), + 0755) + require.NoError(t, err) + t.Setenv("DATABRICKS_CONFIG_FILE", configFile) + t.Setenv("PATH", "/nothing") + + t.Run("Prompt if nothing is specified", func(t *testing.T) { + expectPrompts(t, accountPromptFn, &config.Config{}) + }) + + t.Run("Prompt if a workspace host is specified", func(t *testing.T) { + expectPrompts(t, accountPromptFn, &config.Config{ + Host: "https://adb-1234567.89.azuredatabricks.net/", + AccountID: "1234", + Token: "foobar", + }) + }) + + t.Run("Prompt if account ID is not specified", func(t *testing.T) { + expectPrompts(t, accountPromptFn, &config.Config{ + Host: "https://accounts.azuredatabricks.net/", + Token: "foobar", + }) + }) + + t.Run("Prompt if no credential provider can be configured", func(t *testing.T) { + expectPrompts(t, accountPromptFn, &config.Config{ + Host: "https://accounts.azuredatabricks.net/", + AccountID: "1234", + }) + }) + + t.Run("Returns if configuration is valid", func(t *testing.T) { + expectReturns(t, accountPromptFn, &config.Config{ + Host: "https://accounts.azuredatabricks.net/", + AccountID: "1234", + Token: "foobar", + }) + }) + + t.Run("Returns if a valid profile is specified", func(t *testing.T) { + expectReturns(t, accountPromptFn, &config.Config{ + Profile: "account-1111", + }) + }) +} + +func TestWorkspaceClientOrPrompt(t *testing.T) { + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + err := os.WriteFile( + configFile, + []byte(` + [workspace-1111] + host = https://adb-1111.11.azuredatabricks.net/ + token = foobar + + [workspace-1112] + host = https://adb-1112.12.azuredatabricks.net/ + token = foobar + `), + 0755) + require.NoError(t, err) + t.Setenv("DATABRICKS_CONFIG_FILE", configFile) + t.Setenv("PATH", "/nothing") + + t.Run("Prompt if nothing is specified", func(t *testing.T) { + expectPrompts(t, workspacePromptFn, &config.Config{}) + }) + + t.Run("Prompt if an account host is specified", func(t *testing.T) { + expectPrompts(t, workspacePromptFn, &config.Config{ + Host: "https://accounts.azuredatabricks.net/", + AccountID: "1234", + Token: "foobar", + }) + }) + + t.Run("Prompt if no credential provider can be configured", func(t *testing.T) { + expectPrompts(t, workspacePromptFn, &config.Config{ + Host: "https://adb-1111.11.azuredatabricks.net/", + }) + }) + + t.Run("Returns if configuration is valid", func(t *testing.T) { + expectReturns(t, workspacePromptFn, &config.Config{ + Host: "https://adb-1111.11.azuredatabricks.net/", + Token: "foobar", + }) + }) + + t.Run("Returns if a valid profile is specified", func(t *testing.T) { + expectReturns(t, workspacePromptFn, &config.Config{ + Profile: "workspace-1111", + }) + }) +} diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index 9d712e35..cf405a7a 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -205,6 +205,13 @@ func Prompt(ctx 
context.Context) *promptui.Prompt { } } +func RunSelect(ctx context.Context, prompt *promptui.Select) (int, string, error) { + c := fromContext(ctx) + prompt.Stdin = io.NopCloser(c.in) + prompt.Stdout = nopWriteCloser{c.err} + return prompt.Run() +} + func (c *cmdIO) simplePrompt(label string) *promptui.Prompt { return &promptui.Prompt{ Label: label, diff --git a/libs/cmdio/testing.go b/libs/cmdio/testing.go new file mode 100644 index 00000000..43592489 --- /dev/null +++ b/libs/cmdio/testing.go @@ -0,0 +1,46 @@ +package cmdio + +import ( + "bufio" + "context" + "io" +) + +type Test struct { + Done context.CancelFunc + + Stdin *bufio.Writer + Stdout *bufio.Reader + Stderr *bufio.Reader +} + +func SetupTest(ctx context.Context) (context.Context, *Test) { + rin, win := io.Pipe() + rout, wout := io.Pipe() + rerr, werr := io.Pipe() + + cmdio := &cmdIO{ + interactive: true, + in: rin, + out: wout, + err: werr, + } + + ctx, cancel := context.WithCancel(ctx) + ctx = InContext(ctx, cmdio) + + // Wait for context to be done, so we can drain stdin and close the pipes. + go func() { + <-ctx.Done() + rin.Close() + wout.Close() + werr.Close() + }() + + return ctx, &Test{ + Done: cancel, + Stdin: bufio.NewWriter(win), + Stdout: bufio.NewReader(rout), + Stderr: bufio.NewReader(rerr), + } +} From a2775f836f2d24fa592f06954e7b60e9ea2bb698 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 11 Sep 2023 20:03:12 +0200 Subject: [PATCH 093/310] Use interactive prompt to select resource to run if not specified (#762) ## Changes Display an interactive prompt with a list of resources to run if one isn't specified and the command is run interactively. ## Tests Manually confirmed: * The new prompt works * Shell completion still works * Specifying a key argument still works --- bundle/run/job.go | 7 +++++++ bundle/run/keys.go | 22 +++++++++++++++------- bundle/run/pipeline.go | 7 +++++++ bundle/run/runner.go | 3 +++ cmd/bundle/run.go | 30 +++++++++++++++++++++++++----- 5 files changed, 57 insertions(+), 12 deletions(-) diff --git a/bundle/run/job.go b/bundle/run/job.go index f152a17d..319cd146 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -95,6 +95,13 @@ type jobRunner struct { job *resources.Job } +func (r *jobRunner) Name() string { + if r.job == nil || r.job.JobSettings == nil { + return "" + } + return r.job.JobSettings.Name +} + func isFailed(task jobs.RunTask) bool { return task.State.LifeCycleState == jobs.RunLifeCycleStateInternalError || (task.State.LifeCycleState == jobs.RunLifeCycleStateTerminated && diff --git a/bundle/run/keys.go b/bundle/run/keys.go index c8b7a2b5..76ec50ac 100644 --- a/bundle/run/keys.go +++ b/bundle/run/keys.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "golang.org/x/exp/maps" ) // RunnerLookup maps identifiers to a list of workloads that match that identifier. @@ -32,18 +33,20 @@ func ResourceKeys(b *bundle.Bundle) (keyOnly RunnerLookup, keyWithType RunnerLoo return } -// ResourceCompletions returns a list of keys that unambiguously reference resources in the bundle. -func ResourceCompletions(b *bundle.Bundle) []string { - seen := make(map[string]bool) - comps := []string{} +// ResourceCompletionMap returns a map of resource keys to their respective names. +func ResourceCompletionMap(b *bundle.Bundle) map[string]string { + out := make(map[string]string) keyOnly, keyWithType := ResourceKeys(b) + // Keep track of resources we have seen by their fully qualified key. 
+ seen := make(map[string]bool) + // First add resources that can be identified by key alone. for k, v := range keyOnly { // Invariant: len(v) >= 1. See [ResourceKeys]. if len(v) == 1 { seen[v[0].Key()] = true - comps = append(comps, k) + out[k] = v[0].Name() } } @@ -54,8 +57,13 @@ func ResourceCompletions(b *bundle.Bundle) []string { if ok { continue } - comps = append(comps, k) + out[k] = v[0].Name() } - return comps + return out +} + +// ResourceCompletions returns a list of keys that unambiguously reference resources in the bundle. +func ResourceCompletions(b *bundle.Bundle) []string { + return maps.Keys(ResourceCompletionMap(b)) } diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index 7b82c3ea..216712d3 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -136,6 +136,13 @@ type pipelineRunner struct { pipeline *resources.Pipeline } +func (r *pipelineRunner) Name() string { + if r.pipeline == nil || r.pipeline.PipelineSpec == nil { + return "" + } + return r.pipeline.PipelineSpec.Name +} + func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, error) { var pipelineID = r.pipeline.ID diff --git a/bundle/run/runner.go b/bundle/run/runner.go index 227e12d9..7d3c2c29 100644 --- a/bundle/run/runner.go +++ b/bundle/run/runner.go @@ -21,6 +21,9 @@ type Runner interface { // This is used for showing the user hints w.r.t. disambiguation. Key() string + // Name returns the resource's name, if defined. + Name() string + // Run the underlying worklow. Run(ctx context.Context, opts *Options) (output.RunOutput, error) } diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 28b9ae7c..b5a60ee1 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/bundle/run" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" ) @@ -16,9 +17,9 @@ import ( func newRunCommand() *cobra.Command { cmd := &cobra.Command{ Use: "run [flags] KEY", - Short: "Run a workload (e.g. a job or a pipeline)", + Short: "Run a resource (e.g. a job or a pipeline)", - Args: cobra.ExactArgs(1), + Args: cobra.MaximumNArgs(1), PreRunE: ConfigureBundleWithVariables, } @@ -29,9 +30,10 @@ func newRunCommand() *cobra.Command { cmd.Flags().BoolVar(&noWait, "no-wait", false, "Don't wait for the run to complete.") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b := bundle.Get(ctx) - err := bundle.Apply(cmd.Context(), b, bundle.Seq( + err := bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), terraform.Interpolate(), terraform.Write(), @@ -42,13 +44,31 @@ func newRunCommand() *cobra.Command { return err } + // If no arguments are specified, prompt the user to select something to run. + if len(args) == 0 && cmdio.IsInteractive(ctx) { + // Invert completions from KEY -> NAME, to NAME -> KEY. 
+ inv := make(map[string]string) + for k, v := range run.ResourceCompletionMap(b) { + inv[v] = k + } + id, err := cmdio.Select(ctx, inv, "Resource to run") + if err != nil { + return err + } + args = append(args, id) + } + + if len(args) != 1 { + return fmt.Errorf("expected a KEY of the resource to run") + } + runner, err := run.Find(b, args[0]) if err != nil { return err } runOptions.NoWait = noWait - output, err := runner.Run(cmd.Context(), &runOptions) + output, err := runner.Run(ctx, &runOptions) if err != nil { return err } From 3cb74e72a85071a8a04dc20bace5ac99aa1daaed Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 12 Sep 2023 15:28:53 +0200 Subject: [PATCH 094/310] Run environment related tests in a pristine environment (#769) ## Changes If the caller running the test has one or more environment variables that are used in the test already set, they can interfere and make tests fail. ## Tests Ran tests in `./cmd/root` with Databricks related environment variables set. --- cmd/root/auth_test.go | 5 +++++ cmd/root/bundle_test.go | 19 +++++++++++++------ internal/testutil/env.go | 4 ++++ 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 70a52d50..30fa9a08 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/databricks-sdk-go/config" "github.com/stretchr/testify/assert" @@ -65,6 +66,8 @@ func expectReturns(t *testing.T, fn promptFn, config *config.Config) { } func TestAccountClientOrPrompt(t *testing.T) { + testutil.CleanupEnvironment(t) + dir := t.TempDir() configFile := filepath.Join(dir, ".databrickscfg") err := os.WriteFile( @@ -127,6 +130,8 @@ func TestAccountClientOrPrompt(t *testing.T) { } func TestWorkspaceClientOrPrompt(t *testing.T) { + testutil.CleanupEnvironment(t) + dir := t.TempDir() configFile := filepath.Join(dir, ".databrickscfg") err := os.WriteFile( diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 09b33d58..3f9641b7 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/testutil" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" ) @@ -56,6 +57,8 @@ func setup(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle { } func TestBundleConfigureDefault(t *testing.T) { + testutil.CleanupEnvironment(t) + cmd := emptyCommand(t) b := setup(t, cmd, "https://x.com") assert.NotPanics(t, func() { @@ -64,6 +67,8 @@ func TestBundleConfigureDefault(t *testing.T) { } func TestBundleConfigureWithMultipleMatches(t *testing.T) { + testutil.CleanupEnvironment(t) + cmd := emptyCommand(t) b := setup(t, cmd, "https://a.com") assert.Panics(t, func() { @@ -72,6 +77,8 @@ func TestBundleConfigureWithMultipleMatches(t *testing.T) { } func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { + testutil.CleanupEnvironment(t) + cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("NOEXIST") @@ -82,6 +89,8 @@ func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { } func TestBundleConfigureWithMismatchedProfile(t *testing.T) { + testutil.CleanupEnvironment(t) + cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("PROFILE-1") @@ -92,6 +101,8 @@ func TestBundleConfigureWithMismatchedProfile(t *testing.T) { } func TestBundleConfigureWithCorrectProfile(t 
*testing.T) {
+	testutil.CleanupEnvironment(t)
+
 	cmd := emptyCommand(t)
 	cmd.Flag("profile").Value.Set("PROFILE-1")
 
@@ -102,10 +113,8 @@ func TestBundleConfigureWithCorrectProfile(t *testing.T) {
 }
 
 func TestBundleConfigureWithMismatchedProfileEnvVariable(t *testing.T) {
+	testutil.CleanupEnvironment(t)
 	t.Setenv("DATABRICKS_CONFIG_PROFILE", "PROFILE-1")
-	t.Cleanup(func() {
-		t.Setenv("DATABRICKS_CONFIG_PROFILE", "")
-	})
 
 	cmd := emptyCommand(t)
 	b := setup(t, cmd, "https://x.com")
@@ -115,10 +124,8 @@ func TestBundleConfigureWithMismatchedProfileEnvVariable(t *testing.T) {
 }
 
 func TestBundleConfigureWithProfileFlagAndEnvVariable(t *testing.T) {
+	testutil.CleanupEnvironment(t)
 	t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST")
-	t.Cleanup(func() {
-		t.Setenv("DATABRICKS_CONFIG_PROFILE", "")
-	})
 
 	cmd := emptyCommand(t)
 	cmd.Flag("profile").Value.Set("PROFILE-1")
diff --git a/internal/testutil/env.go b/internal/testutil/env.go
index 05ffaf00..11a61018 100644
--- a/internal/testutil/env.go
+++ b/internal/testutil/env.go
@@ -2,6 +2,7 @@ package testutil
 
 import (
 	"os"
+	"runtime"
 	"strings"
 	"testing"
 )
@@ -30,4 +31,7 @@ func CleanupEnvironment(t *testing.T) {
 	// because of isolation; the environment is scoped to the process.
 	t.Setenv("PATH", path)
 	t.Setenv("HOME", pwd)
+	if runtime.GOOS == "windows" {
+		t.Setenv("USERPROFILE", pwd)
+	}
 }

From 21ff71ceea0dc1b03747e11ad73fa77cf4ff18e1 Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Tue, 12 Sep 2023 15:38:43 +0200
Subject: [PATCH 095/310] Add documentation link to bundle command group
 description (#770)

Help output:
```
shreyas.goenka@THW32HFW6T ~ % cli bundle -h
Databricks Asset Bundles.
Documentation URL: https://docs.databricks.com/en/dev-tools/bundles.

Usage:
  databricks bundle [command]
```

---------

Co-authored-by: Pieter Noordhuis
---
 cmd/bundle/bundle.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go
index c933ec9c..d8382d17 100644
--- a/cmd/bundle/bundle.go
+++ b/cmd/bundle/bundle.go
@@ -7,7 +7,7 @@ func New() *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "bundle",
-		Short: "Databricks Asset Bundles",
+		Short: "Databricks Asset Bundles\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles",
 	}
 
 	initVariableFlag(cmd)

From 96d807fb858ef0a413497ea32c86d117f4f7d91d Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Tue, 12 Sep 2023 16:35:36 +0200
Subject: [PATCH 096/310] Release v0.205.0 (#771)

This release marks the public preview phase of Databricks Asset Bundles.

For more information, please refer to our online documentation at
https://docs.databricks.com/en/dev-tools/bundles/.

CLI:
 * Prompt once for a client profile ([#727](https://github.com/databricks/cli/pull/727)).

Bundles:
 * Use clearer error message when no interpolation value is found. ([#764](https://github.com/databricks/cli/pull/764)).
 * Use interactive prompt to select resource to run if not specified ([#762](https://github.com/databricks/cli/pull/762)).
 * Add documentation link to bundle command group description ([#770](https://github.com/databricks/cli/pull/770)).

---
 CHANGELOG.md | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ba0dbcdc..867e086b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,21 @@
 # Version changelog
 
+## 0.205.0
+
+This release marks the public preview phase of Databricks Asset Bundles.
+
+For more information, please refer to our online documentation at
+https://docs.databricks.com/en/dev-tools/bundles/.
+
+CLI:
+ * Prompt once for a client profile ([#727](https://github.com/databricks/cli/pull/727)).
+
+Bundles:
+ * Use clearer error message when no interpolation value is found. ([#764](https://github.com/databricks/cli/pull/764)).
+ * Use interactive prompt to select resource to run if not specified ([#762](https://github.com/databricks/cli/pull/762)).
+ * Add documentation link to bundle command group description ([#770](https://github.com/databricks/cli/pull/770)).
+
+
 ## 0.204.1
 
 Bundles:

From be55310cc9640875c7e30ecc114193c439581f0d Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Wed, 13 Sep 2023 19:57:31 +0200
Subject: [PATCH 097/310] Use enums for default python template (#765)

## Changes
This PR changes the schema to use the enum type for the default template
yes/no questions.

## Tests
Manually

---
 .../default-python/databricks_template_schema.json | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/libs/template/templates/default-python/databricks_template_schema.json b/libs/template/templates/default-python/databricks_template_schema.json
index 22c65f30..db8adcce 100644
--- a/libs/template/templates/default-python/databricks_template_schema.json
+++ b/libs/template/templates/default-python/databricks_template_schema.json
@@ -7,26 +7,23 @@
             "order": 1
         },
         "include_notebook": {
-            "todo": "use an enum here, see https://github.com/databricks/cli/pull/668",
             "type": "string",
             "default": "yes",
-            "pattern": "^(yes|no)$",
+            "enum": ["yes", "no"],
             "description": "Include a stub (sample) notebook in 'my_project/src'",
             "order": 2
         },
         "include_dlt": {
-            "todo": "use an enum here, see https://github.com/databricks/cli/pull/668",
             "type": "string",
             "default": "yes",
-            "pattern": "^(yes|no)$",
+            "enum": ["yes", "no"],
             "description": "Include a stub (sample) DLT pipeline in 'my_project/src'",
             "order": 3
         },
         "include_python": {
-            "todo": "use an enum here, see https://github.com/databricks/cli/pull/668",
             "type": "string",
             "default": "yes",
-            "pattern": "^(yes|no)$",
+            "enum": ["yes", "no"],
             "description": "Include a stub (sample) Python package 'my_project/src'",
             "order": 4
         }

From fe32c46dc88383e8bc14ddd40339052a0948b944 Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Thu, 14 Sep 2023 00:50:37 +0200
Subject: [PATCH 098/310] Make bundle deploy work if no resources are defined (#767)

## Changes
This PR sets "resource" to nil in the terraform representation if no
resources are defined in the bundle configuration. This solves two
problems:

1. Makes bundle deploy work without any resources specified.
2. Previously, if a `resources` block was removed after a deployment, the
next deployment would fail with an error. Now the resources would get
destroyed as expected.

Also removes `TerraformHasNoResources` which is no longer needed.

## Tests
New e2e tests.
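## Example

For illustration, a bundle without any resources now generates a `bundle.tf.json` along
these lines (a minimal sketch rather than verbatim CLI output; the exact `terraform` and
`provider` blocks are generated and may differ):

```json
{
  "terraform": {
    "required_providers": {
      "databricks": {
        "source": "databricks/databricks"
      }
    }
  },
  "provider": {
    "databricks": {}
  }
}
```

The important part is that the top-level `resource` key is omitted entirely instead of
being serialized as an empty object, which the terraform CLI rejects. With zero resources
in the file, applying it destroys whatever is still tracked in state, which is what makes
removing a `resources` block work as expected.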
--- bundle/bundle.go | 4 -- bundle/deploy/terraform/apply.go | 4 -- bundle/deploy/terraform/convert.go | 10 +++- bundle/deploy/terraform/convert_test.go | 22 ++++---- bundle/deploy/terraform/write.go | 3 +- .../databricks_template_schema.json | 8 +++ .../template/databricks.yml.tmpl | 8 +++ .../template/foo.py | 1 + .../template/resources.yml.tmpl | 7 +++ .../bundles/empty_bundle/databricks.yml | 2 + .../deploy_then_remove_resources_test.go | 55 +++++++++++++++++++ internal/bundle/empty_bundle_test.go | 37 +++++++++++++ 12 files changed, 138 insertions(+), 23 deletions(-) create mode 100644 internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json create mode 100644 internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/deploy_then_remove_resources/template/foo.py create mode 100644 internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl create mode 100644 internal/bundle/bundles/empty_bundle/databricks.yml create mode 100644 internal/bundle/deploy_then_remove_resources_test.go create mode 100644 internal/bundle/empty_bundle_test.go diff --git a/bundle/bundle.go b/bundle/bundle.go index 4fc60539..61bf1ffe 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -38,10 +38,6 @@ type Bundle struct { // Stores an initialized copy of this bundle's Terraform wrapper. Terraform *tfexec.Terraform - // Indicates that the Terraform definition based on this bundle is empty, - // i.e. that it would deploy no resources. - TerraformHasNoResources bool - // Stores the locker responsible for acquiring/releasing a deployment lock. Locker *locker.Locker diff --git a/bundle/deploy/terraform/apply.go b/bundle/deploy/terraform/apply.go index 53cffbba..ab868f76 100644 --- a/bundle/deploy/terraform/apply.go +++ b/bundle/deploy/terraform/apply.go @@ -16,10 +16,6 @@ func (w *apply) Name() string { } func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error { - if b.TerraformHasNoResources { - cmdio.LogString(ctx, "Note: there are no resources to deploy for this bundle") - return nil - } tf := b.Terraform if tf == nil { return fmt.Errorf("terraform not initialized") diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 0956ea7b..7d95e719 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -49,7 +49,7 @@ func convPermission(ac resources.Permission) schema.ResourcePermissionsAccessCon // // NOTE: THIS IS CURRENTLY A HACK. WE NEED A BETTER WAY TO // CONVERT TO/FROM TERRAFORM COMPATIBLE FORMAT. -func BundleToTerraform(config *config.Root) (*schema.Root, bool) { +func BundleToTerraform(config *config.Root) *schema.Root { tfroot := schema.NewRoot() tfroot.Provider = schema.NewProviders() tfroot.Resource = schema.NewResources() @@ -174,7 +174,13 @@ func BundleToTerraform(config *config.Root) (*schema.Root, bool) { } } - return tfroot, noResources + // We explicitly set "resource" to nil to omit it from a JSON encoding. + // This is required because the terraform CLI requires >= 1 resources defined + // if the "resource" property is used in a .tf.json file. 
+ if noResources { + tfroot.Resource = nil + } + return tfroot } func TerraformToBundle(state *tfjson.State, config *config.Root) error { diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index ad626606..b6b29f35 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -51,7 +51,7 @@ func TestConvertJob(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.Equal(t, "my job", out.Resource.Job["my_job"].Name) assert.Len(t, out.Resource.Job["my_job"].JobCluster, 1) assert.Equal(t, "https://github.com/foo/bar", out.Resource.Job["my_job"].GitSource.Url) @@ -79,7 +79,7 @@ func TestConvertJobPermissions(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["job_my_job"].JobId) assert.Len(t, out.Resource.Permissions["job_my_job"].AccessControl, 1) @@ -115,7 +115,7 @@ func TestConvertJobTaskLibraries(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.Equal(t, "my job", out.Resource.Job["my_job"].Name) require.Len(t, out.Resource.Job["my_job"].Task, 1) require.Len(t, out.Resource.Job["my_job"].Task[0].Library, 1) @@ -149,7 +149,7 @@ func TestConvertPipeline(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.Equal(t, "my pipeline", out.Resource.Pipeline["my_pipeline"].Name) assert.Len(t, out.Resource.Pipeline["my_pipeline"].Library, 2) assert.Nil(t, out.Data) @@ -173,7 +173,7 @@ func TestConvertPipelinePermissions(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["pipeline_my_pipeline"].PipelineId) assert.Len(t, out.Resource.Permissions["pipeline_my_pipeline"].AccessControl, 1) @@ -208,7 +208,7 @@ func TestConvertModel(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.Equal(t, "name", out.Resource.MlflowModel["my_model"].Name) assert.Equal(t, "description", out.Resource.MlflowModel["my_model"].Description) assert.Len(t, out.Resource.MlflowModel["my_model"].Tags, 2) @@ -237,7 +237,7 @@ func TestConvertModelPermissions(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["mlflow_model_my_model"].RegisteredModelId) assert.Len(t, out.Resource.Permissions["mlflow_model_my_model"].AccessControl, 1) @@ -261,7 +261,7 @@ func TestConvertExperiment(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.Equal(t, "name", out.Resource.MlflowExperiment["my_experiment"].Name) assert.Nil(t, out.Data) } @@ -284,7 +284,7 @@ func TestConvertExperimentPermissions(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["mlflow_experiment_my_experiment"].ExperimentId) assert.Len(t, out.Resource.Permissions["mlflow_experiment_my_experiment"].AccessControl, 1) @@ -327,7 +327,7 @@ func TestConvertModelServing(t *testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) resource := out.Resource.ModelServing["my_model_serving_endpoint"] assert.Equal(t, "name", resource.Name) assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName) @@ -357,7 +357,7 @@ func TestConvertModelServingPermissions(t 
*testing.T) { }, } - out, _ := BundleToTerraform(&config) + out := BundleToTerraform(&config) assert.NotEmpty(t, out.Resource.Permissions["model_serving_my_model_serving_endpoint"].ServingEndpointId) assert.Len(t, out.Resource.Permissions["model_serving_my_model_serving_endpoint"].AccessControl, 1) diff --git a/bundle/deploy/terraform/write.go b/bundle/deploy/terraform/write.go index eca79ad2..b53f9069 100644 --- a/bundle/deploy/terraform/write.go +++ b/bundle/deploy/terraform/write.go @@ -21,8 +21,7 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - root, noResources := BundleToTerraform(&b.Config) - b.TerraformHasNoResources = noResources + root := BundleToTerraform(&b.Config) f, err := os.Create(filepath.Join(dir, "bundle.tf.json")) if err != nil { return err diff --git a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json b/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json new file mode 100644 index 00000000..cfed842c --- /dev/null +++ b/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json @@ -0,0 +1,8 @@ +{ + "properties": { + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + } + } +} diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl b/internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl new file mode 100644 index 00000000..c0e840c8 --- /dev/null +++ b/internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl @@ -0,0 +1,8 @@ +bundle: + name: deploy-then-remove + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + +include: + - "./*.yml" diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/foo.py b/internal/bundle/bundles/deploy_then_remove_resources/template/foo.py new file mode 100644 index 00000000..11b15b1a --- /dev/null +++ b/internal/bundle/bundles/deploy_then_remove_resources/template/foo.py @@ -0,0 +1 @@ +print("hello") diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl b/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl new file mode 100644 index 00000000..b74344e4 --- /dev/null +++ b/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl @@ -0,0 +1,7 @@ +resources: + pipelines: + bar: + name: test-bundle-pipeline-{{.unique_id}} + libraries: + - notebook: + path: "./foo.py" diff --git a/internal/bundle/bundles/empty_bundle/databricks.yml b/internal/bundle/bundles/empty_bundle/databricks.yml new file mode 100644 index 00000000..efc62782 --- /dev/null +++ b/internal/bundle/bundles/empty_bundle/databricks.yml @@ -0,0 +1,2 @@ +bundle: + name: abc diff --git a/internal/bundle/deploy_then_remove_resources_test.go b/internal/bundle/deploy_then_remove_resources_test.go new file mode 100644 index 00000000..73860593 --- /dev/null +++ b/internal/bundle/deploy_then_remove_resources_test.go @@ -0,0 +1,55 @@ +package bundle + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/databricks-sdk-go" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccBundleDeployThenRemoveResources(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, "deploy_then_remove_resources", 
map[string]any{ + "unique_id": uniqueId, + }) + require.NoError(t, err) + + // deploy pipeline + err = deployBundle(t, bundleRoot) + require.NoError(t, err) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + // assert pipeline is created + pipelineName := "test-bundle-pipeline-" + uniqueId + pipeline, err := w.Pipelines.GetByName(context.Background(), pipelineName) + require.NoError(t, err) + assert.Equal(t, pipeline.Name, pipelineName) + + // delete resources.yml + err = os.Remove(filepath.Join(bundleRoot, "resources.yml")) + require.NoError(t, err) + + // deploy again + err = deployBundle(t, bundleRoot) + require.NoError(t, err) + + // assert pipeline is deleted + _, err = w.Pipelines.GetByName(context.Background(), pipelineName) + assert.ErrorContains(t, err, "does not exist") + + t.Cleanup(func() { + err = destroyBundle(t, bundleRoot) + require.NoError(t, err) + }) +} diff --git a/internal/bundle/empty_bundle_test.go b/internal/bundle/empty_bundle_test.go new file mode 100644 index 00000000..9b39368f --- /dev/null +++ b/internal/bundle/empty_bundle_test.go @@ -0,0 +1,37 @@ +package bundle + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccEmptyBundleDeploy(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + // create empty bundle + tmpDir := t.TempDir() + f, err := os.Create(filepath.Join(tmpDir, "databricks.yml")) + require.NoError(t, err) + + bundleRoot := fmt.Sprintf(`bundle: + name: %s`, uuid.New().String()) + _, err = f.WriteString(bundleRoot) + require.NoError(t, err) + f.Close() + + // deploy empty bundle + err = deployBundle(t, tmpDir) + require.NoError(t, err) + + t.Cleanup(func() { + err = destroyBundle(t, tmpDir) + require.NoError(t, err) + }) +} From 953dcb4972fe10320d4f5fa173851a86b4429083 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 14 Sep 2023 12:14:13 +0200 Subject: [PATCH 099/310] Added support for experimental scripts section (#632) ## Changes Added support for experimental scripts section It allows execution of arbitrary bash commands during certain bundle lifecycle steps. ## Tests Example of configuration ```yaml bundle: name: wheel-task workspace: host: *** experimental: scripts: prebuild: | echo 'Prebuild 1' echo 'Prebuild 2' postbuild: "echo 'Postbuild 1' && echo 'Postbuild 2'" predeploy: | echo 'Checking go version...' go version postdeploy: | echo 'Checking python version...' python --version resources: jobs: test_job: name: "[${bundle.environment}] My Wheel Job" tasks: - task_key: TestTask existing_cluster_id: "***" python_wheel_task: package_name: "my_test_code" entry_point: "run" libraries: - whl: ./dist/*.whl ``` Output ```bash andrew.nester@HFW9Y94129 wheel % databricks bundle deploy artifacts.whl.AutoDetect: Detecting Python wheel project... artifacts.whl.AutoDetect: Found Python wheel project at /Users/andrew.nester/dabs/wheel 'Prebuild 1' 'Prebuild 2' artifacts.whl.Build(my_test_code): Building... artifacts.whl.Build(my_test_code): Build succeeded 'Postbuild 1' 'Postbuild 2' 'Checking go version...' go version go1.19.9 darwin/arm64 Starting upload of bundle files Uploaded bundle files at /Users/andrew.nester@databricks.com/.bundle/wheel-task/default/files! artifacts.Upload(my_test_code-0.0.0a0-py3-none-any.whl): Uploading... artifacts.Upload(my_test_code-0.0.0a0-py3-none-any.whl): Upload succeeded Starting resource deployment Resource deployment completed! 
'Checking python version...' Python 2.7.18 ``` --- bundle/config/experimental.go | 18 +++++++ bundle/config/mutator/mutator.go | 3 ++ bundle/config/root.go | 2 + bundle/phases/build.go | 4 ++ bundle/phases/deploy.go | 4 ++ bundle/phases/initialize.go | 3 ++ bundle/scripts/scripts.go | 91 ++++++++++++++++++++++++++++++++ bundle/scripts/scripts_test.go | 32 +++++++++++ 8 files changed, 157 insertions(+) create mode 100644 bundle/config/experimental.go create mode 100644 bundle/scripts/scripts.go create mode 100644 bundle/scripts/scripts_test.go diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go new file mode 100644 index 00000000..be0e7d8f --- /dev/null +++ b/bundle/config/experimental.go @@ -0,0 +1,18 @@ +package config + +type Experimental struct { + Scripts map[ScriptHook]Command `json:"scripts,omitempty"` +} + +type Command string +type ScriptHook string + +// These hook names are subject to change and currently experimental +const ( + ScriptPreInit ScriptHook = "preinit" + ScriptPostInit ScriptHook = "postinit" + ScriptPreBuild ScriptHook = "prebuild" + ScriptPostBuild ScriptHook = "postbuild" + ScriptPreDeploy ScriptHook = "predeploy" + ScriptPostDeploy ScriptHook = "postdeploy" +) diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index ff1f96f5..aa762e8e 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -2,10 +2,13 @@ package mutator import ( "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/scripts" ) func DefaultMutators() []bundle.Mutator { return []bundle.Mutator{ + scripts.Execute(config.ScriptPreInit), ProcessRootIncludes(), DefineDefaultTarget(), LoadGitDetails(), diff --git a/bundle/config/root.go b/bundle/config/root.go index 0377f60a..465d8a62 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -84,6 +84,8 @@ type Root struct { // RunAs section allows to define an execution identity for jobs and pipelines runs RunAs *jobs.JobRunAs `json:"run_as,omitempty"` + + Experimental *Experimental `json:"experimental,omitempty"` } func Load(path string) (*Root, error) { diff --git a/bundle/phases/build.go b/bundle/phases/build.go index fe90c369..760967fc 100644 --- a/bundle/phases/build.go +++ b/bundle/phases/build.go @@ -3,7 +3,9 @@ package phases import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/interpolation" + "github.com/databricks/cli/bundle/scripts" ) // The build phase builds artifacts. 
@@ -11,9 +13,11 @@ func Build() bundle.Mutator { return newPhase( "build", []bundle.Mutator{ + scripts.Execute(config.ScriptPreBuild), artifacts.DetectPackages(), artifacts.InferMissingProperties(), artifacts.BuildAll(), + scripts.Execute(config.ScriptPostBuild), interpolation.Interpolate( interpolation.IncludeLookupsInPath("artifacts"), ), diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 5a9a7f2f..a8ca7518 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -3,17 +3,20 @@ package phases import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/bundle/deploy/lock" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/bundle/python" + "github.com/databricks/cli/bundle/scripts" ) // The deploy phase deploys artifacts and resources. func Deploy() bundle.Mutator { deployMutator := bundle.Seq( + scripts.Execute(config.ScriptPreDeploy), lock.Acquire(), bundle.Defer( bundle.Seq( @@ -31,6 +34,7 @@ func Deploy() bundle.Mutator { ), lock.Release(lock.GoalDeploy), ), + scripts.Execute(config.ScriptPostDeploy), ) return newPhase( diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 546a8478..431fe27d 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -2,10 +2,12 @@ package phases import ( "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/interpolation" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/bundle/scripts" ) // The initialize phase fills in defaults and connects to the workspace. 
@@ -30,6 +32,7 @@ func Initialize() bundle.Mutator {
 			mutator.ProcessTargetMode(),
 			mutator.TranslatePaths(),
 			terraform.Initialize(),
+			scripts.Execute(config.ScriptPostInit),
 		},
 	)
 }
diff --git a/bundle/scripts/scripts.go b/bundle/scripts/scripts.go
new file mode 100644
index 00000000..1a8a471c
--- /dev/null
+++ b/bundle/scripts/scripts.go
@@ -0,0 +1,91 @@
+package scripts
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"io"
+	"os/exec"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/log"
+)
+
+func Execute(hook config.ScriptHook) bundle.Mutator {
+	return &script{
+		scriptHook: hook,
+	}
+}
+
+type script struct {
+	scriptHook config.ScriptHook
+}
+
+func (m *script) Name() string {
+	return fmt.Sprintf("scripts.%s", m.scriptHook)
+}
+
+func (m *script) Apply(ctx context.Context, b *bundle.Bundle) error {
+	cmd, out, err := executeHook(ctx, b, m.scriptHook)
+	if err != nil {
+		return err
+	}
+	if cmd == nil {
+		log.Debugf(ctx, "No script defined for %s, skipping", m.scriptHook)
+		return nil
+	}
+
+	cmdio.LogString(ctx, fmt.Sprintf("Executing '%s' script", m.scriptHook))
+
+	reader := bufio.NewReader(out)
+	line, err := reader.ReadString('\n')
+	for err == nil {
+		cmdio.LogString(ctx, strings.TrimSpace(line))
+		line, err = reader.ReadString('\n')
+	}
+
+	return cmd.Wait()
+}
+
+func executeHook(ctx context.Context, b *bundle.Bundle, hook config.ScriptHook) (*exec.Cmd, io.Reader, error) {
+	command := getCommmand(b, hook)
+	if command == "" {
+		return nil, nil, nil
+	}
+
+	interpreter, err := findInterpreter()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cmd := exec.CommandContext(ctx, interpreter, "-c", string(command))
+	cmd.Dir = b.Config.Path
+
+	outPipe, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	errPipe, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return cmd, io.MultiReader(outPipe, errPipe), cmd.Start()
+}
+
+func getCommmand(b *bundle.Bundle, hook config.ScriptHook) config.Command {
+	if b.Config.Experimental == nil || b.Config.Experimental.Scripts == nil {
+		return ""
+	}
+
+	return b.Config.Experimental.Scripts[hook]
+}
+
+func findInterpreter() (string, error) {
+	// At the moment we just return 'sh' on all platforms and use it to execute scripts
+	return "sh", nil
+}
diff --git a/bundle/scripts/scripts_test.go b/bundle/scripts/scripts_test.go
new file mode 100644
index 00000000..8b7aa0d1
--- /dev/null
+++ b/bundle/scripts/scripts_test.go
@@ -0,0 +1,32 @@
+package scripts
+
+import (
+	"bufio"
+	"context"
+	"strings"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/stretchr/testify/require"
+)
+
+func TestExecutesHook(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Experimental: &config.Experimental{
+				Scripts: map[config.ScriptHook]config.Command{
+					config.ScriptPreBuild: "echo 'Hello'",
+				},
+			},
+		},
+	}
+	_, out, err := executeHook(context.Background(), b, config.ScriptPreBuild)
+	require.NoError(t, err)
+
+	reader := bufio.NewReader(out)
+	line, err := reader.ReadString('\n')
+
+	require.NoError(t, err)
+	require.Equal(t, "Hello", strings.TrimSpace(line))
+}

From 327ab0e598cbcf4aff0e386b40fa9c9da982dd08 Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Thu, 14 Sep 2023 17:53:20 +0200
Subject: [PATCH 100/310] Error when unknown keys are encountered during template execution
 (#766)

## Tests
New unit test and manually

---
 internal/init_test.go                       | 15 +++++++++++++
 .../databricks_template_schema.json         |  8 +++++++
 .../field-does-not-exist/template/bar.tmpl  |  3 +++
 libs/template/renderer.go                   | 21 +++++++++++++++++++
 libs/template/renderer_test.go              | 12 +++++++++++
 5 files changed, 59 insertions(+)
 create mode 100644 internal/init_test.go
 create mode 100644 internal/testdata/init/field-does-not-exist/databricks_template_schema.json
 create mode 100644 internal/testdata/init/field-does-not-exist/template/bar.tmpl

diff --git a/internal/init_test.go b/internal/init_test.go
new file mode 100644
index 00000000..a2eda983
--- /dev/null
+++ b/internal/init_test.go
@@ -0,0 +1,15 @@
+package internal
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestAccBundleInitErrorOnUnknownFields(t *testing.T) {
+	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))
+
+	tmpDir := t.TempDir()
+	_, _, err := RequireErrorRun(t, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir)
+	assert.EqualError(t, err, "failed to compute file content for bar.tmpl. variable \"does_not_exist\" not defined")
+}
diff --git a/internal/testdata/init/field-does-not-exist/databricks_template_schema.json b/internal/testdata/init/field-does-not-exist/databricks_template_schema.json
new file mode 100644
index 00000000..c37fc089
--- /dev/null
+++ b/internal/testdata/init/field-does-not-exist/databricks_template_schema.json
@@ -0,0 +1,8 @@
+{
+    "properties": {
+        "foo": {
+            "type": "string",
+            "default": "abc"
+        }
+    }
+}
diff --git a/internal/testdata/init/field-does-not-exist/template/bar.tmpl b/internal/testdata/init/field-does-not-exist/template/bar.tmpl
new file mode 100644
index 00000000..95f8d250
--- /dev/null
+++ b/internal/testdata/init/field-does-not-exist/template/bar.tmpl
@@ -0,0 +1,3 @@
+{{.foo}}
+{{.does_not_exist}}
+hello, world
diff --git a/libs/template/renderer.go b/libs/template/renderer.go
index f674ea0f..09ccc3f5 100644
--- a/libs/template/renderer.go
+++ b/libs/template/renderer.go
@@ -8,6 +8,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"regexp"
 	"slices"
 	"sort"
 	"strings"
@@ -102,6 +103,12 @@ func (r *renderer) executeTemplate(templateDefinition string) (string, error) {
 		return "", err
 	}
 
+	// The template execution will error instead of printing <no value> on unknown
+	// map keys if the "missingkey=error" option is set.
+	// We do this here instead of doing this once for r.baseTemplate because
+	// the Template.Clone() method does not clone options.
+	tmpl = tmpl.Option("missingkey=error")
+
 	// Parse the template text
 	tmpl, err = tmpl.Parse(templateDefinition)
 	if err != nil {
@@ -112,6 +119,20 @@
 	result := strings.Builder{}
 	err = tmpl.Execute(&result, r.config)
 	if err != nil {
+		// Parse and return a more readable error for missing values that are used
+		// by the template definition but are not provided in the passed config.
+		target := &template.ExecError{}
+		if errors.As(err, target) {
+			captureRegex := regexp.MustCompile(`map has no entry for key "(.*)"`)
+			matches := captureRegex.FindStringSubmatch(target.Err.Error())
+			if len(matches) != 2 {
+				return "", err
+			}
+			return "", template.ExecError{
+				Name: target.Name,
+				Err:  fmt.Errorf("variable %q not defined", matches[1]),
+			}
+		}
 		return "", err
 	}
 	return result.String(), nil
diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go
index 21dd1e4f..8f8a8291 100644
--- a/libs/template/renderer_test.go
+++ b/libs/template/renderer_test.go
@@ -189,6 +189,18 @@ My email is {{template "email"}}
 	assert.Contains(t, statement, `My email is hrithik.roshan@databricks.com`)
 }
 
+func TestRendererExecuteTemplateWithUnknownProperty(t *testing.T) {
+	templateText := `{{.does_not_exist}}`
+
+	r := renderer{
+		config:       map[string]any{},
+		baseTemplate: template.New("base"),
+	}
+
+	_, err := r.executeTemplate(templateText)
+	assert.ErrorContains(t, err, "variable \"does_not_exist\" not defined")
+}
+
 func TestRendererIsSkipped(t *testing.T) {
 	skipPatterns := []string{"a*", "*yz", "def", "a/b/*"}
 
From 2c58deb2c5a340f6db7e6c6f803ed49ddb4d7367 Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Fri, 15 Sep 2023 11:14:51 +0200
Subject: [PATCH 101/310] Fall back to full Git clone if shallow clone is not
 supported (#775)

## Changes
Git repos hosted over HTTP do not support shallow cloning. This PR adds
retry logic if we detect shallow cloning is not supported.

Note I saw the match string `dumb http transport does not support
shallow capabilities` being reported for different hosts on the
internet, so this should work across a large class of git servers.
However, it's not strictly necessary to have the `--depth` flag, so we
can remove it if this issue is reported again.

## Tests
Tested manually. `bundle init` successfully downloads the private HTTP
repo reported by an internal user.

---
 libs/git/clone.go      | 50 ++++++++++++++++++++++++++++--------------
 libs/git/clone_test.go | 14 ++++++++++--
 2 files changed, 46 insertions(+), 18 deletions(-)

diff --git a/libs/git/clone.go b/libs/git/clone.go
index 8b075cde..af7ffa4b 100644
--- a/libs/git/clone.go
+++ b/libs/git/clone.go
@@ -25,31 +25,23 @@ type cloneOptions struct {
 	// Local path to clone repository at
 	TargetPath string
+
+	// If true, the repository is shallow cloned
+	Shallow bool
 }
 
 func (opts cloneOptions) args() []string {
-	args := []string{"clone", opts.RepositoryUrl, opts.TargetPath, "--depth=1", "--no-tags"}
+	args := []string{"clone", opts.RepositoryUrl, opts.TargetPath, "--no-tags"}
 	if opts.Reference != "" {
 		args = append(args, "--branch", opts.Reference)
 	}
+	if opts.Shallow {
+		args = append(args, "--depth=1")
+	}
 	return args
 }
 
-func Clone(ctx context.Context, url, reference, targetPath string) error {
-	// We assume only the repository name has been if input does not contain any
-	// `/` characters and the url is only made up of alphanumeric characters and
-	// ".", "_" and "-". This repository is resolved again databricks github account.
-	fullUrl := url
-	if githubRepoRegex.MatchString(url) {
-		fullUrl = strings.Join([]string{githubUrl, databricksOrg, url}, "/")
-	}
-
-	opts := cloneOptions{
-		Reference:     reference,
-		RepositoryUrl: fullUrl,
-		TargetPath:    targetPath,
-	}
-
+func (opts cloneOptions) clone(ctx context.Context) error {
 	cmd := exec.CommandContext(ctx, "git", opts.args()...)
 	var cmdErr bytes.Buffer
 	cmd.Stderr = &cmdErr
@@ -70,3 +62,29 @@ func Clone(ctx context.Context, url, reference, targetPath string) error {
 	}
 	return nil
 }
+
+func Clone(ctx context.Context, url, reference, targetPath string) error {
+	// We assume only the repository name has been provided if input does not contain any
+	// `/` characters and the url is only made up of alphanumeric characters and
+	// ".", "_" and "-". This repository is resolved against the databricks github account.
+	fullUrl := url
+	if githubRepoRegex.MatchString(url) {
+		fullUrl = strings.Join([]string{githubUrl, databricksOrg, url}, "/")
+	}
+
+	opts := cloneOptions{
+		Reference:     reference,
+		RepositoryUrl: fullUrl,
+		TargetPath:    targetPath,
+		Shallow:       true,
+	}
+
+	err := opts.clone(ctx)
+	// Git repos hosted via HTTP do not support shallow cloning. We try with
+	// a deep clone this time.
+	if err != nil && strings.Contains(err.Error(), "dumb http transport does not support shallow capabilities") {
+		opts.Shallow = false
+		return opts.clone(ctx)
+	}
+	return err
+}
diff --git a/libs/git/clone_test.go b/libs/git/clone_test.go
index 8101178f..bed5fa54 100644
--- a/libs/git/clone_test.go
+++ b/libs/git/clone_test.go
@@ -10,17 +10,27 @@ import (
 
 func TestGitCloneArgs(t *testing.T) {
 	// case: No branch / tag specified. In this case git clones the default branch
-	assert.Equal(t, []string{"clone", "abc", "/def", "--depth=1", "--no-tags"}, cloneOptions{
+	assert.Equal(t, []string{"clone", "abc", "/def", "--no-tags", "--depth=1"}, cloneOptions{
 		Reference:     "",
 		RepositoryUrl: "abc",
 		TargetPath:    "/def",
+		Shallow:       true,
 	}.args())
 
 	// case: A branch is specified.
-	assert.Equal(t, []string{"clone", "abc", "/def", "--depth=1", "--no-tags", "--branch", "my-branch"}, cloneOptions{
+	assert.Equal(t, []string{"clone", "abc", "/def", "--no-tags", "--branch", "my-branch", "--depth=1"}, cloneOptions{
 		Reference:     "my-branch",
 		RepositoryUrl: "abc",
 		TargetPath:    "/def",
+		Shallow:       true,
 	}.args())
+
+	// case: deep cloning
+	assert.Equal(t, []string{"clone", "abc", "/def", "--no-tags"}, cloneOptions{
+		Reference:     "",
+		RepositoryUrl: "abc",
+		TargetPath:    "/def",
+		Shallow:       false,
+	}.args())
 }

From b3b00fd226c2928b2c26c8c64f0ea54227b27c3f Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Fri, 15 Sep 2023 16:54:23 +0200
Subject: [PATCH 102/310] Update Go SDK to v0.19.3 (unreleased) (#778)

## Changes
This bump includes:

* A fix for token refreshes on Azure
* A fix for retrying requests without a request body (e.g. GET)

Full comparison at
https://github.com/databricks/databricks-sdk-go/compare/v0.19.1...dacb7f4fc878.
## Tests n/a --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 14c85e67..0d274e8b 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.19.1 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.19.3-0.20230914130855-dacb7f4fc878 // Apache 2.0 github.com/fatih/color v1.15.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.3.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 20c985b0..a7f61bd8 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/databricks/databricks-sdk-go v0.19.1 h1:hP7xZb+Hd8n0grnEcf2FOMn6lWox7vp5KAan3D2hnzM= -github.com/databricks/databricks-sdk-go v0.19.1/go.mod h1:Bt/3i3ry/rQdE6Y+psvkAENlp+LzJHaQK5PsLIstQb4= +github.com/databricks/databricks-sdk-go v0.19.3-0.20230914130855-dacb7f4fc878 h1:BteIFhP/8wlfEF3CMX8YFMb4fRD4T0dvcROmzZTeyWw= +github.com/databricks/databricks-sdk-go v0.19.3-0.20230914130855-dacb7f4fc878/go.mod h1:Bt/3i3ry/rQdE6Y+psvkAENlp+LzJHaQK5PsLIstQb4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 43e2eefc27106763e33a7416bb789fe019a6a0e4 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 18 Sep 2023 16:13:50 +0200 Subject: [PATCH 103/310] Enable environment overrides for job tasks (#779) ## Changes Follow up for https://github.com/databricks/cli/pull/658 When a job definition has multiple job tasks using the same key, it's considered invalid. Instead we should combine those definitions with the same key into one. This is consistent with environment overrides. This way, the override ends up in the original job tasks, and we've got a clear way to put them all together. ## Tests Added unit tests --- bundle/config/resources.go | 11 +++++ bundle/config/resources/job.go | 33 +++++++++++++ bundle/config/resources/job_test.go | 47 +++++++++++++++++++ bundle/config/root.go | 5 ++ .../tests/override_job_tasks/databricks.yml | 44 +++++++++++++++++ bundle/tests/override_job_tasks_test.go | 39 +++++++++++++++ 6 files changed, 179 insertions(+) create mode 100644 bundle/tests/override_job_tasks/databricks.yml create mode 100644 bundle/tests/override_job_tasks_test.go diff --git a/bundle/config/resources.go b/bundle/config/resources.go index c239b510..48621e44 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -141,3 +141,14 @@ func (r *Resources) MergeJobClusters() error { } return nil } + +// MergeTasks iterates over all jobs and merges their tasks. +// This is called after applying the target overrides. 
+func (r *Resources) MergeTasks() error { + for _, job := range r.Jobs { + if err := job.MergeTasks(); err != nil { + return err + } + } + return nil +} diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 66705afb..7fc5b761 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -47,3 +47,36 @@ func (j *Job) MergeJobClusters() error { j.JobClusters = output return nil } + +// MergeTasks merges tasks with the same key. +// The tasks field is a slice, and as such, overrides are appended to it. +// We can identify a task by its task key, however, so we can use this key +// to figure out which definitions are actually overrides and merge them. +func (j *Job) MergeTasks() error { + keys := make(map[string]*jobs.Task) + tasks := make([]jobs.Task, 0, len(j.Tasks)) + + // Target overrides are always appended, so we can iterate in natural order to + // first find the base definition, and merge instances we encounter later. + for i := range j.Tasks { + key := j.Tasks[i].TaskKey + + // Register the task with key if not yet seen before. + ref, ok := keys[key] + if !ok { + tasks = append(tasks, j.Tasks[i]) + keys[key] = &j.Tasks[i] + continue + } + + // Merge this instance into the reference. + err := mergo.Merge(ref, &j.Tasks[i], mergo.WithOverride, mergo.WithAppendSlice) + if err != nil { + return err + } + } + + // Overwrite resulting slice. + j.Tasks = tasks + return nil +} diff --git a/bundle/config/resources/job_test.go b/bundle/config/resources/job_test.go index 2ff3205e..818d2ac2 100644 --- a/bundle/config/resources/job_test.go +++ b/bundle/config/resources/job_test.go @@ -55,3 +55,50 @@ func TestJobMergeJobClusters(t *testing.T) { jc1 := j.JobClusters[1].NewCluster assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion) } + +func TestJobMergeTasks(t *testing.T) { + j := &Job{ + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "foo", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + }, + { + TaskKey: "bar", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "10.4.x-scala2.12", + }, + }, + { + TaskKey: "foo", + NewCluster: &compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + } + + err := j.MergeTasks() + require.NoError(t, err) + + assert.Len(t, j.Tasks, 2) + assert.Equal(t, "foo", j.Tasks[0].TaskKey) + assert.Equal(t, "bar", j.Tasks[1].TaskKey) + + // This task was merged with a subsequent one. + task0 := j.Tasks[0].NewCluster + assert.Equal(t, "13.3.x-scala2.12", task0.SparkVersion) + assert.Equal(t, "i3.2xlarge", task0.NodeTypeId) + assert.Equal(t, 4, task0.NumWorkers) + + // This task was left untouched. 
+ task1 := j.Tasks[1].NewCluster + assert.Equal(t, "10.4.x-scala2.12", task1.SparkVersion) +} diff --git a/bundle/config/root.go b/bundle/config/root.go index 465d8a62..32883c74 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -242,6 +242,11 @@ func (r *Root) MergeTargetOverrides(target *Target) error { if err != nil { return err } + + err = r.Resources.MergeTasks() + if err != nil { + return err + } } if target.Variables != nil { diff --git a/bundle/tests/override_job_tasks/databricks.yml b/bundle/tests/override_job_tasks/databricks.yml new file mode 100644 index 00000000..ddee2879 --- /dev/null +++ b/bundle/tests/override_job_tasks/databricks.yml @@ -0,0 +1,44 @@ +bundle: + name: override_job_tasks + +workspace: + host: https://acme.cloud.databricks.com/ + +resources: + jobs: + foo: + name: job + tasks: + - task_key: key1 + new_cluster: + spark_version: 13.3.x-scala2.12 + spark_python_task: + python_file: ./test1.py + - task_key: key2 + new_cluster: + spark_version: 13.3.x-scala2.12 + spark_python_task: + python_file: ./test2.py + +targets: + development: + resources: + jobs: + foo: + tasks: + - task_key: key1 + new_cluster: + node_type_id: i3.xlarge + num_workers: 1 + + staging: + resources: + jobs: + foo: + tasks: + - task_key: key2 + new_cluster: + node_type_id: i3.2xlarge + num_workers: 4 + spark_python_task: + python_file: ./test3.py diff --git a/bundle/tests/override_job_tasks_test.go b/bundle/tests/override_job_tasks_test.go new file mode 100644 index 00000000..82da04da --- /dev/null +++ b/bundle/tests/override_job_tasks_test.go @@ -0,0 +1,39 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOverrideTasksDev(t *testing.T) { + b := loadTarget(t, "./override_job_tasks", "development") + assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) + assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 2) + + tasks := b.Config.Resources.Jobs["foo"].Tasks + assert.Equal(t, tasks[0].TaskKey, "key1") + assert.Equal(t, tasks[0].NewCluster.NodeTypeId, "i3.xlarge") + assert.Equal(t, tasks[0].NewCluster.NumWorkers, 1) + assert.Equal(t, tasks[0].SparkPythonTask.PythonFile, "./test1.py") + + assert.Equal(t, tasks[1].TaskKey, "key2") + assert.Equal(t, tasks[1].NewCluster.SparkVersion, "13.3.x-scala2.12") + assert.Equal(t, tasks[1].SparkPythonTask.PythonFile, "./test2.py") +} + +func TestOverrideTasksStaging(t *testing.T) { + b := loadTarget(t, "./override_job_tasks", "staging") + assert.Equal(t, "job", b.Config.Resources.Jobs["foo"].Name) + assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 2) + + tasks := b.Config.Resources.Jobs["foo"].Tasks + assert.Equal(t, tasks[0].TaskKey, "key1") + assert.Equal(t, tasks[0].NewCluster.SparkVersion, "13.3.x-scala2.12") + assert.Equal(t, tasks[0].SparkPythonTask.PythonFile, "./test1.py") + + assert.Equal(t, tasks[1].TaskKey, "key2") + assert.Equal(t, tasks[1].NewCluster.NodeTypeId, "i3.2xlarge") + assert.Equal(t, tasks[1].NewCluster.NumWorkers, 4) + assert.Equal(t, tasks[1].SparkPythonTask.PythonFile, "./test3.py") +} From 3a812a61e587bae7c318bee530b435c115a6263f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 19 Sep 2023 21:54:24 +0200 Subject: [PATCH 104/310] Increase timeout waiting for job run to 1 day (#786) ## Changes It's not uncommon for job runs to take more than 2 hours. On the client side, we should not stop waiting for a job to complete if it is intentionally running for a long time. 
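For illustration, the client-side bound amounts to a `context.WithTimeout` around the wait; a minimal sketch, where `waitForRun` is a hypothetical stand-in for the SDK call the CLI actually makes:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// Mirrors the jobRunTimeout constant raised to 24 hours in the diff below.
var jobRunTimeout = 24 * time.Hour

// waitForRun is a hypothetical stand-in for the SDK's run-wait call.
func waitForRun(ctx context.Context) error {
	select {
	case <-time.After(100 * time.Millisecond): // pretend the run finishes quickly
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), jobRunTimeout)
	defer cancel()
	if err := waitForRun(ctx); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("gave up waiting for the job run")
		return
	}
	fmt.Println("job run completed")
}
```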
If a job isn't supposed to run this long, the user can specify a run timeout in the job specification itself. ## Tests n/a --- bundle/run/job.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle/run/job.go b/bundle/run/job.go index 319cd146..b94e8fef 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -86,7 +86,7 @@ func (o *JobOptions) toPayload(jobID int64) (*jobs.RunNow, error) { } // Default timeout for waiting for a job run to complete. -var jobRunTimeout time.Duration = 2 * time.Hour +var jobRunTimeout time.Duration = 24 * time.Hour type jobRunner struct { key From 46996b884dc90d79fc5436535313faf1fc4e80f6 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 20 Sep 2023 13:46:47 +0200 Subject: [PATCH 105/310] Release v0.205.1 (#787) Bundles: * Use enums for default python template ([#765](https://github.com/databricks/cli/pull/765)). * Make bundle deploy work if no resources are defined ([#767](https://github.com/databricks/cli/pull/767)). * Added support for experimental scripts section ([#632](https://github.com/databricks/cli/pull/632)). * Error when unknown keys are encounters during template execution ([#766](https://github.com/databricks/cli/pull/766)). * Fall back to full Git clone if shallow clone is not supported ([#775](https://github.com/databricks/cli/pull/775)). * Enable environment overrides for job tasks ([#779](https://github.com/databricks/cli/pull/779)). * Increase timeout waiting for job run to 1 day ([#786](https://github.com/databricks/cli/pull/786)). Internal: * Update Go SDK to v0.19.3 (unreleased) ([#778](https://github.com/databricks/cli/pull/778)). --- CHANGELOG.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 867e086b..7f86c953 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Version changelog +## 0.205.1 + +Bundles: + * Use enums for default python template ([#765](https://github.com/databricks/cli/pull/765)). + * Make bundle deploy work if no resources are defined ([#767](https://github.com/databricks/cli/pull/767)). + * Added support for experimental scripts section ([#632](https://github.com/databricks/cli/pull/632)). + * Error when unknown keys are encounters during template execution ([#766](https://github.com/databricks/cli/pull/766)). + * Fall back to full Git clone if shallow clone is not supported ([#775](https://github.com/databricks/cli/pull/775)). + * Enable environment overrides for job tasks ([#779](https://github.com/databricks/cli/pull/779)). + * Increase timeout waiting for job run to 1 day ([#786](https://github.com/databricks/cli/pull/786)). + +Internal: + * Update Go SDK to v0.19.3 (unreleased) ([#778](https://github.com/databricks/cli/pull/778)). + + + ## 0.205.0 This release marks the public preview phase of Databricks Asset Bundles. 
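A note on the merge behavior behind the job task and job cluster overrides listed above (#779, #658): both lean on mergo's override semantics. A minimal runnable sketch of that behavior, using an illustrative struct rather than the real `jobs.Task`:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// task is an illustrative stand-in for jobs.Task.
type task struct {
	TaskKey    string
	NodeTypeID string
	NumWorkers int
}

func main() {
	base := task{TaskKey: "foo", NodeTypeID: "i3.xlarge", NumWorkers: 2}
	override := task{TaskKey: "foo", NumWorkers: 4}

	// WithOverride lets non-zero fields in the override win over the base;
	// zero-valued fields (here NodeTypeID) leave the base value untouched.
	if err := mergo.Merge(&base, override, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", base) // {TaskKey:foo NodeTypeID:i3.xlarge NumWorkers:4}
}
```

`WithAppendSlice`, also passed in the patches above, additionally concatenates slice fields instead of replacing them.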
From 4a9dcd3231d54fcc55f1442627203d1bd2741108 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 21 Sep 2023 14:21:39 +0200 Subject: [PATCH 106/310] Added setup Python action (#789) ## Changes Added setup Python action --------- Co-authored-by: Pieter Noordhuis --- .github/workflows/push.yml | 5 +++++ python/env_test.go | 1 + python/runner.go | 7 ++++++- python/runner_test.go | 5 +++++ 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index f0fa2ee6..d1beea44 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -38,6 +38,11 @@ jobs: with: go-version: 1.21.0 + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Set go env run: | echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV diff --git a/python/env_test.go b/python/env_test.go index 5983ce38..487e15b1 100644 --- a/python/env_test.go +++ b/python/env_test.go @@ -9,6 +9,7 @@ import ( ) func TestFreeze(t *testing.T) { + t.Skip("Skipping test until fixing Python installation on GitHub Windows environment") // remove this once equivalent tests for windows have been set up // or this test has been fixed for windows diff --git a/python/runner.go b/python/runner.go index b2946b29..bdf386a0 100644 --- a/python/runner.go +++ b/python/runner.go @@ -82,7 +82,7 @@ func DetectExecutable(ctx context.Context) (string, error) { if err != nil { return "", err } - pyExec = trimmedS(out) + pyExec = getFirstMatch(string(out)) return pyExec, nil } @@ -92,6 +92,11 @@ func execAndPassErr(ctx context.Context, name string, args ...string) ([]byte, e return out, nicerErr(err) } +func getFirstMatch(out string) string { + res := strings.Split(out, "\n") + return strings.Trim(res[0], "\n\r") +} + func nicerErr(err error) error { if err == nil { return nil diff --git a/python/runner_test.go b/python/runner_test.go index b43d218c..3968e27a 100644 --- a/python/runner_test.go +++ b/python/runner_test.go @@ -44,6 +44,11 @@ func TestDetectVirtualEnvFalse(t *testing.T) { assert.Equal(t, "", venvDir) } +func TestGetFirstMatch(t *testing.T) { + matches := "C:\\hostedtoolcache\\windows\\Python\\3.9.13\\x64\\python3.exe\r\nC:\\ProgramData\\Chocolatey\\bin\\python3.exe" + assert.Equal(t, getFirstMatch(matches), "C:\\hostedtoolcache\\windows\\Python\\3.9.13\\x64\\python3.exe") +} + func TestMakeDetectableVenv(t *testing.T) { var temp string defer testTempdir(t, &temp)() From aa9c2a1eabf9fedee1565ab089a06f8e16c06f5f Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 21 Sep 2023 14:38:45 +0200 Subject: [PATCH 107/310] Prompt for profile only in interactive mode (#788) ## Changes Do not prompt for profiles if not in interactive mode ## Tests Running sample Go code ``` cmd := exec.Command("databricks", "auth", "login", "--host", "***") out, err := cmd.CombinedOutput() ``` Before the change ``` Error: ^D exit status 1 ``` After ``` No error (empty output) ``` --- cmd/auth/login.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/cmd/auth/login.go b/cmd/auth/login.go index cf1d5c30..a14c5ebe 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -48,7 +48,7 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { profileFlag := cmd.Flag("profile") if profileFlag != nil && profileFlag.Value.String() != "" { profileName = profileFlag.Value.String() - } else { + } else if cmdio.IsInTTY(ctx) { prompt := cmdio.Prompt(ctx) prompt.Label = "Databricks Profile Name" prompt.Default = 
persistentAuth.ProfileName() @@ -120,13 +120,16 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { cfg.ClusterID = clusterId } - cfg.Profile = profileName - err = databrickscfg.SaveToProfile(ctx, &cfg) - if err != nil { - return err + if profileName != "" { + cfg.Profile = profileName + err = databrickscfg.SaveToProfile(ctx, &cfg) + if err != nil { + return err + } + + cmdio.LogString(ctx, fmt.Sprintf("Profile %s was successfully saved", profileName)) } - cmdio.LogString(ctx, fmt.Sprintf("Profile %s was successfully saved", profileName)) return nil } From c65e59751baa2ac30025e36a5935c93f922ce628 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 21 Sep 2023 16:46:35 +0200 Subject: [PATCH 108/310] Release v0.205.2 (#791) CLI: * Prompt for profile only in interactive mode ([#788](https://github.com/databricks/cli/pull/788)). Internal: * Added setup Python action ([#789](https://github.com/databricks/cli/pull/789)). --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f86c953..e525ff74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Version changelog +## 0.205.2 + +CLI: + * Prompt for profile only in interactive mode ([#788](https://github.com/databricks/cli/pull/788)). + +Internal: + * Added setup Python action ([#789](https://github.com/databricks/cli/pull/789)). + + ## 0.205.1 Bundles: From ee30277119a7770e3c867e3d4fcda1ff03354457 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 21 Sep 2023 21:21:20 +0200 Subject: [PATCH 109/310] Enable target overrides for pipeline clusters (#792) ## Changes This is a follow-up to #658 and #779 for jobs. This change applies label normalization the same way the backend does. ## Tests Unit and config loading tests. --- bundle/config/resources.go | 21 +++-- bundle/config/resources/pipeline.go | 50 ++++++++++++ bundle/config/resources/pipeline_test.go | 76 +++++++++++++++++++ bundle/config/root.go | 7 +- .../override_pipeline_cluster/databricks.yml | 33 ++++++++ .../tests/override_pipeline_cluster_test.go | 29 +++++++ 6 files changed, 199 insertions(+), 17 deletions(-) create mode 100644 bundle/config/resources/pipeline_test.go create mode 100644 bundle/tests/override_pipeline_cluster/databricks.yml create mode 100644 bundle/tests/override_pipeline_cluster_test.go diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 48621e44..ad1d6e9a 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -131,24 +131,23 @@ func (r *Resources) SetConfigFilePath(path string) { } } -// MergeJobClusters iterates over all jobs and merges their job clusters. -// This is called after applying the target overrides. -func (r *Resources) MergeJobClusters() error { +// Merge iterates over all resources and merges chunks of the +// resource configuration that can be merged. For example, for +// jobs, this merges job cluster definitions and tasks that +// use the same `job_cluster_key`, or `task_key`, respectively. +func (r *Resources) Merge() error { for _, job := range r.Jobs { if err := job.MergeJobClusters(); err != nil { return err } - } - return nil -} - -// MergeTasks iterates over all jobs and merges their tasks. -// This is called after applying the target overrides. 
-func (r *Resources) MergeTasks() error { - for _, job := range r.Jobs { if err := job.MergeTasks(); err != nil { return err } } + for _, pipeline := range r.Pipelines { + if err := pipeline.MergeClusters(); err != nil { + return err + } + } return nil } diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index d3a51c57..94c0f2b0 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -1,8 +1,11 @@ package resources import ( + "strings" + "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/imdario/mergo" ) type Pipeline struct { @@ -13,3 +16,50 @@ type Pipeline struct { *pipelines.PipelineSpec } + +// MergeClusters merges cluster definitions with same label. +// The clusters field is a slice, and as such, overrides are appended to it. +// We can identify a cluster by its label, however, so we can use this label +// to figure out which definitions are actually overrides and merge them. +// +// Note: the cluster label is optional and defaults to 'default'. +// We therefore ALSO merge all clusters without a label. +func (p *Pipeline) MergeClusters() error { + clusters := make(map[string]*pipelines.PipelineCluster) + output := make([]pipelines.PipelineCluster, 0, len(p.Clusters)) + + // Normalize cluster labels. + // If empty, this defaults to "default". + // To make matching case insensitive, labels are lowercased. + for i := range p.Clusters { + label := p.Clusters[i].Label + if label == "" { + label = "default" + } + p.Clusters[i].Label = strings.ToLower(label) + } + + // Target overrides are always appended, so we can iterate in natural order to + // first find the base definition, and merge instances we encounter later. + for i := range p.Clusters { + label := p.Clusters[i].Label + + // Register pipeline cluster with label if not yet seen before. + ref, ok := clusters[label] + if !ok { + output = append(output, p.Clusters[i]) + clusters[label] = &output[len(output)-1] + continue + } + + // Merge this instance into the reference. + err := mergo.Merge(ref, &p.Clusters[i], mergo.WithOverride, mergo.WithAppendSlice) + if err != nil { + return err + } + } + + // Overwrite resulting slice. + p.Clusters = output + return nil +} diff --git a/bundle/config/resources/pipeline_test.go b/bundle/config/resources/pipeline_test.go new file mode 100644 index 00000000..316e3d14 --- /dev/null +++ b/bundle/config/resources/pipeline_test.go @@ -0,0 +1,76 @@ +package resources + +import ( + "strings" + "testing" + + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPipelineMergeClusters(t *testing.T) { + p := &Pipeline{ + PipelineSpec: &pipelines.PipelineSpec{ + Clusters: []pipelines.PipelineCluster{ + { + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + PolicyId: "1234", + }, + { + Label: "maintenance", + NodeTypeId: "i3.2xlarge", + }, + { + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + } + + err := p.MergeClusters() + require.NoError(t, err) + + assert.Len(t, p.Clusters, 2) + assert.Equal(t, "default", p.Clusters[0].Label) + assert.Equal(t, "maintenance", p.Clusters[1].Label) + + // The default cluster was merged with a subsequent one. + pc0 := p.Clusters[0] + assert.Equal(t, "i3.2xlarge", pc0.NodeTypeId) + assert.Equal(t, 4, pc0.NumWorkers) + assert.Equal(t, "1234", pc0.PolicyId) + + // The maintenance cluster was left untouched. 
+ pc1 := p.Clusters[1] + assert.Equal(t, "i3.2xlarge", pc1.NodeTypeId) +} + +func TestPipelineMergeClustersCaseInsensitive(t *testing.T) { + p := &Pipeline{ + PipelineSpec: &pipelines.PipelineSpec{ + Clusters: []pipelines.PipelineCluster{ + { + Label: "default", + NumWorkers: 2, + }, + { + Label: "DEFAULT", + NumWorkers: 4, + }, + }, + }, + } + + err := p.MergeClusters() + require.NoError(t, err) + + assert.Len(t, p.Clusters, 1) + + // The default cluster was merged with a subsequent one. + pc0 := p.Clusters[0] + assert.Equal(t, "default", strings.ToLower(pc0.Label)) + assert.Equal(t, 4, pc0.NumWorkers) +} diff --git a/bundle/config/root.go b/bundle/config/root.go index 32883c74..3c79fb0b 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -238,12 +238,7 @@ func (r *Root) MergeTargetOverrides(target *Target) error { return err } - err = r.Resources.MergeJobClusters() - if err != nil { - return err - } - - err = r.Resources.MergeTasks() + err = r.Resources.Merge() if err != nil { return err } diff --git a/bundle/tests/override_pipeline_cluster/databricks.yml b/bundle/tests/override_pipeline_cluster/databricks.yml new file mode 100644 index 00000000..8930f30e --- /dev/null +++ b/bundle/tests/override_pipeline_cluster/databricks.yml @@ -0,0 +1,33 @@ +bundle: + name: override_pipeline_cluster + +workspace: + host: https://acme.cloud.databricks.com/ + +resources: + pipelines: + foo: + name: job + clusters: + - label: default + spark_conf: + foo: bar + +targets: + development: + resources: + pipelines: + foo: + clusters: + - label: default + node_type_id: i3.xlarge + num_workers: 1 + + staging: + resources: + pipelines: + foo: + clusters: + - label: default + node_type_id: i3.2xlarge + num_workers: 4 diff --git a/bundle/tests/override_pipeline_cluster_test.go b/bundle/tests/override_pipeline_cluster_test.go new file mode 100644 index 00000000..591fe423 --- /dev/null +++ b/bundle/tests/override_pipeline_cluster_test.go @@ -0,0 +1,29 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOverridePipelineClusterDev(t *testing.T) { + b := loadTarget(t, "./override_pipeline_cluster", "development") + assert.Equal(t, "job", b.Config.Resources.Pipelines["foo"].Name) + assert.Len(t, b.Config.Resources.Pipelines["foo"].Clusters, 1) + + c := b.Config.Resources.Pipelines["foo"].Clusters[0] + assert.Equal(t, map[string]string{"foo": "bar"}, c.SparkConf) + assert.Equal(t, "i3.xlarge", c.NodeTypeId) + assert.Equal(t, 1, c.NumWorkers) +} + +func TestOverridePipelineClusterStaging(t *testing.T) { + b := loadTarget(t, "./override_pipeline_cluster", "staging") + assert.Equal(t, "job", b.Config.Resources.Pipelines["foo"].Name) + assert.Len(t, b.Config.Resources.Pipelines["foo"].Clusters, 1) + + c := b.Config.Resources.Pipelines["foo"].Clusters[0] + assert.Equal(t, map[string]string{"foo": "bar"}, c.SparkConf) + assert.Equal(t, "i3.2xlarge", c.NodeTypeId) + assert.Equal(t, 4, c.NumWorkers) +} From 757d5efe8dd193d7cef792a4524481f136f20677 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:53:38 +0200 Subject: [PATCH 110/310] Add support for regex patterns in template schema (#768) ## Changes This PR introduces support for regex pattern validation in our custom jsonschema validator. This allows us to fail early if a user enters an invalid value for a field. 
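Concretely, a template author opts in by adding a `pattern` (and, optionally, a custom `pattern_match_failure_message`) to a string property; the snippet below mirrors the fields this PR wires into the default template's `databricks_template_schema.json`:

```json
{
  "properties": {
    "project_name": {
      "type": "string",
      "default": "my_project",
      "pattern": "^[A-Za-z0-9_]*$",
      "pattern_match_failure_message": "Must consist of letter and underscores only."
    }
  }
}
```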
For example, now this is what initializing the default template looks like with an invalid project name: ``` shreyas.goenka@THW32HFW6T bricks % cli bundle init Template to use [default-python]: Unique name for this project [my_project]: (_*_) Error: invalid value for project_name: (_*_). Must consist of letter and underscores only. ``` ## Tests New unit tests and manually. --- libs/jsonschema/extension.go | 4 + libs/jsonschema/instance.go | 12 +++ libs/jsonschema/instance_test.go | 40 +++++++++ libs/jsonschema/schema.go | 38 +++++++++ libs/jsonschema/schema_test.go | 83 +++++++++++++++++++ ...st-schema-pattern-with-custom-message.json | 9 ++ .../test-schema-pattern.json | 8 ++ libs/jsonschema/utils.go | 30 +++++++ libs/jsonschema/utils_test.go | 37 +++++++++ libs/template/config.go | 5 ++ .../databricks_template_schema.json | 4 +- .../default-python/template/__preamble.tmpl | 7 -- 12 files changed, 269 insertions(+), 8 deletions(-) create mode 100644 libs/jsonschema/testdata/instance-validate/test-schema-pattern-with-custom-message.json create mode 100644 libs/jsonschema/testdata/instance-validate/test-schema-pattern.json diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go index bbbde695..57f3e873 100644 --- a/libs/jsonschema/extension.go +++ b/libs/jsonschema/extension.go @@ -11,4 +11,8 @@ type Extension struct { // If not defined, the field is ordered alphabetically after all fields // that do have an order defined. Order *int `json:"order,omitempty"` + + // PatternMatchFailureMessage is a user defined message that is displayed to the + // user if a JSON schema pattern match fails. + PatternMatchFailureMessage string `json:"pattern_match_failure_message,omitempty"` } diff --git a/libs/jsonschema/instance.go b/libs/jsonschema/instance.go index 229a45b5..6b3e3af4 100644 --- a/libs/jsonschema/instance.go +++ b/libs/jsonschema/instance.go @@ -45,6 +45,7 @@ func (s *Schema) ValidateInstance(instance map[string]any) error { s.validateEnum, s.validateRequired, s.validateTypes, + s.validatePattern, } { err := fn(instance) if err != nil { @@ -111,3 +112,14 @@ func (s *Schema) validateEnum(instance map[string]any) error { } return nil } + +func (s *Schema) validatePattern(instance map[string]any) error { + for k, v := range instance { + fieldInfo, ok := s.Properties[k] + if !ok { + continue + } + return ValidatePatternMatch(k, v, fieldInfo) + } + return nil +} diff --git a/libs/jsonschema/instance_test.go b/libs/jsonschema/instance_test.go index ffd10ca4..3a357d71 100644 --- a/libs/jsonschema/instance_test.go +++ b/libs/jsonschema/instance_test.go @@ -153,3 +153,43 @@ func TestValidateInstanceEnum(t *testing.T) { assert.EqualError(t, schema.validateEnum(invalidIntInstance), "expected value of property bar to be one of [2 4 6]. Found: 1") assert.EqualError(t, schema.ValidateInstance(invalidIntInstance), "expected value of property bar to be one of [2 4 6]. Found: 1") } + +func TestValidateInstancePattern(t *testing.T) { + schema, err := Load("./testdata/instance-validate/test-schema-pattern.json") + require.NoError(t, err) + + validInstance := map[string]any{ + "foo": "axyzc", + } + assert.NoError(t, schema.validatePattern(validInstance)) + assert.NoError(t, schema.ValidateInstance(validInstance)) + + invalidInstanceValue := map[string]any{ + "foo": "xyz", + } + assert.EqualError(t, schema.validatePattern(invalidInstanceValue), "invalid value for foo: \"xyz\". 
Expected to match regex pattern: a.*c") + assert.EqualError(t, schema.ValidateInstance(invalidInstanceValue), "invalid value for foo: \"xyz\". Expected to match regex pattern: a.*c") + + invalidInstanceType := map[string]any{ + "foo": 1, + } + assert.EqualError(t, schema.validatePattern(invalidInstanceType), "invalid value for foo: 1. Expected a value of type string") + assert.EqualError(t, schema.ValidateInstance(invalidInstanceType), "incorrect type for property foo: expected type string, but value is 1") +} + +func TestValidateInstancePatternWithCustomMessage(t *testing.T) { + schema, err := Load("./testdata/instance-validate/test-schema-pattern-with-custom-message.json") + require.NoError(t, err) + + validInstance := map[string]any{ + "foo": "axyzc", + } + assert.NoError(t, schema.validatePattern(validInstance)) + assert.NoError(t, schema.ValidateInstance(validInstance)) + + invalidInstanceValue := map[string]any{ + "foo": "xyz", + } + assert.EqualError(t, schema.validatePattern(invalidInstanceValue), "invalid value for foo: \"xyz\". Please enter a string starting with 'a' and ending with 'c'") + assert.EqualError(t, schema.ValidateInstance(invalidInstanceValue), "invalid value for foo: \"xyz\". Please enter a string starting with 'a' and ending with 'c'") +} diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 108102a6..dc319bfe 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "regexp" "slices" ) @@ -45,6 +46,11 @@ type Schema struct { // List of valid values for a JSON instance for this schema. Enum []any `json:"enum,omitempty"` + // A pattern is a regular expression the object will be validated against. + // Can only be used with type "string". The regex syntax supported is available + // here: https://github.com/google/re2/wiki/Syntax + Pattern string `json:"pattern,omitempty"` + // Extension embeds our custom JSON schema extensions. Extension } @@ -112,6 +118,38 @@ func (schema *Schema) validate() error { return fmt.Errorf("list of enum values for property %s does not contain default value %v: %v", name, property.Default, property.Enum) } } + + // Validate usage of "pattern" is consistent. + for name, property := range schema.Properties { + pattern := property.Pattern + if pattern == "" { + continue + } + + // validate property type is string + if property.Type != StringType { + return fmt.Errorf("property %q has a non-empty regex pattern %q specified. 
Patterns are only supported for string properties", name, pattern) + } + + // validate regex pattern syntax + r, err := regexp.Compile(pattern) + if err != nil { + return fmt.Errorf("invalid regex pattern %q provided for property %q: %w", pattern, name, err) + } + + // validate default value against the pattern + if property.Default != nil && !r.MatchString(property.Default.(string)) { + return fmt.Errorf("default value %q for property %q does not match specified regex pattern: %q", property.Default, name, pattern) + } + + // validate enum values against the pattern + for i, enum := range property.Enum { + if !r.MatchString(enum.(string)) { + return fmt.Errorf("enum value %q at index %v for property %q does not match specified regex pattern: %q", enum, i, name, pattern) + } + } + } + return nil } diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go index db559ea8..aff2d962 100644 --- a/libs/jsonschema/schema_test.go +++ b/libs/jsonschema/schema_test.go @@ -139,3 +139,86 @@ func TestSchemaValidateErrorWhenDefaultValueIsNotInEnums(t *testing.T) { err = validSchema.validate() assert.NoError(t, err) } + +func TestSchemaValidatePatternType(t *testing.T) { + s := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "number", + Pattern: "abc", + }, + }, + } + assert.EqualError(t, s.validate(), "property \"foo\" has a non-empty regex pattern \"abc\" specified. Patterns are only supported for string properties") + + s = &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Pattern: "abc", + }, + }, + } + assert.NoError(t, s.validate()) +} + +func TestSchemaValidateIncorrectRegex(t *testing.T) { + s := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + // invalid regex, missing the closing brace + Pattern: "(abc", + }, + }, + } + assert.EqualError(t, s.validate(), "invalid regex pattern \"(abc\" provided for property \"foo\": error parsing regexp: missing closing ): `(abc`") +} + +func TestSchemaValidatePatternDefault(t *testing.T) { + s := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Pattern: "abc", + Default: "def", + }, + }, + } + assert.EqualError(t, s.validate(), "default value \"def\" for property \"foo\" does not match specified regex pattern: \"abc\"") + + s = &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Pattern: "a.*d", + Default: "axyzd", + }, + }, + } + assert.NoError(t, s.validate()) +} + +func TestSchemaValidatePatternEnum(t *testing.T) { + s := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Pattern: "a.*c", + Enum: []any{"abc", "def", "abbc"}, + }, + }, + } + assert.EqualError(t, s.validate(), "enum value \"def\" at index 1 for property \"foo\" does not match specified regex pattern: \"a.*c\"") + + s = &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Pattern: "a.*d", + Enum: []any{"abd", "axybgd", "abbd"}, + }, + }, + } + assert.NoError(t, s.validate()) +} diff --git a/libs/jsonschema/testdata/instance-validate/test-schema-pattern-with-custom-message.json b/libs/jsonschema/testdata/instance-validate/test-schema-pattern-with-custom-message.json new file mode 100644 index 00000000..29296d06 --- /dev/null +++ b/libs/jsonschema/testdata/instance-validate/test-schema-pattern-with-custom-message.json @@ -0,0 +1,9 @@ +{ + "properties": { + "foo": { + "type": "string", + "pattern": "a.*c", + "pattern_match_failure_message": "Please enter a string starting with 'a' and ending with 'c'" + } + } +} diff --git 
a/libs/jsonschema/testdata/instance-validate/test-schema-pattern.json b/libs/jsonschema/testdata/instance-validate/test-schema-pattern.json new file mode 100644 index 00000000..e7b49d15 --- /dev/null +++ b/libs/jsonschema/testdata/instance-validate/test-schema-pattern.json @@ -0,0 +1,8 @@ +{ + "properties": { + "foo": { + "type": "string", + "pattern": "a.*c" + } + } +} diff --git a/libs/jsonschema/utils.go b/libs/jsonschema/utils.go index 66db9603..7bb666c7 100644 --- a/libs/jsonschema/utils.go +++ b/libs/jsonschema/utils.go @@ -3,6 +3,7 @@ package jsonschema import ( "errors" "fmt" + "regexp" "strconv" ) @@ -111,3 +112,32 @@ func FromString(s string, T Type) (any, error) { } return v, err } + +func ValidatePatternMatch(name string, value any, propertySchema *Schema) error { + if propertySchema.Pattern == "" { + // Return early if no pattern is specified + return nil + } + + // Expect type of value to be a string + stringValue, ok := value.(string) + if !ok { + return fmt.Errorf("invalid value for %s: %v. Expected a value of type string", name, value) + } + + match, err := regexp.MatchString(propertySchema.Pattern, stringValue) + if err != nil { + return err + } + if match { + // successful match + return nil + } + + // If custom user error message is defined, return error with the custom message + msg := propertySchema.PatternMatchFailureMessage + if msg == "" { + msg = fmt.Sprintf("Expected to match regex pattern: %s", propertySchema.Pattern) + } + return fmt.Errorf("invalid value for %s: %q. %s", name, value, msg) +} diff --git a/libs/jsonschema/utils_test.go b/libs/jsonschema/utils_test.go index 29529aaa..4c43e57d 100644 --- a/libs/jsonschema/utils_test.go +++ b/libs/jsonschema/utils_test.go @@ -128,3 +128,40 @@ func TestTemplateToStringSlice(t *testing.T) { assert.NoError(t, err) assert.Equal(t, []string{"1.1", "2.2", "3.3"}, s) } + +func TestValidatePropertyPatternMatch(t *testing.T) { + var err error + + // Expect no error if no pattern is specified. + err = ValidatePatternMatch("foo", 1, &Schema{Type: "integer"}) + assert.NoError(t, err) + + // Expect error because value is not a string. + err = ValidatePatternMatch("bar", 1, &Schema{Type: "integer", Pattern: "abc"}) + assert.EqualError(t, err, "invalid value for bar: 1. Expected a value of type string") + + // Expect error because the pattern is invalid. + err = ValidatePatternMatch("bar", "xyz", &Schema{Type: "string", Pattern: "(abc"}) + assert.EqualError(t, err, "error parsing regexp: missing closing ): `(abc`") + + // Expect no error because the pattern matches. + err = ValidatePatternMatch("bar", "axyzd", &Schema{Type: "string", Pattern: "(a*.d)"}) + assert.NoError(t, err) + + // Expect custom error message on match fail + err = ValidatePatternMatch("bar", "axyze", &Schema{ + Type: "string", + Pattern: "(a*.d)", + Extension: Extension{ + PatternMatchFailureMessage: "my custom msg", + }, + }) + assert.EqualError(t, err, "invalid value for bar: \"axyze\". my custom msg") + + // Expect generic message on match fail + err = ValidatePatternMatch("bar", "axyze", &Schema{ + Type: "string", + Pattern: "(a*.d)", + }) + assert.EqualError(t, err, "invalid value for bar: \"axyze\". Expected to match regex pattern: (a*.d)") +} diff --git a/libs/template/config.go b/libs/template/config.go index 21618ac9..2062f320 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -121,6 +121,11 @@ func (c *config) promptForValues() error { } + // Validate the property matches any specified regex pattern. 
+ if err := jsonschema.ValidatePatternMatch(name, userInput, property); err != nil { + return err + } + // Convert user input string back to a value c.values[name], err = jsonschema.FromString(userInput, property.Type) if err != nil { diff --git a/libs/template/templates/default-python/databricks_template_schema.json b/libs/template/templates/default-python/databricks_template_schema.json index db8adcce..8784841e 100644 --- a/libs/template/templates/default-python/databricks_template_schema.json +++ b/libs/template/templates/default-python/databricks_template_schema.json @@ -4,7 +4,9 @@ "type": "string", "default": "my_project", "description": "Unique name for this project", - "order": 1 + "order": 1, + "pattern": "^[A-Za-z0-9_]*$", + "pattern_match_failure_message": "Must consist of letter and underscores only." }, "include_notebook": { "type": "string", diff --git a/libs/template/templates/default-python/template/__preamble.tmpl b/libs/template/templates/default-python/template/__preamble.tmpl index a86d3bff..54732493 100644 --- a/libs/template/templates/default-python/template/__preamble.tmpl +++ b/libs/template/templates/default-python/template/__preamble.tmpl @@ -4,13 +4,6 @@ This file only template directives; it is skipped for the actual output. {{skip "__preamble"}} -{{ $value := .project_name }} -{{with (regexp "^[A-Za-z0-9_]*$")}} - {{if not (.MatchString $value)}} - {{fail "Invalid project_name: %s. Must consist of letter and underscores only." $value}} - {{end}} -{{end}} - {{$notDLT := not (eq .include_dlt "yes")}} {{$notNotebook := not (eq .include_notebook "yes")}} {{$notPython := not (eq .include_python "yes")}} From 0c1516c4baf166a4cd744b06123cbe999751ffea Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Tue, 26 Sep 2023 11:12:34 +0200 Subject: [PATCH 111/310] Make the default `databricks bundle init` template more self-explanatory (#796) This makes the default-python template more self-explanatory and adds a few other tweaks for a better out-of-the-box experience. 
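One of those tweaks centralizes runtime version strings in a shared template library (`library/versions.tmpl` in the diff below) that other template files consume via `{{template ...}}`. A minimal runnable sketch of that Go text/template mechanism; the driver program and the `job.yml` name are illustrative:

```go
package main

import (
	"os"
	"text/template"
)

// library mimics library/versions.tmpl from the diff below.
const library = `{{define "latest_lts_dbr_version"}}13.3.x-scala2.12{{end}}`

// file mimics a template file that consumes the shared define.
const file = "spark_version: {{template \"latest_lts_dbr_version\"}}\n"

func main() {
	t := template.Must(template.New("library").Parse(library))
	template.Must(t.New("job.yml").Parse(file))
	// Prints: spark_version: 13.3.x-scala2.12
	if err := t.ExecuteTemplate(os.Stdout, "job.yml", nil); err != nil {
		panic(err)
	}
}
```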
--- libs/template/renderer_test.go | 3 ++- .../default-python/library/versions.tmpl | 7 ++++++ .../{{.project_name}}/.vscode/settings.json | 5 ++++- .../template/{{.project_name}}/README.md.tmpl | 2 +- .../requirements-dev.txt.tmpl | 22 +++++++++++++++++++ .../resources/{{.project_name}}_job.yml.tmpl | 6 +++-- .../template/{{.project_name}}/setup.py.tmpl | 22 ++++++++++++++----- .../{{.project_name}}/tests/main_test.py.tmpl | 16 ++++++++++++++ 8 files changed, 72 insertions(+), 11 deletions(-) create mode 100644 libs/template/templates/default-python/library/versions.tmpl create mode 100644 libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 8f8a8291..070fc5d2 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -41,6 +41,7 @@ func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target st templatePath, err := prepareBuiltinTemplates("default-python", tempDir) require.NoError(t, err) + libraryPath := filepath.Join(templatePath, "library") w := &databricks.WorkspaceClient{ Config: &workspaceConfig.Config{Host: "https://myhost.com"}, @@ -52,7 +53,7 @@ func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target st ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) - renderer, err := newRenderer(ctx, settings, helpers, templatePath, "./testdata/template-in-path/library", tempDir) + renderer, err := newRenderer(ctx, settings, helpers, templatePath, libraryPath, tempDir) require.NoError(t, err) // Evaluate template diff --git a/libs/template/templates/default-python/library/versions.tmpl b/libs/template/templates/default-python/library/versions.tmpl new file mode 100644 index 00000000..f9a879d2 --- /dev/null +++ b/libs/template/templates/default-python/library/versions.tmpl @@ -0,0 +1,7 @@ +{{define "latest_lts_dbr_version" -}} + 13.3.x-scala2.12 +{{- end}} + +{{define "latest_lts_db_connect_version_spec" -}} + >=13.3,<13.4 +{{- end}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json index 16cb2c96..f19498da 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json +++ b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json @@ -8,7 +8,10 @@ ], "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, + "python.analysis.extraPaths": ["src"], "files.exclude": { - "**/*.egg-info": true + "**/*.egg-info": true, + "**/__pycache__": true, + ".pytest_cache": true, }, } diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl index 1bcd7af4..b451d03b 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl @@ -30,7 +30,7 @@ The '{{.project_name}}' project was generated by using the default-python templa 5. To run a job or pipeline, use the "run" comand: ``` - $ databricks bundle run {{.project_name}}_job + $ databricks bundle run ``` 6. 
Optionally, install developer tools such as the Databricks extension for Visual Studio Code from diff --git a/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl new file mode 100644 index 00000000..2d4c0f64 --- /dev/null +++ b/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl @@ -0,0 +1,22 @@ +## requirements-dev.txt: dependencies for local development. +## +## For defining dependencies used by jobs in Databricks Workflows, see +## https://docs.databricks.com/dev-tools/bundles/library-dependencies.html + +## pytest is the default package used for testing +pytest + +## databricks-connect can be used to run parts of this project locally. +## See https://docs.databricks.com/dev-tools/databricks-connect.html. +## +## databricks-connect is automatically installed if you're using Databricks +## extension for Visual Studio Code +## (https://docs.databricks.com/dev-tools/vscode-ext/dev-tasks/databricks-connect.html). +## +## To manually install databricks-connect, either follow the instructions +## at https://docs.databricks.com/dev-tools/databricks-connect.html +## to install the package system-wide. Or uncomment the line below to install a +## version of db-connect that corresponds to the Databricks Runtime version used +## for this project. +# +# databricks-connect{{template "latest_lts_db_connect_version_spec"}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl index 1792f947..23bdee49 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -49,6 +49,9 @@ resources: package_name: {{.project_name}} entry_point: main libraries: + # By default we just include the .whl file generated for the {{.project_name}} package. + # See https://docs.databricks.com/dev-tools/bundles/library-dependencies.html + # for more information on how to add other libraries. - whl: ../dist/*.whl {{else}} @@ -56,8 +59,7 @@ resources: job_clusters: - job_cluster_key: job_cluster new_cluster: - {{- /* we should always use an LTS version in our templates */}} - spark_version: 13.3.x-scala2.12 + spark_version: {{template "latest_lts_dbr_version"}} node_type_id: {{smallest_node_type}} autoscale: min_workers: 1 diff --git a/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl index efd59882..4eb6b8f9 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl @@ -1,8 +1,9 @@ """ -Setup script for {{.project_name}}. +setup.py configuration script describing how to build and package this project. -This script packages and distributes the associated wheel file(s). -Source code is in ./src/. Run 'python setup.py sdist bdist_wheel' to build. +This file is primarily used by the setuptools library and typically should not +be executed directly. See README.md for how to deploy, test, and run +the {{.project_name}} project. 
""" from setuptools import setup, find_packages @@ -16,9 +17,18 @@ setup( version={{.project_name}}.__version__, url="https://databricks.com", author="{{user_name}}", - description="my test wheel", + description="wheel file based on {{.project_name}}/src", packages=find_packages(where='./src'), package_dir={'': 'src'}, - entry_points={"entry_points": "main={{.project_name}}.main:main"}, - install_requires=["setuptools"], + entry_points={ + "packages": [ + "main={{.project_name}}.main:main" + ] + }, + install_requires=[ + # Dependencies in case the output wheel file is used as a library dependency. + # For defining dependencies, when this package is used in Databricks, see: + # https://docs.databricks.com/dev-tools/bundles/library-dependencies.html + "setuptools" + ], ) diff --git a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl index f1750046..a7a6afe0 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl @@ -1,5 +1,21 @@ +from databricks.connect import DatabricksSession +from pyspark.sql import SparkSession from {{.project_name}} import main +# Create a new Databricks Connect session. If this fails, +# check that you have configured Databricks Connect correctly. +# See https://docs.databricks.com/dev-tools/databricks-connect.html. +{{/* + The below works around a problematic error message from Databricks Connect. + The standard SparkSession is supported in all configurations (workspace, IDE, + all runtime versions, CLI). But on the CLI it currently gives a confusing + error message if SPARK_REMOTE is not set. We can't directly use + DatabricksSession.builder in main.py, so we're re-assigning it here so + everything works out of the box, even for CLI users who don't set SPARK_REMOTE. +*/}} +SparkSession.builder = DatabricksSession.builder +SparkSession.builder.getOrCreate() + def test_main(): taxis = main.get_taxis() assert taxis.count() > 5 From e1b5912f59f04929a5a1c1a05c70a0850779f192 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 16:27:02 +0200 Subject: [PATCH 112/310] Bump golang.org/x/term from 0.11.0 to 0.12.0 (#798) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.11.0 to 0.12.0.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 0d274e8b..964484b0 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/mod v0.12.0 golang.org/x/oauth2 v0.11.0 golang.org/x/sync v0.3.0 - golang.org/x/term v0.11.0 + golang.org/x/term v0.12.0 golang.org/x/text v0.12.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) @@ -52,7 +52,7 @@ require ( go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.12.0 // indirect golang.org/x/net v0.14.0 // indirect - golang.org/x/sys v0.11.0 // indirect + golang.org/x/sys v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.138.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index a7f61bd8..a28f5578 100644 --- a/go.sum +++ b/go.sum @@ -231,12 +231,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From 0daa0022af2f56e088321ef4b2d4855f52aec4aa Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 26 Sep 2023 16:32:20 +0200 Subject: [PATCH 113/310] Make a notebook wrapper for Python wheel tasks optional (#797) ## Changes Instead of always using notebook wrapper for Python wheel tasks, let's make this an opt-in option. Now by default Python wheel tasks will be deployed as is to Databricks platform. If notebook wrapper required (DBR < 13.1 or other configuration differences), users can provide a following experimental setting ``` experimental: python_wheel_wrapper: true ``` Fixes #783, https://github.com/databricks/databricks-asset-bundles-dais2023/issues/8 ## Tests Added unit tests. Integration tests passed for both cases ``` helpers.go:163: [databricks stdout]: Hello from my func helpers.go:163: [databricks stdout]: Got arguments: helpers.go:163: [databricks stdout]: ['my_test_code', 'one', 'two'] ... 
Bundle remote directory is ***/.bundle/ac05d5e8-ed4b-4e34-b3f2-afa73f62b021 Deleted snapshot file at /var/folders/nt/xjv68qzs45319w4k36dhpylc0000gp/T/TestAccPythonWheelTaskDeployAndRunWithWrapper3733431114/001/.databricks/bundle/default/sync-snapshots/cac1e02f3941a97b.json Successfully deleted files! --- PASS: TestAccPythonWheelTaskDeployAndRunWithWrapper (214.18s) PASS coverage: 93.5% of statements in ./... ok github.com/databricks/cli/internal/bundle 214.495s coverage: 93.5% of statements in ./... ``` ``` helpers.go:163: [databricks stdout]: Hello from my func helpers.go:163: [databricks stdout]: Got arguments: helpers.go:163: [databricks stdout]: ['my_test_code', 'one', 'two'] ... Bundle remote directory is ***/.bundle/0ef67aaf-5960-4049-bf1d-dc9e29157421 Deleted snapshot file at /var/folders/nt/xjv68qzs45319w4k36dhpylc0000gp/T/TestAccPythonWheelTaskDeployAndRunWithoutWrapper2340216760/001/.databricks/bundle/default/sync-snapshots/edf0b322cee93b13.json Successfully deleted files! --- PASS: TestAccPythonWheelTaskDeployAndRunWithoutWrapper (192.36s) PASS coverage: 93.5% of statements in ./... ok github.com/databricks/cli/internal/bundle 195.130s coverage: 93.5% of statements in ./... ``` --- bundle/config/experimental.go | 8 ++ bundle/config/mutator/if.go | 35 ++++++ bundle/config/mutator/noop.go | 21 ++++ bundle/python/conditional_transform_test.go | 114 ++++++++++++++++++ bundle/python/transform.go | 16 ++- bundle/python/transform_test.go | 26 ++-- .../databricks_template_schema.json | 4 + .../template/databricks.yml.tmpl | 6 + internal/bundle/python_wheel_test.go | 19 ++- 9 files changed, 226 insertions(+), 23 deletions(-) create mode 100644 bundle/config/mutator/if.go create mode 100644 bundle/config/mutator/noop.go create mode 100644 bundle/python/conditional_transform_test.go diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index be0e7d8f..62d1ae73 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -2,6 +2,14 @@ package config type Experimental struct { Scripts map[ScriptHook]Command `json:"scripts,omitempty"` + + // By default Python wheel tasks deployed as is to Databricks platform. + // If notebook wrapper required (for example, used in DBR < 13.1 or other configuration differences), users can provide a following experimental setting + // experimental: + // python_wheel_wrapper: true + // In this case the configured wheel task will be deployed as a notebook task which install defined wheel in runtime and executes it. 
+ // For more details see https://github.com/databricks/cli/pull/797 and https://github.com/databricks/cli/pull/635 + PythonWheelWrapper bool `json:"python_wheel_wrapper,omitempty"` } type Command string diff --git a/bundle/config/mutator/if.go b/bundle/config/mutator/if.go new file mode 100644 index 00000000..462d8f00 --- /dev/null +++ b/bundle/config/mutator/if.go @@ -0,0 +1,35 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" +) + +type ifMutator struct { + condition func(*bundle.Bundle) bool + onTrueMutator bundle.Mutator + onFalseMutator bundle.Mutator +} + +func If( + condition func(*bundle.Bundle) bool, + onTrueMutator bundle.Mutator, + onFalseMutator bundle.Mutator, +) bundle.Mutator { + return &ifMutator{ + condition, onTrueMutator, onFalseMutator, + } +} + +func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) error { + if m.condition(b) { + return bundle.Apply(ctx, b, m.onTrueMutator) + } else { + return bundle.Apply(ctx, b, m.onFalseMutator) + } +} + +func (m *ifMutator) Name() string { + return "If" +} diff --git a/bundle/config/mutator/noop.go b/bundle/config/mutator/noop.go new file mode 100644 index 00000000..91c16385 --- /dev/null +++ b/bundle/config/mutator/noop.go @@ -0,0 +1,21 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" +) + +type noop struct{} + +func (*noop) Apply(context.Context, *bundle.Bundle) error { + return nil +} + +func (*noop) Name() string { + return "NoOp" +} + +func NoOp() bundle.Mutator { + return &noop{} +} diff --git a/bundle/python/conditional_transform_test.go b/bundle/python/conditional_transform_test.go new file mode 100644 index 00000000..5bf33721 --- /dev/null +++ b/bundle/python/conditional_transform_test.go @@ -0,0 +1,114 @@ +package python + +import ( + "context" + "path" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestNoTransformByDefault(t *testing.T) { + tmpDir := t.TempDir() + + b := &bundle.Bundle{ + Config: config.Root{ + Path: tmpDir, + Bundle: config.Bundle{ + Target: "development", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{ + PackageName: "test_package", + EntryPoint: "main", + }, + Libraries: []compute.Library{ + {Whl: "/Workspace/Users/test@test.com/bundle/dist/test.whl"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + trampoline := TransformWheelTask() + err := bundle.Apply(context.Background(), b, trampoline) + require.NoError(t, err) + + task := b.Config.Resources.Jobs["job1"].Tasks[0] + require.NotNil(t, task.PythonWheelTask) + require.Equal(t, "test_package", task.PythonWheelTask.PackageName) + require.Equal(t, "main", task.PythonWheelTask.EntryPoint) + require.Equal(t, "/Workspace/Users/test@test.com/bundle/dist/test.whl", task.Libraries[0].Whl) + + require.Nil(t, task.NotebookTask) +} + +func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { + tmpDir := t.TempDir() + + b := &bundle.Bundle{ + Config: config.Root{ + Path: tmpDir, + Bundle: config.Bundle{ + Target: "development", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: 
&jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{ + PackageName: "test_package", + EntryPoint: "main", + }, + Libraries: []compute.Library{ + {Whl: "/Workspace/Users/test@test.com/bundle/dist/test.whl"}, + }, + }, + }, + }, + }, + }, + }, + Experimental: &config.Experimental{ + PythonWheelWrapper: true, + }, + }, + } + + trampoline := TransformWheelTask() + err := bundle.Apply(context.Background(), b, trampoline) + require.NoError(t, err) + + task := b.Config.Resources.Jobs["job1"].Tasks[0] + require.Nil(t, task.PythonWheelTask) + require.NotNil(t, task.NotebookTask) + + dir, err := b.InternalDir(context.Background()) + require.NoError(t, err) + + internalDirRel, err := filepath.Rel(b.Config.Path, dir) + require.NoError(t, err) + + require.Equal(t, path.Join(filepath.ToSlash(internalDirRel), "notebook_job1_key1"), task.NotebookTask.NotebookPath) + + require.Empty(t, task.Libraries) +} diff --git a/bundle/python/transform.go b/bundle/python/transform.go index d8eb33f5..f6207a59 100644 --- a/bundle/python/transform.go +++ b/bundle/python/transform.go @@ -49,10 +49,16 @@ dbutils.notebook.exit(s) // which installs uploaded wheels using %pip and then calling corresponding // entry point. func TransformWheelTask() bundle.Mutator { - return mutator.NewTrampoline( - "python_wheel", - &pythonTrampoline{}, - NOTEBOOK_TEMPLATE, + return mutator.If( + func(b *bundle.Bundle) bool { + return b.Config.Experimental != nil && b.Config.Experimental.PythonWheelWrapper + }, + mutator.NewTrampoline( + "python_wheel", + &pythonTrampoline{}, + NOTEBOOK_TEMPLATE, + ), + mutator.NoOp(), ) } @@ -113,7 +119,7 @@ func (t *pythonTrampoline) generateParameters(task *jobs.PythonWheelTask) (strin if task.Parameters != nil && task.NamedParameters != nil { return "", fmt.Errorf("not allowed to pass both paramaters and named_parameters") } - params := append([]string{"python"}, task.Parameters...) + params := append([]string{task.PackageName}, task.Parameters...) 
for k, v := range task.NamedParameters { params = append(params, fmt.Sprintf("%s=%s", k, v)) } diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index a7448f23..1ccdba56 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -25,26 +25,26 @@ type testCaseNamed struct { } var paramsTestCases []testCase = []testCase{ - {[]string{}, `"python"`}, - {[]string{"a"}, `"python", "a"`}, - {[]string{"a", "b"}, `"python", "a", "b"`}, - {[]string{"123!@#$%^&*()-="}, `"python", "123!@#$%^&*()-="`}, - {[]string{`{"a": 1}`}, `"python", "{\"a\": 1}"`}, + {[]string{}, `"my_test_code"`}, + {[]string{"a"}, `"my_test_code", "a"`}, + {[]string{"a", "b"}, `"my_test_code", "a", "b"`}, + {[]string{"123!@#$%^&*()-="}, `"my_test_code", "123!@#$%^&*()-="`}, + {[]string{`{"a": 1}`}, `"my_test_code", "{\"a\": 1}"`}, } var paramsTestCasesNamed []testCaseNamed = []testCaseNamed{ - {map[string]string{}, `"python"`}, - {map[string]string{"a": "1"}, `"python", "a=1"`}, - {map[string]string{"a": "'1'"}, `"python", "a='1'"`}, - {map[string]string{"a": `"1"`}, `"python", "a=\"1\""`}, - {map[string]string{"a": "1", "b": "2"}, `"python", "a=1", "b=2"`}, - {map[string]string{"data": `{"a": 1}`}, `"python", "data={\"a\": 1}"`}, + {map[string]string{}, `"my_test_code"`}, + {map[string]string{"a": "1"}, `"my_test_code", "a=1"`}, + {map[string]string{"a": "'1'"}, `"my_test_code", "a='1'"`}, + {map[string]string{"a": `"1"`}, `"my_test_code", "a=\"1\""`}, + {map[string]string{"a": "1", "b": "2"}, `"my_test_code", "a=1", "b=2"`}, + {map[string]string{"data": `{"a": 1}`}, `"my_test_code", "data={\"a\": 1}"`}, } func TestGenerateParameters(t *testing.T) { trampoline := pythonTrampoline{} for _, c := range paramsTestCases { - task := &jobs.PythonWheelTask{Parameters: c.Actual} + task := &jobs.PythonWheelTask{PackageName: "my_test_code", Parameters: c.Actual} result, err := trampoline.generateParameters(task) require.NoError(t, err) require.Equal(t, c.Expected, result) @@ -54,7 +54,7 @@ func TestGenerateParameters(t *testing.T) { func TestGenerateNamedParameters(t *testing.T) { trampoline := pythonTrampoline{} for _, c := range paramsTestCasesNamed { - task := &jobs.PythonWheelTask{NamedParameters: c.Actual} + task := &jobs.PythonWheelTask{PackageName: "my_test_code", NamedParameters: c.Actual} result, err := trampoline.generateParameters(task) require.NoError(t, err) diff --git a/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json b/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json index f7f4b634..0695eb2b 100644 --- a/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json +++ b/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json @@ -16,6 +16,10 @@ "unique_id": { "type": "string", "description": "Unique ID for job name" + }, + "python_wheel_wrapper": { + "type": "boolean", + "description": "Whether or not to enable python wheel wrapper" } } } diff --git a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl b/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl index e715cdf1..8729dcba 100644 --- a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl @@ -4,6 +4,11 @@ bundle: workspace: root_path: "~/.bundle/{{.unique_id}}" +{{if .python_wheel_wrapper}} +experimental: + python_wheel_wrapper: true +{{end}} + resources: jobs: some_other_job: @@ -14,6 +19,7 @@ 
resources: num_workers: 1 spark_version: "{{.spark_version}}" node_type_id: "{{.node_type_id}}" + data_security_mode: USER_ISOLATION python_wheel_task: package_name: my_test_code entry_point: run diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index ee5d897d..fd5c9acc 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccPythonWheelTaskDeployAndRun(t *testing.T) { +func runPythonWheelTest(t *testing.T, pythonWheelWrapper bool) { env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) @@ -22,9 +22,10 @@ func TestAccPythonWheelTaskDeployAndRun(t *testing.T) { } bundleRoot, err := initTestTemplate(t, "python_wheel_task", map[string]any{ - "node_type_id": nodeTypeId, - "unique_id": uuid.New().String(), - "spark_version": "13.2.x-snapshot-scala2.12", + "node_type_id": nodeTypeId, + "unique_id": uuid.New().String(), + "spark_version": "13.2.x-snapshot-scala2.12", + "python_wheel_wrapper": pythonWheelWrapper, }) require.NoError(t, err) @@ -39,5 +40,13 @@ func TestAccPythonWheelTaskDeployAndRun(t *testing.T) { require.NoError(t, err) require.Contains(t, out, "Hello from my func") require.Contains(t, out, "Got arguments:") - require.Contains(t, out, "['python', 'one', 'two']") + require.Contains(t, out, "['my_test_code', 'one', 'two']") +} + +func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { + runPythonWheelTest(t, false) +} + +func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { + runPythonWheelTest(t, true) } From 7cc8b4c17f0171e04de9881c5fac04b9a2373f6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 16:52:37 +0200 Subject: [PATCH 114/310] Bump github.com/hashicorp/terraform-exec from 0.18.1 to 0.19.0 (#801) Bumps [github.com/hashicorp/terraform-exec](https://github.com/hashicorp/terraform-exec) from 0.18.1 to 0.19.0.
Release notes

Sourced from github.com/hashicorp/terraform-exec's releases.

v0.19.0

ENHANCEMENTS:

* Add support for `terraform test` command (#398)
* Add support for `-refresh-only` flag for Plan and Apply methods (#402)
* Add support for `-destroy` flag for Apply (#292)

BUG FIXES:

* Fix bug in which the `TF_WORKSPACE` env var was set to an empty string, instead of being unset as intended (#388)
Commits

* 44a44d7 v0.19.0 [skip ci]
* b55ace7 Bump github.com/hashicorp/hc-install from 0.5.2 to 0.6.0 (#407)
* 1581bc7 Fix test file extensions in e2e tests (#408)
* abef36d Bump github.com/zclconf/go-cty from 1.13.3 to 1.14.0 (#406)
* 08caa9a Update CHANGELOG.md
* f1b3c61 Update CHANGELOG.md
* ec5a394 feat: add refresh-only flag for plan and apply methods (#402)
* 2210f68 apply: allow use of -destroy flag for compatible terraform versions (#292)
* 6a6a61a Bump actions/checkout from 3.5.3 to 3.6.0 (#404)
* cff1cb8 tfexec: Initial test command support (#400)
* Additional commits viewable in compare view
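For a sense of what the v0.19.0 enhancements above look like from the caller's side, here is a minimal sketch against the `terraform-exec` API. The option names `tfexec.RefreshOnly` and `tfexec.Destroy` are assumptions based on the upstream PRs (#402, #292), so treat this as an illustration rather than code from this repository:

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	ctx := context.Background()

	// Assumes a terraform binary on PATH and an already-initialized working directory.
	tf, err := tfexec.NewTerraform("/path/to/workdir", "terraform")
	if err != nil {
		log.Fatal(err)
	}

	// New in v0.19.0: `terraform plan -refresh-only` (assumed option name, per #402).
	hasChanges, err := tf.Plan(ctx, tfexec.RefreshOnly(true))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("refresh-only plan reports changes: %v", hasChanges)

	// New in v0.19.0: `terraform apply -destroy` (assumed option name, per #292).
	if err := tf.Apply(ctx, tfexec.Destroy(true)); err != nil {
		log.Fatal(err)
	}
}
```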

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/hashicorp/terraform-exec&package-manager=go_modules&previous-version=0.18.1&new-version=0.19.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 +++++------ go.sum | 65 ++++++++++++++++++++++++++++++++++++---------------------- 2 files changed, 47 insertions(+), 30 deletions(-) diff --git a/go.mod b/go.mod index 964484b0..ca56838a 100644 --- a/go.mod +++ b/go.mod @@ -9,10 +9,10 @@ require ( github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.3.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 - github.com/hashicorp/hc-install v0.5.2 // MPL 2.0 - github.com/hashicorp/terraform-exec v0.18.1 // MPL 2.0 + github.com/hashicorp/hc-install v0.6.0 // MPL 2.0 + github.com/hashicorp/terraform-exec v0.19.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.17.1 // MPL 2.0 - github.com/imdario/mergo v0.3.13 // BSD-3-Clause + github.com/imdario/mergo v0.3.15 // BSD-3-Clause github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.19 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT @@ -34,8 +34,8 @@ require ( require ( cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -48,7 +48,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/zclconf/go-cty v1.13.2 // indirect + github.com/zclconf/go-cty v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.12.0 // indirect golang.org/x/net v0.14.0 // indirect diff --git a/go.sum b/go.sum index a28f5578..0c8e0476 100644 --- a/go.sum +++ b/go.sum @@ -4,19 +4,21 @@ cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopT cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= -github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs= +github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod 
h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= @@ -26,7 +28,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -54,12 +55,12 @@ github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= -github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk= -github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC2MDs4ee8= +github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A= +github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -107,14 +108,14 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.5.2 h1:SfwMFnEXVVirpwkDuSF5kymUOhrUxrTq3udEseZdOD0= -github.com/hashicorp/hc-install v0.5.2/go.mod h1:9QISwe6newMWIfEiXpzuu1k9HAGtQYgnSH8H9T8wmoI= -github.com/hashicorp/terraform-exec v0.18.1 h1:LAbfDvNQU1l0NOQlTuudjczVhHj061fNX5H8XZxHlH4= -github.com/hashicorp/terraform-exec v0.18.1/go.mod h1:58wg4IeuAJ6LVsLUeD2DWZZoc/bYi6dzhLHzxM41980= +github.com/hashicorp/hc-install v0.6.0 h1:fDHnU7JNFNSQebVKYhHZ0va1bC6SrPQ8fpebsvNr2w4= +github.com/hashicorp/hc-install v0.6.0/go.mod h1:10I912u3nntx9Umo1VAeYPUUuehk0aRQJYpMwbX5wQA= +github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= +github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -146,8 +147,8 @@ github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDj github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= -github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= +github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= +github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -168,8 +169,8 @@ github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgw github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
-github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0= -github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= +github.com/zclconf/go-cty v1.14.0 h1:/Xrd39K7DXbHzlisFP9c4pHao4yyf+/Ug9LEz+Y/yhc= +github.com/zclconf/go-cty v1.14.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -177,6 +178,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -186,6 +189,7 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -201,6 +205,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -212,6 +219,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -226,15 +234,20 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -243,6 +256,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -254,6 +270,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -303,7 +321,6 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 99cc01c91c6f3112ab770cb612328ab17ffdc0eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 17:40:46 +0200 Subject: [PATCH 115/310] Bump golang.org/x/oauth2 from 0.11.0 to 0.12.0 (#802) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.11.0 to 0.12.0.
Commits

* 0708528 go.mod: update golang.org/x dependencies
* a835fc4 oauth2: move global auth style cache to be per-Config
* See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/oauth2&package-manager=go_modules&previous-version=0.11.0&new-version=0.12.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index ca56838a..2894f2a9 100644 --- a/go.mod +++ b/go.mod @@ -24,10 +24,10 @@ require ( github.com/whilp/git-urls v1.0.0 // MIT golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 golang.org/x/mod v0.12.0 - golang.org/x/oauth2 v0.11.0 + golang.org/x/oauth2 v0.12.0 golang.org/x/sync v0.3.0 golang.org/x/term v0.12.0 - golang.org/x/text v0.12.0 + golang.org/x/text v0.13.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) @@ -50,8 +50,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/zclconf/go-cty v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.12.0 // indirect - golang.org/x/net v0.14.0 // indirect + golang.org/x/crypto v0.13.0 // indirect + golang.org/x/net v0.15.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.138.0 // indirect diff --git a/go.sum b/go.sum index 0c8e0476..77d7eebe 100644 --- a/go.sum +++ b/go.sum @@ -180,8 +180,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw= golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= @@ -208,12 +208,12 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -259,8 +259,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 3ee89c41dac53f2969a5fbbd6d09fb30c10ee7e0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 27 Sep 2023 10:26:59 +0200 Subject: [PATCH 116/310] Added a warning when Python wheel wrapper needs to be used (#807) ## Changes Added a warning when Python wheel wrapper needs to be used ## Tests Added unit tests + manual run with different bundle configurations --- bundle/phases/initialize.go | 2 + bundle/python/warning.go | 65 +++++++++++ bundle/python/warning_test.go | 199 ++++++++++++++++++++++++++++++++++ 3 files changed, 266 insertions(+) create mode 100644 bundle/python/warning.go create mode 100644 bundle/python/warning_test.go diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 431fe27d..818886db 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/bundle/python" "github.com/databricks/cli/bundle/scripts" ) @@ -31,6 +32,7 @@ func Initialize() bundle.Mutator { mutator.OverrideCompute(), mutator.ProcessTargetMode(), mutator.TranslatePaths(), + python.WrapperWarning(), terraform.Initialize(), scripts.Execute(config.ScriptPostInit), }, diff --git a/bundle/python/warning.go b/bundle/python/warning.go new file mode 100644 index 00000000..9a718ae7 --- /dev/null +++ b/bundle/python/warning.go @@ -0,0 +1,65 @@ +package python + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/cmdio" + "golang.org/x/mod/semver" +) + +type wrapperWarning struct { +} + +func WrapperWarning() bundle.Mutator { + return &wrapperWarning{} +} + +func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) error { + if hasIncompatibleWheelTasks(ctx, b) { + cmdio.LogString(ctx, "Python wheel tasks with local libraries require compute with DBR 13.1+. 
Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") + } + return nil +} + +func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool { + tasks := libraries.FindAllWheelTasksWithLocalLibraries(b) + for _, task := range tasks { + if task.NewCluster != nil { + if lowerThanExpectedVersion(ctx, task.NewCluster.SparkVersion) { + return true + } + } + + if task.JobClusterKey != "" { + for _, job := range b.Config.Resources.Jobs { + for _, cluster := range job.JobClusters { + if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster != nil { + if lowerThanExpectedVersion(ctx, cluster.NewCluster.SparkVersion) { + return true + } + } + } + } + } + } + + return false +} + +func lowerThanExpectedVersion(ctx context.Context, sparkVersion string) bool { + parts := strings.Split(sparkVersion, ".") + if len(parts) < 2 { + return false + } + + v := "v" + parts[0] + "." + parts[1] + return semver.Compare(v, "v13.1") < 0 +} + +// Name implements bundle.Mutator. +func (m *wrapperWarning) Name() string { + return "PythonWrapperWarning" +} diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go new file mode 100644 index 00000000..46bbd656 --- /dev/null +++ b/bundle/python/warning_test.go @@ -0,0 +1,199 @@ +package python + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestIncompatibleWheelTasksWithNewCluster(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "12.2.x-scala2.12", + }, + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + { + TaskKey: "key2", + PythonWheelTask: &jobs.PythonWheelTask{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.1.x-scala2.12", + }, + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) +} + +func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "cluster1", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "12.2.x-scala2.12", + }, + }, + { + JobClusterKey: "cluster2", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.1.x-scala2.12", + }, + }, + }, + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{}, + JobClusterKey: "cluster1", + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + { + TaskKey: "key2", + PythonWheelTask: &jobs.PythonWheelTask{}, + JobClusterKey: "cluster2", + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) +} + +func TestNoIncompatibleWheelTasks(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: 
map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "cluster1", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "12.2.x-scala2.12", + }, + }, + { + JobClusterKey: "cluster2", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.1.x-scala2.12", + }, + }, + }, + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "12.2.x-scala2.12", + }, + Libraries: []compute.Library{ + {Whl: "/Workspace/Users/me@me.com/dist/test.whl"}, + }, + }, + { + TaskKey: "key2", + PythonWheelTask: &jobs.PythonWheelTask{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + }, + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + { + TaskKey: "key3", + PythonWheelTask: &jobs.PythonWheelTask{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "12.2.x-scala2.12", + }, + Libraries: []compute.Library{ + {Whl: "dbfs:/dist/test.whl"}, + }, + }, + { + TaskKey: "key4", + PythonWheelTask: &jobs.PythonWheelTask{}, + JobClusterKey: "cluster1", + Libraries: []compute.Library{ + {Whl: "/Workspace/Users/me@me.com/dist/test.whl"}, + }, + }, + { + TaskKey: "key5", + PythonWheelTask: &jobs.PythonWheelTask{}, + JobClusterKey: "cluster2", + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + require.False(t, hasIncompatibleWheelTasks(context.Background(), b)) +} + +func TestSparkVersionLowerThanExpected(t *testing.T) { + testCases := map[string]bool{ + "13.1.x-scala2.12": false, + "13.2.x-scala2.12": false, + "13.3.x-scala2.12": false, + "14.0.x-scala2.12": false, + "14.1.x-scala2.12": false, + "10.4.x-aarch64-photon-scala2.12": true, + "10.4.x-scala2.12": true, + "13.0.x-scala2.12": true, + "5.0.x-rc-gpu-ml-scala2.11": true, + } + + for k, v := range testCases { + result := lowerThanExpectedVersion(context.Background(), k) + require.Equal(t, v, result, k) + } +} From 7171874db0a42d81e89955d518367c185b21c1c6 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Wed, 27 Sep 2023 11:04:44 +0200 Subject: [PATCH 117/310] Added `process.Background()` and `process.Forwarded()` (#804) ## Changes This PR adds higher-level wrappers for calling subprocesses. One of the steps to get https://github.com/databricks/cli/pull/637 in, as previously discussed. The reason to add `process.Forwarded()` is to proxy Python's `input()` calls from a child process seamlessly. Another use-case is plugging in `less` as a pager for the list results. 
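For context, here is a minimal usage sketch based on the signatures this PR introduces: `process.Background` captures and returns the child's stdout, while `process.Forwarded` wires the given reader and writers to the child so interactive prompts work end to end.

```go
package main

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/databricks/cli/libs/process"
)

func main() {
	ctx := context.Background()

	// Background: stdin is nil, stdout is captured and returned as a string.
	out, err := process.Background(ctx, []string{"echo", "1"}, process.WithDir("/"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(strings.TrimSpace(out)) // prints "1"

	// Forwarded: the child's stdin/stdout/stderr are proxied, so Python's
	// input() reads from the supplied reader and output streams back to us.
	err = process.Forwarded(ctx,
		[]string{"python3", "-c", "print(input('input: '))"},
		strings.NewReader("abc\n"), os.Stdout, os.Stderr)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```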
## Tests `make test` --- bundle/config/artifact.go | 13 ++--- bundle/scripts/scripts.go | 1 + libs/env/context.go | 19 +++++++ libs/env/context_test.go | 8 +++ libs/git/clone.go | 21 +++----- libs/process/background.go | 59 +++++++++++++++++++++ libs/process/background_test.go | 91 +++++++++++++++++++++++++++++++++ libs/process/forwarded.go | 43 ++++++++++++++++ libs/process/forwarded_test.go | 43 ++++++++++++++++ libs/process/opts.go | 57 +++++++++++++++++++++ libs/process/opts_test.go | 47 +++++++++++++++++ python/runner.go | 6 ++- python/runner_test.go | 6 +-- 13 files changed, 390 insertions(+), 24 deletions(-) create mode 100644 libs/process/background.go create mode 100644 libs/process/background_test.go create mode 100644 libs/process/forwarded.go create mode 100644 libs/process/forwarded_test.go create mode 100644 libs/process/opts.go create mode 100644 libs/process/opts_test.go diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index d7048a02..755116eb 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -4,11 +4,11 @@ import ( "bytes" "context" "fmt" - "os/exec" "path" "strings" "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/process" "github.com/databricks/databricks-sdk-go/service/compute" ) @@ -56,13 +56,14 @@ func (a *Artifact) Build(ctx context.Context) ([]byte, error) { commands := strings.Split(a.BuildCommand, " && ") for _, command := range commands { buildParts := strings.Split(command, " ") - cmd := exec.CommandContext(ctx, buildParts[0], buildParts[1:]...) - cmd.Dir = a.Path - res, err := cmd.CombinedOutput() + var buf bytes.Buffer + _, err := process.Background(ctx, buildParts, + process.WithCombinedOutput(&buf), + process.WithDir(a.Path)) if err != nil { - return res, err + return buf.Bytes(), err } - out = append(out, res) + out = append(out, buf.Bytes()) } return bytes.Join(out, []byte{}), nil } diff --git a/bundle/scripts/scripts.go b/bundle/scripts/scripts.go index 1a8a471c..90c1914f 100644 --- a/bundle/scripts/scripts.go +++ b/bundle/scripts/scripts.go @@ -61,6 +61,7 @@ func executeHook(ctx context.Context, b *bundle.Bundle, hook config.ScriptHook) return nil, nil, err } + // TODO: switch to process.Background(...) cmd := exec.CommandContext(ctx, interpreter, "-c", string(command)) cmd.Dir = b.Config.Path diff --git a/libs/env/context.go b/libs/env/context.go index cf04c1ec..bbe294d7 100644 --- a/libs/env/context.go +++ b/libs/env/context.go @@ -3,6 +3,7 @@ package env import ( "context" "os" + "strings" ) var envContextKey int @@ -61,3 +62,21 @@ func Set(ctx context.Context, key, value string) context.Context { m[key] = value return setMap(ctx, m) } + +// All returns environment variables that are defined in both os.Environ +// and this package. `env.Set(ctx, x, y)` will override x from os.Environ. 
+func All(ctx context.Context) map[string]string { + m := map[string]string{} + for _, line := range os.Environ() { + split := strings.SplitN(line, "=", 2) + if len(split) != 2 { + continue + } + m[split[0]] = split[1] + } + // override existing environment variables with the ones we set + for k, v := range getMap(ctx) { + m[k] = v + } + return m +} diff --git a/libs/env/context_test.go b/libs/env/context_test.go index 9ff19459..39553448 100644 --- a/libs/env/context_test.go +++ b/libs/env/context_test.go @@ -38,4 +38,12 @@ func TestContext(t *testing.T) { assert.Equal(t, "qux", Get(ctx2, "FOO")) assert.Equal(t, "baz", Get(ctx1, "FOO")) assert.Equal(t, "bar", Get(ctx0, "FOO")) + + ctx3 := Set(ctx2, "BAR", "x=y") + + all := All(ctx3) + assert.NotNil(t, all) + assert.Equal(t, "qux", all["FOO"]) + assert.Equal(t, "x=y", all["BAR"]) + assert.NotEmpty(t, all["PATH"]) } diff --git a/libs/git/clone.go b/libs/git/clone.go index af7ffa4b..e7d001cd 100644 --- a/libs/git/clone.go +++ b/libs/git/clone.go @@ -1,13 +1,14 @@ package git import ( - "bytes" "context" "errors" "fmt" "os/exec" "regexp" "strings" + + "github.com/databricks/cli/libs/process" ) // source: https://stackoverflow.com/questions/59081778/rules-for-special-characters-in-github-repository-name @@ -42,24 +43,18 @@ func (opts cloneOptions) args() []string { } func (opts cloneOptions) clone(ctx context.Context) error { - cmd := exec.CommandContext(ctx, "git", opts.args()...) - var cmdErr bytes.Buffer - cmd.Stderr = &cmdErr - - // start git clone - err := cmd.Start() + // start and wait for git clone to complete + _, err := process.Background(ctx, append([]string{"git"}, opts.args()...)) if errors.Is(err, exec.ErrNotFound) { return fmt.Errorf("please install git CLI to clone a repository: %w", err) } + var processErr *process.ProcessError + if errors.As(err, &processErr) { + return fmt.Errorf("git clone failed: %w. %s", err, processErr.Stderr) + } if err != nil { return fmt.Errorf("git clone failed: %w", err) } - - // wait for git clone to complete - err = cmd.Wait() - if err != nil { - return fmt.Errorf("git clone failed: %w. %s", err, cmdErr.String()) - } return nil } diff --git a/libs/process/background.go b/libs/process/background.go new file mode 100644 index 00000000..26178a1d --- /dev/null +++ b/libs/process/background.go @@ -0,0 +1,59 @@ +package process + +import ( + "bytes" + "context" + "fmt" + "os/exec" + "strings" + + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/log" +) + +type ProcessError struct { + Command string + Err error + Stdout string + Stderr string +} + +func (perr *ProcessError) Unwrap() error { + return perr.Err +} + +func (perr *ProcessError) Error() string { + return fmt.Sprintf("%s: %s", perr.Command, perr.Err) +} + +func Background(ctx context.Context, args []string, opts ...execOption) (string, error) { + commandStr := strings.Join(args, " ") + log.Debugf(ctx, "running: %s", commandStr) + cmd := exec.CommandContext(ctx, args[0], args[1:]...) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + // For background processes, there's no standard input + cmd.Stdin = nil + cmd.Stdout = &stdout + cmd.Stderr = &stderr + // we pull the env through lib/env such that we can run + // parallel tests with anything using libs/process. 
+ for k, v := range env.All(ctx) { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + for _, o := range opts { + err := o(ctx, cmd) + if err != nil { + return "", err + } + } + if err := cmd.Run(); err != nil { + return stdout.String(), &ProcessError{ + Err: err, + Command: commandStr, + Stdout: stdout.String(), + Stderr: stderr.String(), + } + } + return stdout.String(), nil +} diff --git a/libs/process/background_test.go b/libs/process/background_test.go new file mode 100644 index 00000000..94f7e881 --- /dev/null +++ b/libs/process/background_test.go @@ -0,0 +1,91 @@ +package process + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBackgroundUnwrapsNotFound(t *testing.T) { + ctx := context.Background() + _, err := Background(ctx, []string{"/bin/meeecho", "1"}) + assert.ErrorIs(t, err, os.ErrNotExist) +} + +func TestBackground(t *testing.T) { + ctx := context.Background() + res, err := Background(ctx, []string{"echo", "1"}, WithDir("/")) + assert.NoError(t, err) + assert.Equal(t, "1", strings.TrimSpace(res)) +} + +func TestBackgroundOnlyStdoutGetsoutOnSuccess(t *testing.T) { + ctx := context.Background() + res, err := Background(ctx, []string{ + "python3", "-c", "import sys; sys.stderr.write('1'); sys.stdout.write('2')", + }) + assert.NoError(t, err) + assert.Equal(t, "2", res) +} + +func TestBackgroundCombinedOutput(t *testing.T) { + ctx := context.Background() + buf := bytes.Buffer{} + res, err := Background(ctx, []string{ + "python3", "-c", "import sys, time; " + + `sys.stderr.write("1\n"); sys.stderr.flush(); ` + + "time.sleep(0.001); " + + "print('2', flush=True); sys.stdout.flush(); " + + "time.sleep(0.001)", + }, WithCombinedOutput(&buf)) + assert.NoError(t, err) + assert.Equal(t, "2", strings.TrimSpace(res)) + assert.Equal(t, "1\n2\n", strings.ReplaceAll(buf.String(), "\r", "")) +} + +func TestBackgroundCombinedOutputFailure(t *testing.T) { + ctx := context.Background() + buf := bytes.Buffer{} + res, err := Background(ctx, []string{ + "python3", "-c", "import sys, time; " + + `sys.stderr.write("1\n"); sys.stderr.flush(); ` + + "time.sleep(0.001); " + + "print('2', flush=True); sys.stdout.flush(); " + + "time.sleep(0.001); " + + "sys.exit(42)", + }, WithCombinedOutput(&buf)) + var processErr *ProcessError + if assert.ErrorAs(t, err, &processErr) { + assert.Equal(t, "1", strings.TrimSpace(processErr.Stderr)) + assert.Equal(t, "2", strings.TrimSpace(processErr.Stdout)) + } + assert.Equal(t, "2", strings.TrimSpace(res)) + assert.Equal(t, "1\n2\n", strings.ReplaceAll(buf.String(), "\r", "")) +} + +func TestBackgroundNoStdin(t *testing.T) { + ctx := context.Background() + res, err := Background(ctx, []string{"cat"}) + assert.NoError(t, err) + assert.Equal(t, "", res) +} + +func TestBackgroundFails(t *testing.T) { + ctx := context.Background() + _, err := Background(ctx, []string{"ls", "/dev/null/x"}) + assert.NotNil(t, err) +} + +func TestBackgroundFailsOnOption(t *testing.T) { + ctx := context.Background() + _, err := Background(ctx, []string{"ls", "/dev/null/x"}, func(_ context.Context, c *exec.Cmd) error { + return fmt.Errorf("nope") + }) + assert.EqualError(t, err, "nope") +} diff --git a/libs/process/forwarded.go b/libs/process/forwarded.go new file mode 100644 index 00000000..df3c2dbd --- /dev/null +++ b/libs/process/forwarded.go @@ -0,0 +1,43 @@ +package process + +import ( + "context" + "fmt" + "io" + "os/exec" + "strings" + + "github.com/databricks/cli/libs/env" + 
"github.com/databricks/cli/libs/log" +) + +func Forwarded(ctx context.Context, args []string, src io.Reader, outWriter, errWriter io.Writer, opts ...execOption) error { + commandStr := strings.Join(args, " ") + log.Debugf(ctx, "starting: %s", commandStr) + cmd := exec.CommandContext(ctx, args[0], args[1:]...) + + // empirical tests showed buffered copies being more responsive + cmd.Stdout = outWriter + cmd.Stderr = errWriter + cmd.Stdin = src + // we pull the env through lib/env such that we can run + // parallel tests with anything using libs/process. + for k, v := range env.All(ctx) { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + + // apply common options + for _, o := range opts { + err := o(ctx, cmd) + if err != nil { + return err + } + } + + err := cmd.Start() + if err != nil { + return err + } + + return cmd.Wait() +} diff --git a/libs/process/forwarded_test.go b/libs/process/forwarded_test.go new file mode 100644 index 00000000..ddb79818 --- /dev/null +++ b/libs/process/forwarded_test.go @@ -0,0 +1,43 @@ +package process + +import ( + "bytes" + "context" + "os/exec" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestForwarded(t *testing.T) { + ctx := context.Background() + var buf bytes.Buffer + err := Forwarded(ctx, []string{ + "python3", "-c", "print(input('input: '))", + }, strings.NewReader("abc\n"), &buf, &buf) + assert.NoError(t, err) + + assert.Equal(t, "input: abc", strings.TrimSpace(buf.String())) +} + +func TestForwardedFails(t *testing.T) { + ctx := context.Background() + var buf bytes.Buffer + err := Forwarded(ctx, []string{ + "_non_existent_", + }, strings.NewReader("abc\n"), &buf, &buf) + assert.NotNil(t, err) +} + +func TestForwardedFailsOnStdinPipe(t *testing.T) { + ctx := context.Background() + var buf bytes.Buffer + err := Forwarded(ctx, []string{ + "_non_existent_", + }, strings.NewReader("abc\n"), &buf, &buf, func(_ context.Context, c *exec.Cmd) error { + c.Stdin = strings.NewReader("x") + return nil + }) + assert.NotNil(t, err) +} diff --git a/libs/process/opts.go b/libs/process/opts.go new file mode 100644 index 00000000..e201c666 --- /dev/null +++ b/libs/process/opts.go @@ -0,0 +1,57 @@ +package process + +import ( + "bytes" + "context" + "fmt" + "io" + "os/exec" +) + +type execOption func(context.Context, *exec.Cmd) error + +func WithEnv(key, value string) execOption { + return func(ctx context.Context, c *exec.Cmd) error { + v := fmt.Sprintf("%s=%s", key, value) + c.Env = append(c.Env, v) + return nil + } +} + +func WithEnvs(envs map[string]string) execOption { + return func(ctx context.Context, c *exec.Cmd) error { + for k, v := range envs { + err := WithEnv(k, v)(ctx, c) + if err != nil { + return err + } + } + return nil + } +} + +func WithDir(dir string) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Dir = dir + return nil + } +} + +func WithStdoutPipe(dst *io.ReadCloser) execOption { + return func(_ context.Context, c *exec.Cmd) error { + outPipe, err := c.StdoutPipe() + if err != nil { + return err + } + *dst = outPipe + return nil + } +} + +func WithCombinedOutput(buf *bytes.Buffer) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stdout = io.MultiWriter(buf, c.Stdout) + c.Stderr = io.MultiWriter(buf, c.Stderr) + return nil + } +} diff --git a/libs/process/opts_test.go b/libs/process/opts_test.go new file mode 100644 index 00000000..3a819fbb --- /dev/null +++ b/libs/process/opts_test.go @@ -0,0 +1,47 @@ +package process + +import ( + "context" + "os/exec" + 
"runtime" + "sort" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" + "github.com/stretchr/testify/assert" +) + +func TestWithEnvs(t *testing.T) { + if runtime.GOOS == "windows" { + // Skipping test on windows for now because of the following error: + // /bin/sh -c echo $FOO $BAR: exec: "/bin/sh": file does not exist + t.SkipNow() + } + ctx := context.Background() + ctx2 := env.Set(ctx, "FOO", "foo") + res, err := Background(ctx2, []string{"/bin/sh", "-c", "echo $FOO $BAR"}, WithEnvs(map[string]string{ + "BAR": "delirium", + })) + assert.NoError(t, err) + assert.Equal(t, "foo delirium\n", res) +} + +func TestWorksWithLibsEnv(t *testing.T) { + testutil.CleanupEnvironment(t) + ctx := context.Background() + + cmd := &exec.Cmd{} + err := WithEnvs(map[string]string{ + "CCC": "DDD", + "EEE": "FFF", + })(ctx, cmd) + assert.NoError(t, err) + + vars := cmd.Environ() + sort.Strings(vars) + + assert.True(t, len(vars) >= 2) + assert.Equal(t, "CCC=DDD", vars[0]) + assert.Equal(t, "EEE=FFF", vars[1]) +} diff --git a/python/runner.go b/python/runner.go index bdf386a0..ebf24717 100644 --- a/python/runner.go +++ b/python/runner.go @@ -8,6 +8,8 @@ import ( "os/exec" "runtime" "strings" + + "github.com/databricks/cli/libs/process" ) func PyInline(ctx context.Context, inlinePy string) (string, error) { @@ -88,8 +90,8 @@ func DetectExecutable(ctx context.Context) (string, error) { func execAndPassErr(ctx context.Context, name string, args ...string) ([]byte, error) { // TODO: move out to a separate package, once we have Maven integration - out, err := exec.CommandContext(ctx, name, args...).Output() - return out, nicerErr(err) + out, err := process.Background(ctx, append([]string{name}, args...)) + return []byte(out), nicerErr(err) } func getFirstMatch(out string) string { diff --git a/python/runner_test.go b/python/runner_test.go index 3968e27a..fc8f2508 100644 --- a/python/runner_test.go +++ b/python/runner_test.go @@ -20,7 +20,7 @@ func TestExecAndPassError(t *testing.T) { } _, err := execAndPassErr(context.Background(), "which", "__non_existing__") - assert.EqualError(t, err, "exit status 1") + assert.EqualError(t, err, "which __non_existing__: exit status 1") } func TestDetectPython(t *testing.T) { @@ -77,7 +77,7 @@ func testTempdir(t *testing.T, dir *string) func() { func TestPyError(t *testing.T) { _, err := Py(context.Background(), "__non_existing__.py") - assert.Contains(t, err.Error(), "can't open file") + assert.Contains(t, err.Error(), "exit status 2") } func TestPyInline(t *testing.T) { @@ -90,5 +90,5 @@ func TestPyInlineStderr(t *testing.T) { DetectExecutable(context.Background()) inline := "import sys; sys.stderr.write('___msg___'); sys.exit(1)" _, err := PyInline(context.Background(), inline) - assert.EqualError(t, err, "___msg___") + assert.ErrorContains(t, err, "___msg___") } From f31e8b446c1ac080d8c878dd367799504c7f8851 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 27 Sep 2023 12:57:34 +0200 Subject: [PATCH 118/310] Release v0.206.0 (#811) Bundles: * Enable target overrides for pipeline clusters ([#792](https://github.com/databricks/cli/pull/792)). * Add support for regex patterns in template schema ([#768](https://github.com/databricks/cli/pull/768)). * Make the default `databricks bundle init` template more self-explanatory ([#796](https://github.com/databricks/cli/pull/796)). * Make a notebook wrapper for Python wheel tasks optional ([#797](https://github.com/databricks/cli/pull/797)). 
* Added a warning when Python wheel wrapper needs to be used ([#807](https://github.com/databricks/cli/pull/807)).

Internal:
* Added `process.Background()` and `process.Forwarded()` ([#804](https://github.com/databricks/cli/pull/804)).

Dependency updates:
* Bump golang.org/x/term from 0.11.0 to 0.12.0 ([#798](https://github.com/databricks/cli/pull/798)).
* Bump github.com/hashicorp/terraform-exec from 0.18.1 to 0.19.0 ([#801](https://github.com/databricks/cli/pull/801)).
* Bump golang.org/x/oauth2 from 0.11.0 to 0.12.0 ([#802](https://github.com/databricks/cli/pull/802)).
---
 CHANGELOG.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e525ff74..17e88159 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,22 @@
 # Version changelog

+## 0.206.0
+
+Bundles:
+ * Enable target overrides for pipeline clusters ([#792](https://github.com/databricks/cli/pull/792)).
+ * Add support for regex patterns in template schema ([#768](https://github.com/databricks/cli/pull/768)).
+ * Make the default `databricks bundle init` template more self-explanatory ([#796](https://github.com/databricks/cli/pull/796)).
+ * Make a notebook wrapper for Python wheel tasks optional ([#797](https://github.com/databricks/cli/pull/797)).
+ * Added a warning when Python wheel wrapper needs to be used ([#807](https://github.com/databricks/cli/pull/807)).
+
+Internal:
+ * Added `process.Background()` and `process.Forwarded()` ([#804](https://github.com/databricks/cli/pull/804)).
+
+Dependency updates:
+ * Bump golang.org/x/term from 0.11.0 to 0.12.0 ([#798](https://github.com/databricks/cli/pull/798)).
+ * Bump github.com/hashicorp/terraform-exec from 0.18.1 to 0.19.0 ([#801](https://github.com/databricks/cli/pull/801)).
+ * Bump golang.org/x/oauth2 from 0.11.0 to 0.12.0 ([#802](https://github.com/databricks/cli/pull/802)).
+
 ## 0.205.2

 CLI:

From 30b4b8ce58200a25d766327a10283f9e62b78910 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Fri, 29 Sep 2023 08:58:40 +0200
Subject: [PATCH 119/310] Allow digits in the generated short name (#820)

## Changes

Digits were previously replaced by `_`.

## Tests

Additional test cases with uncommon variations of email addresses.
---
 .../config/mutator/populate_current_user.go   | 16 ++---
 .../mutator/populate_current_user_test.go     | 60 +++++++++++++++----
 2 files changed, 57 insertions(+), 19 deletions(-)

diff --git a/bundle/config/mutator/populate_current_user.go b/bundle/config/mutator/populate_current_user.go
index bba0457c..b604d671 100644
--- a/bundle/config/mutator/populate_current_user.go
+++ b/bundle/config/mutator/populate_current_user.go
@@ -38,15 +38,17 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error
 	return nil
 }

+func replaceNonAlphanumeric(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) {
+		return r
+	}
+	return '_'
+}
+
 // Get a short-form username, based on the user's primary email address.
 // We leave the full range of unicode letters intact, but remove all "special" characters,
 // including dots, which are not supported in e.g. experiment names.
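 // For example, "jöhn.dœ+tag@domain.com" yields "jöhn_dœ_tag".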
func getShortUserName(emailAddress string) string { - r := []rune(strings.Split(emailAddress, "@")[0]) - for i := 0; i < len(r); i++ { - if !unicode.IsLetter(r[i]) { - r[i] = '_' - } - } - return string(r) + local, _, _ := strings.Cut(emailAddress, "@") + return strings.Map(replaceNonAlphanumeric, local) } diff --git a/bundle/config/mutator/populate_current_user_test.go b/bundle/config/mutator/populate_current_user_test.go index 79ec52b8..bbb65e07 100644 --- a/bundle/config/mutator/populate_current_user_test.go +++ b/bundle/config/mutator/populate_current_user_test.go @@ -1,6 +1,10 @@ package mutator -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/assert" +) func TestPopulateCurrentUser(t *testing.T) { // We need to implement workspace client mocking to implement this test. @@ -13,28 +17,60 @@ func TestGetShortUserName(t *testing.T) { expected string }{ { - name: "test alphanumeric characters", - email: "test.user@example.com", - expected: "test_user", + email: "test.user.1234@example.com", + expected: "test_user_1234", }, { - name: "test unicode characters", email: "tést.üser@example.com", expected: "tést_üser", }, { - name: "test special characters", email: "test$.user@example.com", expected: "test__user", }, + { + email: `jöhn.dœ@domain.com`, // Using non-ASCII characters. + expected: "jöhn_dœ", + }, + { + email: `first+tag@email.com`, // The plus (+) sign is used for "sub-addressing" in some email services. + expected: "first_tag", + }, + { + email: `email@sub.domain.com`, // Using a sub-domain. + expected: "email", + }, + { + email: `"_quoted"@domain.com`, // Quoted strings can be part of the local-part. + expected: "__quoted_", + }, + { + email: `name-o'mally@website.org`, // Single quote in the local-part. + expected: "name_o_mally", + }, + { + email: `user%domain@external.com`, // Percent sign can be used for email routing in legacy systems. + expected: "user_domain", + }, + { + email: `long.name.with.dots@domain.net`, // Multiple dots in the local-part. + expected: "long_name_with_dots", + }, + { + email: `me&you@together.com`, // Using an ampersand (&) in the local-part. + expected: "me_you", + }, + { + email: `user!def!xyz@domain.org`, // The exclamation mark can be valid in some legacy systems. + expected: "user_def_xyz", + }, + { + email: `admin@ιντερνετ.com`, // Domain in non-ASCII characters (IDN or Internationalized Domain Name). + expected: "admin", + }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := getShortUserName(tt.email) - if result != tt.expected { - t.Errorf("getShortUserName(%q) = %q; expected %q", tt.email, result, tt.expected) - } - }) + assert.Equal(t, tt.expected, getShortUserName(tt.email)) } } From 3685eb16f4041b20a97da0ec15f2ea1a7fe4e935 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 29 Sep 2023 10:38:06 +0200 Subject: [PATCH 120/310] Run tests to verify backend tag validation behavior (#814) ## Changes Validation rules on tags are different per cloud (they are passed through to the underlying clusters and as such must comply with cloud-specific validation rules). This change adds tests to confirm the current behavior to ensure the normalization we can apply is in line with how the backend behaves. ## Tests The new integration tests pass (tested locally). 
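For illustration, each case reduces to creating a throwaway job that carries a
single tag and asserting on the backend's error message. A minimal sketch of one
AWS key case, using the `testTagKey` helper added below (the expected substring
is the backend message captured in these tests):

```go
err := testTagKey(t, "café")
require.Error(t, err)
require.Contains(t, err.Error(), "The key must match the regular expression")
```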
--- internal/tags_test.go | 259 +++++++++++++++++++++++++++++++ internal/testutil/cloud.go | 48 ++++++ internal/testutil/env.go | 9 ++ internal/testutil/requirement.go | 19 +++ 4 files changed, 335 insertions(+) create mode 100644 internal/tags_test.go create mode 100644 internal/testutil/cloud.go create mode 100644 internal/testutil/requirement.go diff --git a/internal/tags_test.go b/internal/tags_test.go new file mode 100644 index 00000000..2dd3759a --- /dev/null +++ b/internal/tags_test.go @@ -0,0 +1,259 @@ +package internal + +import ( + "context" + "strings" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func testTags(t *testing.T, tags map[string]string) error { + var nodeTypeId string + switch testutil.GetCloud(t) { + case testutil.AWS: + nodeTypeId = "i3.xlarge" + case testutil.Azure: + nodeTypeId = "Standard_DS4_v2" + case testutil.GCP: + nodeTypeId = "n1-standard-4" + } + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + ctx := context.Background() + resp, err := w.Jobs.Create(ctx, jobs.CreateJob{ + Name: RandomName("test-tags-"), + Tasks: []jobs.Task{ + { + TaskKey: "test", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NumWorkers: 1, + NodeTypeId: nodeTypeId, + }, + SparkPythonTask: &jobs.SparkPythonTask{ + PythonFile: "/doesnt_exist.py", + }, + }, + }, + Tags: tags, + }) + + if resp != nil { + t.Cleanup(func() { + w.Jobs.DeleteByJobId(ctx, resp.JobId) + }) + } + + return err +} + +func testTagKey(t *testing.T, key string) error { + return testTags(t, map[string]string{ + key: "value", + }) +} + +func testTagValue(t *testing.T, value string) error { + return testTags(t, map[string]string{ + "key": value, + }) +} + +type tagTestCase struct { + name string + value string + fn func(t *testing.T, value string) error + err string +} + +func runTagTestCases(t *testing.T, cases []tagTestCase) { + for i := range cases { + tc := cases[i] + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := tc.fn(t, tc.value) + if tc.err == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + msg := strings.ReplaceAll(err.Error(), "\n", " ") + require.Contains(t, msg, tc.err) + } + }) + } +} + +func TestAccTagKeyAWS(t *testing.T) { + testutil.Require(t, testutil.AWS) + t.Parallel() + + runTagTestCases(t, []tagTestCase{ + { + name: "invalid", + value: "café", + fn: testTagKey, + err: ` The key must match the regular expression ^[\d \w\+\-=\.:\/@]*$.`, + }, + { + name: "unicode", + value: "🍎", + fn: testTagKey, + err: ` contains non-latin1 characters.`, + }, + { + name: "empty", + value: "", + fn: testTagKey, + err: ` the minimal length is 1, and the maximum length is 127.`, + }, + { + name: "valid", + value: "cafe", + fn: testTagKey, + err: ``, + }, + }) +} + +func TestAccTagValueAWS(t *testing.T) { + testutil.Require(t, testutil.AWS) + t.Parallel() + + runTagTestCases(t, []tagTestCase{ + { + name: "invalid", + value: "café", + fn: testTagValue, + err: ` The value must match the regular expression ^[\d \w\+\-=\.:/@]*$.`, + }, + { + name: "unicode", + value: "🍎", + fn: testTagValue, + err: ` contains non-latin1 characters.`, + }, + { + name: "valid", + value: "cafe", + fn: testTagValue, + err: ``, + }, + }) +} + +func TestAccTagKeyAzure(t *testing.T) { + testutil.Require(t, testutil.Azure) + t.Parallel() + + 
runTagTestCases(t, []tagTestCase{ + { + name: "invalid", + value: "café?", + fn: testTagKey, + err: ` The key must match the regular expression ^[^<>\*&%;\\\/\+\?]*$.`, + }, + { + name: "unicode", + value: "🍎", + fn: testTagKey, + err: ` contains non-latin1 characters.`, + }, + { + name: "empty", + value: "", + fn: testTagKey, + err: ` the minimal length is 1, and the maximum length is 512.`, + }, + { + name: "valid", + value: "cafe", + fn: testTagKey, + err: ``, + }, + }) +} + +func TestAccTagValueAzure(t *testing.T) { + testutil.Require(t, testutil.Azure) + t.Parallel() + + runTagTestCases(t, []tagTestCase{ + { + name: "unicode", + value: "🍎", + fn: testTagValue, + err: ` contains non-latin1 characters.`, + }, + { + name: "valid", + value: "cafe", + fn: testTagValue, + err: ``, + }, + }) +} + +func TestAccTagKeyGCP(t *testing.T) { + testutil.Require(t, testutil.GCP) + t.Parallel() + + runTagTestCases(t, []tagTestCase{ + { + name: "invalid", + value: "café?", + fn: testTagKey, + err: ` The key must match the regular expression ^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$.`, + }, + { + name: "unicode", + value: "🍎", + fn: testTagKey, + err: ` contains non-latin1 characters.`, + }, + { + name: "empty", + value: "", + fn: testTagKey, + err: ` the minimal length is 1, and the maximum length is 63.`, + }, + { + name: "valid", + value: "cafe", + fn: testTagKey, + err: ``, + }, + }) +} + +func TestAccTagValueGCP(t *testing.T) { + testutil.Require(t, testutil.GCP) + t.Parallel() + + runTagTestCases(t, []tagTestCase{ + { + name: "invalid", + value: "café", + fn: testTagValue, + err: ` The value must match the regular expression ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$.`, + }, + { + name: "unicode", + value: "🍎", + fn: testTagValue, + err: ` contains non-latin1 characters.`, + }, + { + name: "valid", + value: "cafe", + fn: testTagValue, + err: ``, + }, + }) +} diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go new file mode 100644 index 00000000..50bbf67f --- /dev/null +++ b/internal/testutil/cloud.go @@ -0,0 +1,48 @@ +package testutil + +import ( + "testing" +) + +type Cloud int + +const ( + AWS Cloud = iota + Azure + GCP +) + +// Implement [Requirement]. +func (c Cloud) Verify(t *testing.T) { + if c != GetCloud(t) { + t.Skipf("Skipping %s-specific test", c) + } +} + +func (c Cloud) String() string { + switch c { + case AWS: + return "AWS" + case Azure: + return "Azure" + case GCP: + return "GCP" + default: + return "unknown" + } +} + +func GetCloud(t *testing.T) Cloud { + env := GetEnvOrSkipTest(t, "CLOUD_ENV") + switch env { + case "aws": + return AWS + case "azure": + return Azure + case "gcp": + return GCP + default: + t.Fatalf("Unknown cloud environment: %s", env) + } + return -1 +} diff --git a/internal/testutil/env.go b/internal/testutil/env.go index 11a61018..39201c5b 100644 --- a/internal/testutil/env.go +++ b/internal/testutil/env.go @@ -35,3 +35,12 @@ func CleanupEnvironment(t *testing.T) { t.Setenv("USERPROFILE", pwd) } } + +// GetEnvOrSkipTest proceeds with test only with that env variable +func GetEnvOrSkipTest(t *testing.T, name string) string { + value := os.Getenv(name) + if value == "" { + t.Skipf("Environment variable %s is missing", name) + } + return value +} diff --git a/internal/testutil/requirement.go b/internal/testutil/requirement.go new file mode 100644 index 00000000..53855e0b --- /dev/null +++ b/internal/testutil/requirement.go @@ -0,0 +1,19 @@ +package testutil + +import ( + "testing" +) + +// Requirement is the interface for test requirements. 
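+// Implementations include [Cloud], whose Verify skips the test unless it is
+// running against the matching cloud.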
+type Requirement interface {
+	Verify(t *testing.T)
+}
+
+// Require should be called at the beginning of a test to ensure that all
+// requirements are met before running the test.
+// If any requirement is not met, the test will be skipped.
+func Require(t *testing.T, requirements ...Requirement) {
+	for _, r := range requirements {
+		r.Verify(t)
+	}
+}

From 4226c88e98351b878f2e6946af1909f5d9bf5b1e Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Fri, 29 Sep 2023 10:49:08 +0200
Subject: [PATCH 121/310] Library to validate and normalize cloud specific tags (#819)

## Changes

Prompted by the proposed fix for a tagging-related problem in #810, I
investigated how tag validation works. This turned out to be quite a bit more
complex than anticipated.

Tags at the job level (or cluster level) are passed through to the underlying
compute infrastructure and as such are tested against cloud-specific
validation rules. GCP appears to be the most restrictive. It would be
disappointing to always restrict to `\w+`, so this package implements
validation and normalization rules for each cloud. It can pick the right
cloud using a Go SDK configuration.

## Tests

Exhaustive unit tests. The regular expressions were pulled from the tests
added in #814.
---
 libs/tags/aws.go            | 36 +++++++++++++++
 libs/tags/aws_test.go       | 49 +++++++++++++++++++++
 libs/tags/azure.go          | 25 +++++++++++
 libs/tags/azure_test.go     | 34 +++++++++++++++
 libs/tags/cloud.go          | 32 ++++++++++++++
 libs/tags/cloud_test.go     | 32 ++++++++++++++
 libs/tags/gcp.go            | 63 +++++++++++++++++++++++++++
 libs/tags/gcp_test.go       | 65 +++++++++++++++++++++++++++
 libs/tags/latin.go          | 11 +++++
 libs/tags/latin_test.go     | 16 +++++++
 libs/tags/tag.go            | 57 ++++++++++++++++++++++++
 libs/tags/transform.go      | 87 +++++++++++++++++++++++++++++++++++++
 libs/tags/transform_test.go | 25 +++++++++++
 13 files changed, 532 insertions(+)
 create mode 100644 libs/tags/aws.go
 create mode 100644 libs/tags/aws_test.go
 create mode 100644 libs/tags/azure.go
 create mode 100644 libs/tags/azure_test.go
 create mode 100644 libs/tags/cloud.go
 create mode 100644 libs/tags/cloud_test.go
 create mode 100644 libs/tags/gcp.go
 create mode 100644 libs/tags/gcp_test.go
 create mode 100644 libs/tags/latin.go
 create mode 100644 libs/tags/latin_test.go
 create mode 100644 libs/tags/tag.go
 create mode 100644 libs/tags/transform.go
 create mode 100644 libs/tags/transform_test.go

diff --git a/libs/tags/aws.go b/libs/tags/aws.go
new file mode 100644
index 00000000..44d69c68
--- /dev/null
+++ b/libs/tags/aws.go
@@ -0,0 +1,36 @@
+package tags
+
+import (
+	"regexp"
+	"unicode"
+
+	"golang.org/x/text/unicode/rangetable"
+)
+
+// The union of all characters allowed in AWS tags.
+// This must be used only after filtering out non-Latin1 characters,
+// because the [unicode] classes include non-Latin1 characters.
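+// The extra punctuation in the merged table below mirrors the characters
+// permitted by the backend key and value patterns (+ - = . : / @).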
+var awsChars = rangetable.Merge( + unicode.Digit, + unicode.Space, + unicode.Letter, + rangetable.New('+', '-', '=', '.', ':', '/', '@'), +) + +var awsTag = &tag{ + keyLength: 127, + keyPattern: regexp.MustCompile(`^[\d \w\+\-=\.:\/@]*$`), + keyNormalize: chain( + normalizeMarks(), + replaceNotIn(latin1, '_'), + replaceNotIn(awsChars, '_'), + ), + + valueLength: 255, + valuePattern: regexp.MustCompile(`^[\d \w\+\-=\.:/@]*$`), + valueNormalize: chain( + normalizeMarks(), + replaceNotIn(latin1, '_'), + replaceNotIn(awsChars, '_'), + ), +} diff --git a/libs/tags/aws_test.go b/libs/tags/aws_test.go new file mode 100644 index 00000000..2a2bb7e7 --- /dev/null +++ b/libs/tags/aws_test.go @@ -0,0 +1,49 @@ +package tags + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAwsNormalizeKey(t *testing.T) { + assert.Equal(t, "1 a b c", awsTag.NormalizeKey("1 a b c")) + assert.Equal(t, "+-=.:/@__", awsTag.NormalizeKey("+-=.:/@?)")) + assert.Equal(t, "test", awsTag.NormalizeKey("test")) + + // Remove marks; unicode becomes underscore. + assert.Equal(t, "cafe _", awsTag.NormalizeKey("café 🍎")) + + // Replace forbidden characters with underscore. + assert.Equal(t, "cafe __", awsTag.NormalizeKey("café 🍎?")) +} + +func TestAwsNormalizeValue(t *testing.T) { + assert.Equal(t, "1 a b c", awsTag.NormalizeValue("1 a b c")) + assert.Equal(t, "+-=.:/@__", awsTag.NormalizeValue("+-=.:/@?)")) + assert.Equal(t, "test", awsTag.NormalizeValue("test")) + + // Remove marks; unicode becomes underscore. + assert.Equal(t, "cafe _", awsTag.NormalizeValue("café 🍎")) + + // Replace forbidden characters with underscore. + assert.Equal(t, "cafe __", awsTag.NormalizeValue("café 🍎?")) +} + +func TestAwsValidateKey(t *testing.T) { + assert.ErrorContains(t, awsTag.ValidateKey(""), "not be empty") + assert.ErrorContains(t, awsTag.ValidateKey(strings.Repeat("a", 512)), "length") + assert.ErrorContains(t, awsTag.ValidateKey("café 🍎"), "latin") + assert.ErrorContains(t, awsTag.ValidateKey("????"), "pattern") + assert.NoError(t, awsTag.ValidateKey(strings.Repeat("a", 127))) + assert.NoError(t, awsTag.ValidateKey(awsTag.NormalizeKey("café 🍎"))) +} + +func TestAwsValidateValue(t *testing.T) { + assert.ErrorContains(t, awsTag.ValidateValue(strings.Repeat("a", 512)), "length") + assert.ErrorContains(t, awsTag.ValidateValue("café 🍎"), "latin1") + assert.ErrorContains(t, awsTag.ValidateValue("????"), "pattern") + assert.NoError(t, awsTag.ValidateValue(strings.Repeat("a", 127))) + assert.NoError(t, awsTag.ValidateValue(awsTag.NormalizeValue("café 🍎"))) +} diff --git a/libs/tags/azure.go b/libs/tags/azure.go new file mode 100644 index 00000000..e98a5eb2 --- /dev/null +++ b/libs/tags/azure.go @@ -0,0 +1,25 @@ +package tags + +import ( + "regexp" + + "golang.org/x/text/unicode/rangetable" +) + +// All characters that may not be used in Azure tag keys. 
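+// Unlike the AWS and GCP rules, this is a deny-list: normalization replaces
+// exactly these characters (plus anything outside Latin1) with '_'.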
+var azureForbiddenChars = rangetable.New('<', '>', '*', '&', '%', ';', '\\', '/', '+', '?') + +var azureTag = &tag{ + keyLength: 512, + keyPattern: regexp.MustCompile(`^[^<>\*&%;\\\/\+\?]*$`), + keyNormalize: chain( + replaceNotIn(latin1, '_'), + replaceIn(azureForbiddenChars, '_'), + ), + + valueLength: 256, + valuePattern: regexp.MustCompile(`^.*$`), + valueNormalize: chain( + replaceNotIn(latin1, '_'), + ), +} diff --git a/libs/tags/azure_test.go b/libs/tags/azure_test.go new file mode 100644 index 00000000..1deb5d6e --- /dev/null +++ b/libs/tags/azure_test.go @@ -0,0 +1,34 @@ +package tags + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAzureNormalizeKey(t *testing.T) { + assert.Equal(t, "test", azureTag.NormalizeKey("test")) + assert.Equal(t, "café __", azureTag.NormalizeKey("café 🍎?")) +} + +func TestAzureNormalizeValue(t *testing.T) { + assert.Equal(t, "test", azureTag.NormalizeValue("test")) + assert.Equal(t, "café _?", azureTag.NormalizeValue("café 🍎?")) +} + +func TestAzureValidateKey(t *testing.T) { + assert.ErrorContains(t, azureTag.ValidateKey(""), "not be empty") + assert.ErrorContains(t, azureTag.ValidateKey(strings.Repeat("a", 513)), "length") + assert.ErrorContains(t, azureTag.ValidateKey("café 🍎"), "latin") + assert.ErrorContains(t, azureTag.ValidateKey("????"), "pattern") + assert.NoError(t, azureTag.ValidateKey(strings.Repeat("a", 127))) + assert.NoError(t, azureTag.ValidateKey(azureTag.NormalizeKey("café 🍎"))) +} + +func TestAzureValidateValue(t *testing.T) { + assert.ErrorContains(t, azureTag.ValidateValue(strings.Repeat("a", 513)), "length") + assert.ErrorContains(t, azureTag.ValidateValue("café 🍎"), "latin") + assert.NoError(t, azureTag.ValidateValue(strings.Repeat("a", 127))) + assert.NoError(t, azureTag.ValidateValue(azureTag.NormalizeValue("café 🍎"))) +} diff --git a/libs/tags/cloud.go b/libs/tags/cloud.go new file mode 100644 index 00000000..f423efa5 --- /dev/null +++ b/libs/tags/cloud.go @@ -0,0 +1,32 @@ +package tags + +import "github.com/databricks/databricks-sdk-go/config" + +type Cloud interface { + // ValidateKey checks if a tag key can be used with the cloud provider. + ValidateKey(key string) error + + // ValidateValue checks if a tag value can be used with the cloud provider. + ValidateValue(value string) error + + // NormalizeKey normalizes a tag key for the cloud provider. + NormalizeKey(key string) string + + // NormalizeValue normalizes a tag value for the cloud provider. 
+ NormalizeValue(value string) string +} + +func ForCloud(cfg *config.Config) Cloud { + var t *tag + switch { + case cfg.IsAws(): + t = awsTag + case cfg.IsAzure(): + t = azureTag + case cfg.IsGcp(): + t = gcpTag + default: + panic("unknown cloud provider") + } + return t +} diff --git a/libs/tags/cloud_test.go b/libs/tags/cloud_test.go new file mode 100644 index 00000000..a1d04d88 --- /dev/null +++ b/libs/tags/cloud_test.go @@ -0,0 +1,32 @@ +package tags + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/config" + "github.com/stretchr/testify/assert" +) + +func TestForCloudAws(t *testing.T) { + c := &config.Config{ + Host: "https://dbc-XXXXXXXX-YYYY.cloud.databricks.com/", + } + + assert.Equal(t, awsTag, ForCloud(c)) +} + +func TestForCloudAzure(t *testing.T) { + c := &config.Config{ + Host: "https://adb-xxx.y.azuredatabricks.net/", + } + + assert.Equal(t, azureTag, ForCloud(c)) +} + +func TestForCloudGcp(t *testing.T) { + c := &config.Config{ + Host: "https://123.4.gcp.databricks.com/", + } + + assert.Equal(t, gcpTag, ForCloud(c)) +} diff --git a/libs/tags/gcp.go b/libs/tags/gcp.go new file mode 100644 index 00000000..f30ca4ca --- /dev/null +++ b/libs/tags/gcp.go @@ -0,0 +1,63 @@ +package tags + +import ( + "regexp" + "unicode" +) + +// Tag keys and values on GCP are limited to 63 characters and must match the +// regular expression `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$`. +// For normalization, we define one table for the outer characters and +// one table for the inner characters. The outer table is used to trim +// leading and trailing characters, and the inner table is used to +// replace invalid characters with underscores. + +var gcpOuter = &unicode.RangeTable{ + R16: []unicode.Range16{ + // 0-9 + {0x0030, 0x0039, 1}, + // A-Z + {0x0041, 0x005A, 1}, + // a-z + {0x0061, 0x007A, 1}, + }, + LatinOffset: 3, +} + +var gcpInner = &unicode.RangeTable{ + R16: []unicode.Range16{ + // Hyphen-minus (dash) + {0x002D, 0x002D, 1}, + // Full stop (period) + {0x002E, 0x002E, 1}, + // 0-9 + {0x0030, 0x0039, 1}, + // A-Z + {0x0041, 0x005A, 1}, + // Low line (underscore) + {0x005F, 0x005F, 1}, + // a-z + {0x0061, 0x007A, 1}, + }, + LatinOffset: 6, +} + +var gcpTag = &tag{ + keyLength: 63, + keyPattern: regexp.MustCompile(`^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$`), + keyNormalize: chain( + normalizeMarks(), + replaceNotIn(latin1, '_'), + replaceNotIn(gcpInner, '_'), + trimIfNotIn(gcpOuter), + ), + + valueLength: 63, + valuePattern: regexp.MustCompile(`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$`), + valueNormalize: chain( + normalizeMarks(), + replaceNotIn(latin1, '_'), + replaceNotIn(gcpInner, '_'), + trimIfNotIn(gcpOuter), + ), +} diff --git a/libs/tags/gcp_test.go b/libs/tags/gcp_test.go new file mode 100644 index 00000000..89f4fd8e --- /dev/null +++ b/libs/tags/gcp_test.go @@ -0,0 +1,65 @@ +package tags + +import ( + "strings" + "testing" + "unicode" + + "github.com/stretchr/testify/assert" +) + +func TestGcpOuter(t *testing.T) { + assert.True(t, unicode.In('A', gcpOuter)) + assert.True(t, unicode.In('Z', gcpOuter)) + assert.True(t, unicode.In('a', gcpOuter)) + assert.True(t, unicode.In('z', gcpOuter)) + assert.True(t, unicode.In('0', gcpOuter)) + assert.True(t, unicode.In('9', gcpOuter)) + assert.False(t, unicode.In('-', gcpOuter)) + assert.False(t, unicode.In('.', gcpOuter)) + assert.False(t, unicode.In('_', gcpOuter)) + assert.False(t, unicode.In('!', gcpOuter)) +} + +func TestGcpInner(t *testing.T) { + assert.True(t, unicode.In('A', gcpInner)) + assert.True(t, 
unicode.In('Z', gcpInner)) + assert.True(t, unicode.In('a', gcpInner)) + assert.True(t, unicode.In('z', gcpInner)) + assert.True(t, unicode.In('0', gcpInner)) + assert.True(t, unicode.In('9', gcpInner)) + assert.True(t, unicode.In('-', gcpInner)) + assert.True(t, unicode.In('.', gcpInner)) + assert.True(t, unicode.In('_', gcpInner)) + assert.False(t, unicode.In('!', gcpInner)) +} + +func TestGcpNormalizeKey(t *testing.T) { + assert.Equal(t, "test", gcpTag.NormalizeKey("test")) + assert.Equal(t, "cafe", gcpTag.NormalizeKey("café 🍎?")) + assert.Equal(t, "cafe_foo", gcpTag.NormalizeKey("__café_foo__")) + +} + +func TestGcpNormalizeValue(t *testing.T) { + assert.Equal(t, "test", gcpTag.NormalizeValue("test")) + assert.Equal(t, "cafe", gcpTag.NormalizeValue("café 🍎?")) + assert.Equal(t, "cafe_foo", gcpTag.NormalizeValue("__café_foo__")) +} + +func TestGcpValidateKey(t *testing.T) { + assert.ErrorContains(t, gcpTag.ValidateKey(""), "not be empty") + assert.ErrorContains(t, gcpTag.ValidateKey(strings.Repeat("a", 64)), "length") + assert.ErrorContains(t, gcpTag.ValidateKey("café 🍎"), "latin") + assert.ErrorContains(t, gcpTag.ValidateKey("????"), "pattern") + assert.NoError(t, gcpTag.ValidateKey(strings.Repeat("a", 32))) + assert.NoError(t, gcpTag.ValidateKey(gcpTag.NormalizeKey("café 🍎"))) +} + +func TestGcpValidateValue(t *testing.T) { + assert.ErrorContains(t, gcpTag.ValidateValue(strings.Repeat("a", 64)), "length") + assert.ErrorContains(t, gcpTag.ValidateValue("café 🍎"), "latin") + assert.ErrorContains(t, gcpTag.ValidateValue("????"), "pattern") + assert.NoError(t, gcpTag.ValidateValue(strings.Repeat("a", 32))) + assert.NoError(t, gcpTag.ValidateValue(gcpTag.NormalizeValue("café 🍎"))) +} diff --git a/libs/tags/latin.go b/libs/tags/latin.go new file mode 100644 index 00000000..df9ad403 --- /dev/null +++ b/libs/tags/latin.go @@ -0,0 +1,11 @@ +package tags + +import "unicode" + +// Range table for all characters in the Latin1 character set. +var latin1 = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x0000, 0x00ff, 1}, + }, + LatinOffset: 1, +} diff --git a/libs/tags/latin_test.go b/libs/tags/latin_test.go new file mode 100644 index 00000000..c3234a44 --- /dev/null +++ b/libs/tags/latin_test.go @@ -0,0 +1,16 @@ +package tags + +import ( + "testing" + "unicode" + + "github.com/stretchr/testify/assert" +) + +func TestLatinTable(t *testing.T) { + assert.True(t, unicode.In('\u0000', latin1)) + assert.True(t, unicode.In('A', latin1)) + assert.True(t, unicode.In('Z', latin1)) + assert.True(t, unicode.In('\u00ff', latin1)) + assert.False(t, unicode.In('\u0100', latin1)) +} diff --git a/libs/tags/tag.go b/libs/tags/tag.go new file mode 100644 index 00000000..4e9b329c --- /dev/null +++ b/libs/tags/tag.go @@ -0,0 +1,57 @@ +package tags + +import ( + "fmt" + "regexp" + "strings" + "unicode" +) + +// The tag type holds the validation and normalization rules for +// a cloud provider's resource tags as applied by Databricks. 
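+// Keys and values carry independent length limits, validation patterns, and
+// normalization chains, because the clouds constrain them differently.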
+type tag struct { + keyLength int + keyPattern *regexp.Regexp + keyNormalize transformer + + valueLength int + valuePattern *regexp.Regexp + valueNormalize transformer +} + +func (t *tag) ValidateKey(s string) error { + if len(s) == 0 { + return fmt.Errorf("key must not be empty") + } + if len(s) > t.keyLength { + return fmt.Errorf("key length %d exceeds maximum of %d", len(s), t.keyLength) + } + if strings.ContainsFunc(s, func(r rune) bool { return !unicode.Is(latin1, r) }) { + return fmt.Errorf("key contains non-latin1 characters") + } + if !t.keyPattern.MatchString(s) { + return fmt.Errorf("key %q does not match pattern %q", s, t.keyPattern) + } + return nil +} + +func (t *tag) ValidateValue(s string) error { + if len(s) > t.valueLength { + return fmt.Errorf("value length %d exceeds maximum of %d", len(s), t.valueLength) + } + if strings.ContainsFunc(s, func(r rune) bool { return !unicode.Is(latin1, r) }) { + return fmt.Errorf("value contains non-latin1 characters") + } + if !t.valuePattern.MatchString(s) { + return fmt.Errorf("value %q does not match pattern %q", s, t.valuePattern) + } + return nil +} + +func (t *tag) NormalizeKey(s string) string { + return t.keyNormalize.transform(s) +} + +func (t *tag) NormalizeValue(s string) string { + return t.valueNormalize.transform(s) +} diff --git a/libs/tags/transform.go b/libs/tags/transform.go new file mode 100644 index 00000000..71d01b35 --- /dev/null +++ b/libs/tags/transform.go @@ -0,0 +1,87 @@ +package tags + +import ( + "strings" + "unicode" + + "golang.org/x/text/runes" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +type transformer interface { + transform(string) string +} + +type chainTransformer []transformer + +func (c chainTransformer) transform(s string) string { + for _, t := range c { + s = t.transform(s) + } + return s +} + +func chain(t ...transformer) transformer { + return chainTransformer(t) +} + +// Implement [transformer] interface with text/transform package. +type textTransformer struct { + transform.Transformer +} + +func (t textTransformer) transform(s string) string { + s, _, _ = transform.String(t, s) + return s +} + +func normalizeMarks() transformer { + // Decompose unicode characters, then remove all non-spacing marks, then recompose. + // This turns 'é' into 'e' and 'ü' into 'u'. + return textTransformer{ + transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC), + } +} + +// Replaces characters in the given set with replacement. +type replaceTransformer struct { + set runes.Set + replacement rune +} + +func (t replaceTransformer) transform(s string) string { + return strings.Map(func(r rune) rune { + if t.set.Contains(r) { + return t.replacement + } + return r + }, s) +} + +func replaceIn(table *unicode.RangeTable, replacement rune) transformer { + return replaceTransformer{runes.In(table), replacement} +} + +func replaceNotIn(table *unicode.RangeTable, replacement rune) transformer { + return replaceTransformer{runes.NotIn(table), replacement} +} + +// Trims the given string of all characters in the given set. 
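+// The GCP rules rely on this (via trimIfNotIn) because GCP keys and values
+// must start and end with an alphanumeric character.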
+type trimTransformer struct { + set runes.Set +} + +func (t trimTransformer) transform(s string) string { + return strings.TrimFunc(s, func(r rune) bool { + return t.set.Contains(r) + }) +} + +func trimIfIn(table *unicode.RangeTable) transformer { + return trimTransformer{runes.In(table)} +} + +func trimIfNotIn(table *unicode.RangeTable) transformer { + return trimTransformer{runes.NotIn(table)} +} diff --git a/libs/tags/transform_test.go b/libs/tags/transform_test.go new file mode 100644 index 00000000..6481b6d9 --- /dev/null +++ b/libs/tags/transform_test.go @@ -0,0 +1,25 @@ +package tags + +import ( + "testing" + "unicode" + + "github.com/stretchr/testify/assert" +) + +func TestNormalizeMarks(t *testing.T) { + x := normalizeMarks() + assert.Equal(t, "cafe", x.transform("café")) + assert.Equal(t, "cafe 🍎", x.transform("café 🍎")) + assert.Equal(t, "Foo Bar", x.transform("Foo Bar")) +} + +func TestReplace(t *testing.T) { + assert.Equal(t, "___abc___", replaceIn(unicode.Digit, '_').transform("000abc999")) + assert.Equal(t, "___000___", replaceNotIn(unicode.Digit, '_').transform("abc000abc")) +} + +func TestTrim(t *testing.T) { + assert.Equal(t, "abc", trimIfIn(unicode.Digit).transform("000abc999")) + assert.Equal(t, "000", trimIfNotIn(unicode.Digit).transform("abc000abc")) +} From 775251d0dc2ff053d3f7880a7e78b5bca7b93100 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 29 Sep 2023 14:19:05 +0200 Subject: [PATCH 122/310] Emit an error when incompatible all purpose cluster used with Python wheel tasks (#823) ## Changes Follow up for https://github.com/databricks/cli/pull/807 to also validate configuration if existing cluster id is used. ## Tests Added unit tests --- bundle/python/warning.go | 29 ++++++- bundle/python/warning_test.go | 158 ++++++++++++++++++++++++++++++++++ 2 files changed, 185 insertions(+), 2 deletions(-) diff --git a/bundle/python/warning.go b/bundle/python/warning.go index 9a718ae7..443b8fd2 100644 --- a/bundle/python/warning.go +++ b/bundle/python/warning.go @@ -2,11 +2,13 @@ package python import ( "context" + "fmt" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "golang.org/x/mod/semver" ) @@ -19,7 +21,7 @@ func WrapperWarning() bundle.Mutator { func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) error { if hasIncompatibleWheelTasks(ctx, b) { - cmdio.LogString(ctx, "Python wheel tasks with local libraries require compute with DBR 13.1+. Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") + return fmt.Errorf("python wheel tasks with local libraries require compute with DBR 13.1+. 
Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") } return nil } @@ -44,6 +46,20 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool { } } } + + if task.ExistingClusterId != "" { + version, err := getSparkVersionForCluster(ctx, b.WorkspaceClient(), task.ExistingClusterId) + + // If there's error getting spark version for cluster, do not mark it as incompatible + if err != nil { + log.Warnf(ctx, "unable to get spark version for cluster %s, err: %s", task.ExistingClusterId, err.Error()) + return false + } + + if lowerThanExpectedVersion(ctx, version) { + return true + } + } } return false @@ -63,3 +79,12 @@ func lowerThanExpectedVersion(ctx context.Context, sparkVersion string) bool { func (m *wrapperWarning) Name() string { return "PythonWrapperWarning" } + +func getSparkVersionForCluster(ctx context.Context, w *databricks.WorkspaceClient, clusterId string) (string, error) { + details, err := w.Clusters.GetByClusterId(ctx, clusterId) + if err != nil { + return "", err + } + + return details.SparkVersion, nil +} diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index 46bbd656..83bc142f 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -12,6 +12,117 @@ import ( "github.com/stretchr/testify/require" ) +type MockClusterService struct{} + +// ChangeOwner implements compute.ClustersService. +func (MockClusterService) ChangeOwner(ctx context.Context, request compute.ChangeClusterOwner) error { + panic("unimplemented") +} + +// Create implements compute.ClustersService. +func (MockClusterService) Create(ctx context.Context, request compute.CreateCluster) (*compute.CreateClusterResponse, error) { + panic("unimplemented") +} + +// Delete implements compute.ClustersService. +func (MockClusterService) Delete(ctx context.Context, request compute.DeleteCluster) error { + panic("unimplemented") +} + +// Edit implements compute.ClustersService. +func (MockClusterService) Edit(ctx context.Context, request compute.EditCluster) error { + panic("unimplemented") +} + +// Events implements compute.ClustersService. +func (MockClusterService) Events(ctx context.Context, request compute.GetEvents) (*compute.GetEventsResponse, error) { + panic("unimplemented") +} + +// Get implements compute.ClustersService. +func (MockClusterService) Get(ctx context.Context, request compute.GetClusterRequest) (*compute.ClusterDetails, error) { + clusterDetails := map[string]*compute.ClusterDetails{ + "test-key-1": { + SparkVersion: "12.2.x-scala2.12", + }, + "test-key-2": { + SparkVersion: "13.2.x-scala2.12", + }, + } + + return clusterDetails[request.ClusterId], nil +} + +// GetPermissionLevels implements compute.ClustersService. +func (MockClusterService) GetPermissionLevels(ctx context.Context, request compute.GetClusterPermissionLevelsRequest) (*compute.GetClusterPermissionLevelsResponse, error) { + panic("unimplemented") +} + +// GetPermissions implements compute.ClustersService. +func (MockClusterService) GetPermissions(ctx context.Context, request compute.GetClusterPermissionsRequest) (*compute.ClusterPermissions, error) { + panic("unimplemented") +} + +// List implements compute.ClustersService. +func (MockClusterService) List(ctx context.Context, request compute.ListClustersRequest) (*compute.ListClustersResponse, error) { + panic("unimplemented") +} + +// ListNodeTypes implements compute.ClustersService. 
+func (MockClusterService) ListNodeTypes(ctx context.Context) (*compute.ListNodeTypesResponse, error) { + panic("unimplemented") +} + +// ListZones implements compute.ClustersService. +func (MockClusterService) ListZones(ctx context.Context) (*compute.ListAvailableZonesResponse, error) { + panic("unimplemented") +} + +// PermanentDelete implements compute.ClustersService. +func (MockClusterService) PermanentDelete(ctx context.Context, request compute.PermanentDeleteCluster) error { + panic("unimplemented") +} + +// Pin implements compute.ClustersService. +func (MockClusterService) Pin(ctx context.Context, request compute.PinCluster) error { + panic("unimplemented") +} + +// Resize implements compute.ClustersService. +func (MockClusterService) Resize(ctx context.Context, request compute.ResizeCluster) error { + panic("unimplemented") +} + +// Restart implements compute.ClustersService. +func (MockClusterService) Restart(ctx context.Context, request compute.RestartCluster) error { + panic("unimplemented") +} + +// SetPermissions implements compute.ClustersService. +func (MockClusterService) SetPermissions(ctx context.Context, request compute.ClusterPermissionsRequest) (*compute.ClusterPermissions, error) { + panic("unimplemented") +} + +// SparkVersions implements compute.ClustersService. +func (MockClusterService) SparkVersions(ctx context.Context) (*compute.GetSparkVersionsResponse, error) { + panic("unimplemented") +} + +// Start implements compute.ClustersService. +func (MockClusterService) Start(ctx context.Context, request compute.StartCluster) error { + panic("unimplemented") +} + +// Unpin implements compute.ClustersService. +func (MockClusterService) Unpin(ctx context.Context, request compute.UnpinCluster) error { + panic("unimplemented") +} + +// UpdatePermissions implements compute.ClustersService. 
+func (MockClusterService) UpdatePermissions(ctx context.Context, request compute.ClusterPermissionsRequest) (*compute.ClusterPermissions, error) { + panic("unimplemented") +} + func TestIncompatibleWheelTasksWithNewCluster(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ @@ -100,6 +211,43 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) { require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) } +func TestIncompatibleWheelTasksWithExistingClusterId(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{}, + ExistingClusterId: "test-key-1", + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + { + TaskKey: "key2", + PythonWheelTask: &jobs.PythonWheelTask{}, + ExistingClusterId: "test-key-2", + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + b.WorkspaceClient().Clusters.WithImpl(MockClusterService{}) + + require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) +} + func TestNoIncompatibleWheelTasks(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ @@ -168,6 +316,14 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { {Whl: "./dist/test.whl"}, }, }, + { + TaskKey: "key6", + PythonWheelTask: &jobs.PythonWheelTask{}, + ExistingClusterId: "test-key-2", + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, }, }, }, @@ -176,6 +332,8 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { }, } + b.WorkspaceClient().Clusters.WithImpl(MockClusterService{}) + require.False(t, hasIncompatibleWheelTasks(context.Background(), b)) } From f1b068cefe04fd9d9cbbc5ce32b75e9e1e7cc2aa Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 2 Oct 2023 08:58:51 +0200 Subject: [PATCH 123/310] Use normalized short name for tag value in development mode (#821) ## Changes The jobs backend propagates job tags to the underlying cloud provider's resources. As such, they need to match the constraints a cloud provider places on tag values. The display name can contain anything. With this change, we modify the tag value to equal the short name as used in the name prefix. Additionally, we leverage tag normalization as introduced in #819 to make sure characters that aren't accepted are removed before using the value as a tag value. This is a new stab at #810 and should completely eliminate this class of problems. ## Tests Tests pass. --- bundle/bundle.go | 5 ++ .../config/mutator/populate_current_user.go | 5 ++ bundle/config/mutator/process_target_mode.go | 10 ++- .../mutator/process_target_mode_test.go | 65 ++++++++++++++++++- libs/template/renderer_test.go | 2 + 5 files changed, 83 insertions(+), 4 deletions(-) diff --git a/bundle/bundle.go b/bundle/bundle.go index 61bf1ffe..e1625179 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -19,6 +19,7 @@ import ( "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/tags" "github.com/databricks/cli/libs/terraform" "github.com/databricks/databricks-sdk-go" sdkconfig "github.com/databricks/databricks-sdk-go/config" @@ -46,6 +47,10 @@ type Bundle struct { // if true, we skip approval checks for deploy, destroy resources and delete // files AutoApprove bool + + // Tagging is used to normalize tag keys and values. 
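+	// It is configured by the PopulateCurrentUser mutator once a workspace
+	// client is available (see tags.ForCloud).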
+ // The implementation depends on the cloud being targeted. + Tagging tags.Cloud } func Load(ctx context.Context, path string) (*Bundle, error) { diff --git a/bundle/config/mutator/populate_current_user.go b/bundle/config/mutator/populate_current_user.go index b604d671..5b5d3096 100644 --- a/bundle/config/mutator/populate_current_user.go +++ b/bundle/config/mutator/populate_current_user.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/tags" ) type populateCurrentUser struct{} @@ -35,6 +36,10 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error ShortName: getShortUserName(me.UserName), User: me, } + + // Configure tagging object now that we know we have a valid client. + b.Tagging = tags.ForCloud(w.Config) + return nil } diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 93149ad0..2f80fe3b 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -32,14 +32,18 @@ func (m *processTargetMode) Name() string { func transformDevelopmentMode(b *bundle.Bundle) error { r := b.Config.Resources - prefix := "[dev " + b.Config.Workspace.CurrentUser.ShortName + "] " + shortName := b.Config.Workspace.CurrentUser.ShortName + prefix := "[dev " + shortName + "] " + + // Generate a normalized version of the short name that can be used as a tag value. + tagValue := b.Tagging.NormalizeValue(shortName) for i := range r.Jobs { r.Jobs[i].Name = prefix + r.Jobs[i].Name if r.Jobs[i].Tags == nil { r.Jobs[i].Tags = make(map[string]string) } - r.Jobs[i].Tags["dev"] = b.Config.Workspace.CurrentUser.DisplayName + r.Jobs[i].Tags["dev"] = tagValue if r.Jobs[i].MaxConcurrentRuns == 0 { r.Jobs[i].MaxConcurrentRuns = developmentConcurrentRuns } @@ -74,7 +78,7 @@ func transformDevelopmentMode(b *bundle.Bundle) error { } else { r.Experiments[i].Name = dir + "/" + prefix + base } - r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: b.Config.Workspace.CurrentUser.DisplayName}) + r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: tagValue}) } for i := range r.ModelServingEndpoints { diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 4ea33c70..a0b2bac8 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -9,6 +9,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/tags" + sdkconfig "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" @@ -59,6 +61,10 @@ func mockBundle(mode config.Mode) *bundle.Bundle { }, }, }, + // Use AWS implementation for testing. 
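+		// (a *.cloud.databricks.com host makes tags.ForCloud pick the AWS rules)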
+ Tagging: tags.ForCloud(&sdkconfig.Config{ + Host: "https://company.cloud.databricks.com", + }), } } @@ -68,14 +74,71 @@ func TestProcessTargetModeDevelopment(t *testing.T) { m := ProcessTargetMode() err := m.Apply(context.Background(), bundle) require.NoError(t, err) + + // Job 1 assert.Equal(t, "[dev lennart] job1", bundle.Config.Resources.Jobs["job1"].Name) + assert.Equal(t, bundle.Config.Resources.Jobs["job1"].Tags["dev"], "lennart") + + // Pipeline 1 assert.Equal(t, "[dev lennart] pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name) + assert.True(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) + + // Experiment 1 assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev lennart] experiment1", bundle.Config.Resources.Experiments["experiment1"].Name) + assert.Contains(t, bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"}) + + // Experiment 2 assert.Equal(t, "[dev lennart] experiment2", bundle.Config.Resources.Experiments["experiment2"].Name) + assert.Contains(t, bundle.Config.Resources.Experiments["experiment2"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"}) + + // Model 1 assert.Equal(t, "[dev lennart] model1", bundle.Config.Resources.Models["model1"].Name) + + // Model serving endpoint 1 assert.Equal(t, "dev_lennart_servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "dev", bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key) - assert.True(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) +} + +func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { + bundle := mockBundle(config.Development) + bundle.Tagging = tags.ForCloud(&sdkconfig.Config{ + Host: "https://dbc-XXXXXXXX-YYYY.cloud.databricks.com/", + }) + + bundle.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" + err := ProcessTargetMode().Apply(context.Background(), bundle) + require.NoError(t, err) + + // Assert that tag normalization took place. + assert.Equal(t, "Hello world__", bundle.Config.Resources.Jobs["job1"].Tags["dev"]) +} + +func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) { + bundle := mockBundle(config.Development) + bundle.Tagging = tags.ForCloud(&sdkconfig.Config{ + Host: "https://adb-xxx.y.azuredatabricks.net/", + }) + + bundle.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" + err := ProcessTargetMode().Apply(context.Background(), bundle) + require.NoError(t, err) + + // Assert that tag normalization took place (Azure allows more characters than AWS). + assert.Equal(t, "Héllö wörld?!", bundle.Config.Resources.Jobs["job1"].Tags["dev"]) +} + +func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) { + bundle := mockBundle(config.Development) + bundle.Tagging = tags.ForCloud(&sdkconfig.Config{ + Host: "https://123.4.gcp.databricks.com/", + }) + + bundle.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" + err := ProcessTargetMode().Apply(context.Background(), bundle) + require.NoError(t, err) + + // Assert that tag normalization took place. 
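+	// The trailing "?!" is trimmed entirely because GCP tags must end with
+	// an alphanumeric character.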
+ assert.Equal(t, "Hello_world", bundle.Config.Resources.Jobs["job1"].Tags["dev"]) } func TestProcessTargetModeDefault(t *testing.T) { diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 070fc5d2..254b06cf 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -17,6 +17,7 @@ import ( "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/tags" "github.com/databricks/databricks-sdk-go" workspaceConfig "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/service/iam" @@ -66,6 +67,7 @@ func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target st // Apply initialize / validation mutators b.Config.Workspace.CurrentUser = &bundleConfig.User{User: cachedUser} + b.Tagging = tags.ForCloud(w.Config) b.WorkspaceClient() b.Config.Bundle.Terraform = &bundleConfig.Terraform{ ExecPath: "sh", From 7d0f170eee721ff5d8f1d98c4c515d1a77fbbf90 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:47:09 +0000 Subject: [PATCH 124/310] Added `python.DetectInterpreters` and other utils (#805) This PR adds a few utilities related to Python interpreter detection: - `python.DetectInterpreters` to detect all Python versions available in `$PATH` by executing every matched binary name with `--version` flag. - `python.DetectVirtualEnvPath` to detect if there's any child virtual environment in `src` directory - `python.DetectExecutable` to detect if there's python3 installed either by `which python3` command or by calling `python.DetectInterpreters().AtLeast("v3.8")` To be merged after https://github.com/databricks/cli/pull/804, as one of the steps to get https://github.com/databricks/cli/pull/637 in, as previously discussed. 
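For illustration, a minimal sketch of how these helpers compose (not part of
this change; assumes a caller that simply needs a recent interpreter):

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/python"
)

func main() {
	ctx := context.Background()

	// Prefers `python3` from $PATH, falling back to scanning for the
	// lowest interpreter that still satisfies the 3.8 minimum.
	py, err := python.DetectExecutable(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("python3:", py)

	// Or enumerate all interpreters explicitly (sorted by version).
	all, err := python.DetectInterpreters(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("latest:", all.Latest()) // e.g. "v3.11.4 (/usr/bin/python3.11)"
	if i, err := all.AtLeast("3.10"); err == nil {
		fmt.Println("first >= 3.10:", i.Path)
	}
}
```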
--- bundle/artifacts/whl/build.go | 2 +- bundle/artifacts/whl/infer.go | 2 +- libs/python/detect.go | 34 +++ libs/python/detect_unix_test.go | 39 ++++ libs/python/detect_win_test.go | 24 ++ libs/python/interpreters.go | 216 ++++++++++++++++++ libs/python/interpreters_unix_test.go | 95 ++++++++ libs/python/interpreters_win_test.go | 28 +++ libs/python/testdata/no-python3/python | 6 + libs/python/testdata/no-python3/python3.6 | 3 + libs/python/testdata/no-python3/pythonw | 5 + .../testdata/other-binaries-filtered/python | 6 + .../other-binaries-filtered/python3-whatever | 4 + .../other-binaries-filtered/python3.10 | 3 + .../other-binaries-filtered/python3.10.100 | 3 + .../other-binaries-filtered/python3.11 | 1 + .../other-binaries-filtered/python4.8 | 1 + .../testdata/other-binaries-filtered/python5 | 5 + .../testdata/other-binaries-filtered/python6 | 3 + .../testdata/other-binaries-filtered/python7 | 4 + .../testdata/other-binaries-filtered/pythonw | 5 + .../other-binaries-filtered/real-python3.11.4 | 3 + .../testdata/other-binaries-filtered/whatever | 4 + .../some-dir-with-venv/.venv/pyvenv.cfg | 8 + .../testdata/some-dir-with-venv/__main__.py | 2 + .../python/testdata/world-writeable/python8.4 | 3 + {python => libs/python}/utils.go | 2 - {python => libs/python}/utils_test.go | 0 libs/python/venv.go | 35 +++ libs/python/venv_test.go | 33 +++ python/env.go | 101 -------- python/env_test.go | 41 ---- python/runner.go | 149 ------------ python/runner_test.go | 94 -------- .../simple-python-wheel/databricks.yml | 4 - .../simple-python-wheel/dummy/__init__.py | 0 .../simple-python-wheel/dummy/transforms.py | 1 - python/testdata/simple-python-wheel/setup.py | 8 - python/wheel.go | 93 -------- python/wheel_test.go | 40 ---- 40 files changed, 575 insertions(+), 535 deletions(-) create mode 100644 libs/python/detect.go create mode 100644 libs/python/detect_unix_test.go create mode 100644 libs/python/detect_win_test.go create mode 100644 libs/python/interpreters.go create mode 100644 libs/python/interpreters_unix_test.go create mode 100644 libs/python/interpreters_win_test.go create mode 100755 libs/python/testdata/no-python3/python create mode 100755 libs/python/testdata/no-python3/python3.6 create mode 100755 libs/python/testdata/no-python3/pythonw create mode 100755 libs/python/testdata/other-binaries-filtered/python create mode 100755 libs/python/testdata/other-binaries-filtered/python3-whatever create mode 100755 libs/python/testdata/other-binaries-filtered/python3.10 create mode 100755 libs/python/testdata/other-binaries-filtered/python3.10.100 create mode 120000 libs/python/testdata/other-binaries-filtered/python3.11 create mode 120000 libs/python/testdata/other-binaries-filtered/python4.8 create mode 100755 libs/python/testdata/other-binaries-filtered/python5 create mode 100755 libs/python/testdata/other-binaries-filtered/python6 create mode 100755 libs/python/testdata/other-binaries-filtered/python7 create mode 100755 libs/python/testdata/other-binaries-filtered/pythonw create mode 100755 libs/python/testdata/other-binaries-filtered/real-python3.11.4 create mode 100755 libs/python/testdata/other-binaries-filtered/whatever create mode 100644 libs/python/testdata/some-dir-with-venv/.venv/pyvenv.cfg create mode 100644 libs/python/testdata/some-dir-with-venv/__main__.py create mode 100755 libs/python/testdata/world-writeable/python8.4 rename {python => libs/python}/utils.go (95%) rename {python => libs/python}/utils_test.go (100%) create mode 100644 libs/python/venv.go create mode 100644 
libs/python/venv_test.go delete mode 100644 python/env.go delete mode 100644 python/env_test.go delete mode 100644 python/runner.go delete mode 100644 python/runner_test.go delete mode 100644 python/testdata/simple-python-wheel/databricks.yml delete mode 100644 python/testdata/simple-python-wheel/dummy/__init__.py delete mode 100644 python/testdata/simple-python-wheel/dummy/transforms.py delete mode 100644 python/testdata/simple-python-wheel/setup.py delete mode 100644 python/wheel.go delete mode 100644 python/wheel_test.go diff --git a/bundle/artifacts/whl/build.go b/bundle/artifacts/whl/build.go index 4565a4c8..6ebc925f 100644 --- a/bundle/artifacts/whl/build.go +++ b/bundle/artifacts/whl/build.go @@ -9,7 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/python" + "github.com/databricks/cli/libs/python" ) type build struct { diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index 518d926c..1c0e9857 100644 --- a/bundle/artifacts/whl/infer.go +++ b/bundle/artifacts/whl/infer.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/python" + "github.com/databricks/cli/libs/python" ) type infer struct { diff --git a/libs/python/detect.go b/libs/python/detect.go new file mode 100644 index 00000000..b0c1475c --- /dev/null +++ b/libs/python/detect.go @@ -0,0 +1,34 @@ +package python + +import ( + "context" + "errors" + "os/exec" +) + +func DetectExecutable(ctx context.Context) (string, error) { + // TODO: add a shortcut if .python-version file is detected somewhere in + // the parent directory tree. + // + // See https://github.com/pyenv/pyenv#understanding-python-version-selection + out, err := exec.LookPath("python3") + // most of the OS'es have python3 in $PATH, but for those which don't, + // we perform the latest version lookup + if err != nil && !errors.Is(err, exec.ErrNotFound) { + return "", err + } + if out != "" { + return out, nil + } + // otherwise, detect all interpreters and pick the least that satisfies + // minimal version requirements + all, err := DetectInterpreters(ctx) + if err != nil { + return "", err + } + interpreter, err := all.AtLeast("3.8") + if err != nil { + return "", err + } + return interpreter.Path, nil +} diff --git a/libs/python/detect_unix_test.go b/libs/python/detect_unix_test.go new file mode 100644 index 00000000..a962e1f5 --- /dev/null +++ b/libs/python/detect_unix_test.go @@ -0,0 +1,39 @@ +//go:build unix + +package python + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDetectsViaPathLookup(t *testing.T) { + ctx := context.Background() + py, err := DetectExecutable(ctx) + assert.NoError(t, err) + assert.NotEmpty(t, py) +} + +func TestDetectsViaListing(t *testing.T) { + t.Setenv("PATH", "testdata/other-binaries-filtered") + ctx := context.Background() + py, err := DetectExecutable(ctx) + assert.NoError(t, err) + assert.Equal(t, "testdata/other-binaries-filtered/python3.10", py) +} + +func TestDetectFailsNoInterpreters(t *testing.T) { + t.Setenv("PATH", "testdata") + ctx := context.Background() + _, err := DetectExecutable(ctx) + assert.Equal(t, ErrNoPythonInterpreters, err) +} + +func TestDetectFailsNoMinimalVersion(t *testing.T) { + t.Setenv("PATH", "testdata/no-python3") + ctx := context.Background() + _, err := DetectExecutable(ctx) + assert.EqualError(t, err, "cannot find Python greater or equal to v3.8.0") +} diff --git 
a/libs/python/detect_win_test.go b/libs/python/detect_win_test.go new file mode 100644 index 00000000..2ef811a4 --- /dev/null +++ b/libs/python/detect_win_test.go @@ -0,0 +1,24 @@ +//go:build windows + +package python + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDetectsViaPathLookup(t *testing.T) { + ctx := context.Background() + py, err := DetectExecutable(ctx) + assert.NoError(t, err) + assert.NotEmpty(t, py) +} + +func TestDetectFailsNoInterpreters(t *testing.T) { + t.Setenv("PATH", "testdata") + ctx := context.Background() + _, err := DetectExecutable(ctx) + assert.ErrorIs(t, err, ErrNoPythonInterpreters) +} diff --git a/libs/python/interpreters.go b/libs/python/interpreters.go new file mode 100644 index 00000000..94f5074d --- /dev/null +++ b/libs/python/interpreters.go @@ -0,0 +1,216 @@ +package python + +import ( + "context" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/process" + "golang.org/x/mod/semver" +) + +var ErrNoPythonInterpreters = errors.New("no python3 interpreters found") + +const officialMswinPython = "(Python Official) https://python.org/downloads/windows" + +const microsoftStorePython = "(Microsoft Store) https://apps.microsoft.com/store/search?publisher=Python%20Software%20Foundation" + +const worldWriteable = 0o002 + +type Interpreter struct { + Version string + Path string +} + +func (i Interpreter) String() string { + return fmt.Sprintf("%s (%s)", i.Version, i.Path) +} + +type allInterpreters []Interpreter + +func (a allInterpreters) Latest() Interpreter { + return a[len(a)-1] +} + +func (a allInterpreters) AtLeast(minimalVersion string) (*Interpreter, error) { + canonicalMinimalVersion := semver.Canonical("v" + strings.TrimPrefix(minimalVersion, "v")) + if canonicalMinimalVersion == "" { + return nil, fmt.Errorf("invalid SemVer: %s", minimalVersion) + } + for _, interpreter := range a { + cmp := semver.Compare(interpreter.Version, canonicalMinimalVersion) + if cmp < 0 { + continue + } + return &interpreter, nil + } + return nil, fmt.Errorf("cannot find Python greater or equal to %s", canonicalMinimalVersion) +} + +func DetectInterpreters(ctx context.Context) (allInterpreters, error) { + found := allInterpreters{} + seen := map[string]bool{} + executables, err := pythonicExecutablesFromPathEnvironment(ctx) + if err != nil { + return nil, err + } + log.Debugf(ctx, "found %d potential alternative Python versions in $PATH", len(executables)) + for _, resolved := range executables { + if seen[resolved] { + continue + } + seen[resolved] = true + // probe the binary version by executing it, like `python --version` + // and parsing the output. + // + // Keep in mind that mswin installations get python.exe and pythonw.exe, + // which are slightly different: see https://stackoverflow.com/a/30313091 + out, err := process.Background(ctx, []string{resolved, "--version"}) + var processErr *process.ProcessError + if errors.As(err, &processErr) { + log.Debugf(ctx, "failed to check version for %s: %s", resolved, processErr.Err) + continue + } + if err != nil { + log.Debugf(ctx, "failed to check version for %s: %s", resolved, err) + continue + } + version := validPythonVersion(ctx, resolved, out) + if version == "" { + continue + } + found = append(found, Interpreter{ + Version: version, + Path: resolved, + }) + } + if runtime.GOOS == "windows" && len(found) == 0 { + return nil, fmt.Errorf("%w.
Install them from %s or %s and restart the shell", + ErrNoPythonInterpreters, officialMswinPython, microsoftStorePython) + } + if len(found) == 0 { + return nil, ErrNoPythonInterpreters + } + sort.Slice(found, func(i, j int) bool { + a := found[i].Version + b := found[j].Version + cmp := semver.Compare(a, b) + if cmp != 0 { + return cmp < 0 + } + return a < b + }) + return found, nil +} + +func pythonicExecutablesFromPathEnvironment(ctx context.Context) (out []string, err error) { + paths := strings.Split(os.Getenv("PATH"), string(os.PathListSeparator)) + for _, prefix := range paths { + info, err := os.Stat(prefix) + if errors.Is(err, fs.ErrNotExist) { + // some directories in $PATH may not exist + continue + } + if errors.Is(err, fs.ErrPermission) { + // some directories we cannot list + continue + } + if err != nil { + return nil, fmt.Errorf("stat %s: %w", prefix, err) + } + if !info.IsDir() { + continue + } + perm := info.Mode().Perm() + if runtime.GOOS != "windows" && perm&worldWriteable != 0 { + // we try not to run any Python binary that sits in a folder writable by all users. + // this is mainly to avoid breaking the security model on a multi-user system. + // If $PATH points somewhere untrusted, that is the user's fault, but we can + // still help here. + // + // See https://github.com/databricks/cli/pull/805#issuecomment-1735403952 + log.Debugf(ctx, "%s is world-writeable (%s), skipping for security reasons", prefix, perm) + continue + } + entries, err := os.ReadDir(prefix) + if errors.Is(err, fs.ErrPermission) { + // some directories we cannot list + continue + } + if err != nil { + return nil, fmt.Errorf("listing %s: %w", prefix, err) + } + for _, v := range entries { + if v.IsDir() { + continue + } + if strings.Contains(v.Name(), "-") { + // skip python3-config, python3.10-config, etc + continue + } + // If Python3 is installed on Windows through the GUI installer app that was + // downloaded from https://python.org/downloads/windows, it may appear + // in $PATH as `python`, even though it means Python 2.7 in all other + // operating systems (macOS, Linux). + // + // See https://github.com/databrickslabs/ucx/issues/281 + if !strings.HasPrefix(v.Name(), "python") { + continue + } + bin := filepath.Join(prefix, v.Name()) + resolved, err := filepath.EvalSymlinks(bin) + if err != nil { + // log the resolution error, not the (empty) resolved path + log.Debugf(ctx, "cannot resolve symlink for %s: %s", bin, err) + continue + } + out = append(out, resolved) + } + } + return out, nil +} + +func validPythonVersion(ctx context.Context, resolved, out string) string { + out = strings.TrimSpace(out) + log.Debugf(ctx, "%s --version: %s", resolved, out) + + words := strings.Split(out, " ") + // The Python distribution from the Windows Store is available in $PATH as `python.exe` + // and `python3.exe`, even though it symlinks to a real file packaged with some versions of Windows: + // /c/Program Files/WindowsApps/Microsoft.DesktopAppInstaller_.../AppInstallerPythonRedirector.exe. + // Executing the `python` command from this distribution opens the Windows Store, allowing users to + // download and install Python. Once installed, it replaces the `python.exe` and `python3.exe` stub + // with the genuine Python executable. Additionally, once the user installs from the main installer at + // https://python.org/downloads/windows, it does not replace this stub. + // + // However, a drawback is that if this initial stub is run with any command line arguments, it quietly + // fails to execute.
According to https://github.com/databrickslabs/ucx/issues/281, it can be + // detected by seeing just the "Python" output without any version info from the `python --version` + // command execution. + // + // See https://github.com/pypa/packaging-problems/issues/379 + // See https://bugs.python.org/issue41327 + if len(words) < 2 { + log.Debugf(ctx, "%s --version: stub from Windows Store", resolved) + return "" + } + + if words[0] != "Python" { + log.Debugf(ctx, "%s --version: not a Python", resolved) + return "" + } + + lastWord := words[len(words)-1] + version := semver.Canonical("v" + lastWord) + if version == "" { + log.Debugf(ctx, "%s --version: invalid SemVer: %s", resolved, lastWord) + return "" + } + + return version +} diff --git a/libs/python/interpreters_unix_test.go b/libs/python/interpreters_unix_test.go new file mode 100644 index 00000000..e2b0a5a1 --- /dev/null +++ b/libs/python/interpreters_unix_test.go @@ -0,0 +1,95 @@ +//go:build unix + +package python + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAtLeastOnePythonInstalled(t *testing.T) { + ctx := context.Background() + all, err := DetectInterpreters(ctx) + assert.NoError(t, err) + a := all.Latest() + t.Logf("latest is: %s", a) + assert.True(t, len(all) > 0) +} + +func TestNoInterpretersFound(t *testing.T) { + t.Setenv("PATH", t.TempDir()) + + ctx := context.Background() + all, err := DetectInterpreters(ctx) + assert.Nil(t, all) + assert.Equal(t, ErrNoPythonInterpreters, err) +} + +func TestFilteringInterpreters(t *testing.T) { + rogueBin := filepath.Join(t.TempDir(), "rogue-bin") + err := os.Mkdir(rogueBin, 0o777) + assert.NoError(t, err) + os.Chmod(rogueBin, 0o777) + + raw, err := os.ReadFile("testdata/world-writeable/python8.4") + assert.NoError(t, err) + + injectedBinary := filepath.Join(rogueBin, "python8.4") + err = os.WriteFile(injectedBinary, raw, 00777) + assert.NoError(t, err) + + t.Setenv("PATH", "testdata/other-binaries-filtered:"+rogueBin) + + roguePath, err := exec.LookPath("python8.4") + assert.NoError(t, err) + assert.Equal(t, injectedBinary, roguePath) + + ctx := context.Background() + all, err := DetectInterpreters(ctx) + assert.NoError(t, err) + assert.Len(t, all, 3) + assert.Equal(t, "v2.7.18", all[0].Version) + assert.Equal(t, "v3.10.5", all[1].Version) + assert.Equal(t, "testdata/other-binaries-filtered/python3.10", all[1].Path) + assert.Equal(t, "v3.11.4", all[2].Version) + assert.Equal(t, "testdata/other-binaries-filtered/real-python3.11.4", all[2].Path) +} + +func TestInterpretersAtLeastInvalidSemver(t *testing.T) { + t.Setenv("PATH", "testdata/other-binaries-filtered") + + ctx := context.Background() + all, err := DetectInterpreters(ctx) + assert.NoError(t, err) + + _, err = all.AtLeast("v1.2.3.4") + assert.EqualError(t, err, "invalid SemVer: v1.2.3.4") +} + +func TestInterpretersAtLeast(t *testing.T) { + t.Setenv("PATH", "testdata/other-binaries-filtered") + + ctx := context.Background() + all, err := DetectInterpreters(ctx) + assert.NoError(t, err) + + interpreter, err := all.AtLeast("3.10") + assert.NoError(t, err) + assert.Equal(t, "testdata/other-binaries-filtered/python3.10", interpreter.Path) +} + +func TestInterpretersAtLeastNotSatisfied(t *testing.T) { + t.Setenv("PATH", "testdata/other-binaries-filtered") + + ctx := context.Background() + all, err := DetectInterpreters(ctx) + assert.NoError(t, err) + + _, err = all.AtLeast("4.0.1") + assert.EqualError(t, err, "cannot find Python greater or equal to 
v4.0.1") +} diff --git a/libs/python/interpreters_win_test.go b/libs/python/interpreters_win_test.go new file mode 100644 index 00000000..f9998152 --- /dev/null +++ b/libs/python/interpreters_win_test.go @@ -0,0 +1,28 @@ +//go:build windows + +package python + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAtLeastOnePythonInstalled(t *testing.T) { + ctx := context.Background() + all, err := DetectInterpreters(ctx) + assert.NoError(t, err) + a := all.Latest() + t.Logf("latest is: %s", a) + assert.True(t, len(all) > 0) +} + +func TestNoInterpretersFound(t *testing.T) { + t.Setenv("PATH", t.TempDir()) + + ctx := context.Background() + _, err := DetectInterpreters(ctx) + assert.ErrorIs(t, err, ErrNoPythonInterpreters) + assert.ErrorContains(t, err, "python.org/downloads") +} diff --git a/libs/python/testdata/no-python3/python b/libs/python/testdata/no-python3/python new file mode 100755 index 00000000..8a4d6f7f --- /dev/null +++ b/libs/python/testdata/no-python3/python @@ -0,0 +1,6 @@ +#!/bin/sh + +# this is an emulation of Windows App Store stub +>&2 echo "Python was not found; run without arguments to install from the Microsoft Store, ..." + +echo "Python" diff --git a/libs/python/testdata/no-python3/python3.6 b/libs/python/testdata/no-python3/python3.6 new file mode 100755 index 00000000..1a1bfe6e --- /dev/null +++ b/libs/python/testdata/no-python3/python3.6 @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Python 3.6.4" diff --git a/libs/python/testdata/no-python3/pythonw b/libs/python/testdata/no-python3/pythonw new file mode 100755 index 00000000..a0cd07d9 --- /dev/null +++ b/libs/python/testdata/no-python3/pythonw @@ -0,0 +1,5 @@ +#!/bin/sh + +# pythonw is a gui app for launching gui/no-ui-at-all scripts, +# when no console window is opened on Windows +echo "Python 2.7.18" diff --git a/libs/python/testdata/other-binaries-filtered/python b/libs/python/testdata/other-binaries-filtered/python new file mode 100755 index 00000000..8a4d6f7f --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python @@ -0,0 +1,6 @@ +#!/bin/sh + +# this is an emulation of Windows App Store stub +>&2 echo "Python was not found; run without arguments to install from the Microsoft Store, ..." + +echo "Python" diff --git a/libs/python/testdata/other-binaries-filtered/python3-whatever b/libs/python/testdata/other-binaries-filtered/python3-whatever new file mode 100755 index 00000000..a0ed54ac --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python3-whatever @@ -0,0 +1,4 @@ +#!/bin/sh + +echo "Must not get executed!" 
+exit 1 diff --git a/libs/python/testdata/other-binaries-filtered/python3.10 b/libs/python/testdata/other-binaries-filtered/python3.10 new file mode 100755 index 00000000..060c051b --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python3.10 @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Python 3.10.5" diff --git a/libs/python/testdata/other-binaries-filtered/python3.10.100 b/libs/python/testdata/other-binaries-filtered/python3.10.100 new file mode 100755 index 00000000..c47d0da1 --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python3.10.100 @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Python 3.a.b" diff --git a/libs/python/testdata/other-binaries-filtered/python3.11 b/libs/python/testdata/other-binaries-filtered/python3.11 new file mode 120000 index 00000000..311e1513 --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python3.11 @@ -0,0 +1 @@ +real-python3.11.4 \ No newline at end of file diff --git a/libs/python/testdata/other-binaries-filtered/python4.8 b/libs/python/testdata/other-binaries-filtered/python4.8 new file mode 120000 index 00000000..86f59439 --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python4.8 @@ -0,0 +1 @@ +python3-deleted \ No newline at end of file diff --git a/libs/python/testdata/other-binaries-filtered/python5 b/libs/python/testdata/other-binaries-filtered/python5 new file mode 100755 index 00000000..eb48a407 --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python5 @@ -0,0 +1,5 @@ +#!/bin/sh + +# this is an emulation of Windows App Store stub + +echo "Python" diff --git a/libs/python/testdata/other-binaries-filtered/python6 b/libs/python/testdata/other-binaries-filtered/python6 new file mode 100755 index 00000000..4a6b64b2 --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python6 @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Snake v3.12.4" diff --git a/libs/python/testdata/other-binaries-filtered/python7 b/libs/python/testdata/other-binaries-filtered/python7 new file mode 100755 index 00000000..242da116 --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/python7 @@ -0,0 +1,4 @@ +#!/bin/sh + +>&2 echo "This version of Python does not exist" +exit 1 diff --git a/libs/python/testdata/other-binaries-filtered/pythonw b/libs/python/testdata/other-binaries-filtered/pythonw new file mode 100755 index 00000000..a0cd07d9 --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/pythonw @@ -0,0 +1,5 @@ +#!/bin/sh + +# pythonw is a gui app for launching gui/no-ui-at-all scripts, +# when no console window is opened on Windows +echo "Python 2.7.18" diff --git a/libs/python/testdata/other-binaries-filtered/real-python3.11.4 b/libs/python/testdata/other-binaries-filtered/real-python3.11.4 new file mode 100755 index 00000000..02cfa04c --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/real-python3.11.4 @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Python 3.11.4" diff --git a/libs/python/testdata/other-binaries-filtered/whatever b/libs/python/testdata/other-binaries-filtered/whatever new file mode 100755 index 00000000..a0ed54ac --- /dev/null +++ b/libs/python/testdata/other-binaries-filtered/whatever @@ -0,0 +1,4 @@ +#!/bin/sh + +echo "Must not get executed!" 
+exit 1 diff --git a/libs/python/testdata/some-dir-with-venv/.venv/pyvenv.cfg b/libs/python/testdata/some-dir-with-venv/.venv/pyvenv.cfg new file mode 100644 index 00000000..e2561203 --- /dev/null +++ b/libs/python/testdata/some-dir-with-venv/.venv/pyvenv.cfg @@ -0,0 +1,8 @@ +home = /opt/homebrew/opt/python@3.10/bin +implementation = CPython +version_info = 3.10.12.final.0 +virtualenv = 20.24.2 +include-system-site-packages = false +base-prefix = /opt/homebrew/opt/python@3.10/Frameworks/Python.framework/Versions/3.10 +base-exec-prefix = /opt/homebrew/opt/python@3.10/Frameworks/Python.framework/Versions/3.10 +base-executable = /opt/homebrew/opt/python@3.10/bin/python3.10 diff --git a/libs/python/testdata/some-dir-with-venv/__main__.py b/libs/python/testdata/some-dir-with-venv/__main__.py new file mode 100644 index 00000000..cace6aef --- /dev/null +++ b/libs/python/testdata/some-dir-with-venv/__main__.py @@ -0,0 +1,2 @@ +if __name__ == "__main__": + print(1) diff --git a/libs/python/testdata/world-writeable/python8.4 b/libs/python/testdata/world-writeable/python8.4 new file mode 100755 index 00000000..56ddc86a --- /dev/null +++ b/libs/python/testdata/world-writeable/python8.4 @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Python 8.4.10" diff --git a/python/utils.go b/libs/python/utils.go similarity index 95% rename from python/utils.go rename to libs/python/utils.go index 47d5462d..282775ff 100644 --- a/python/utils.go +++ b/libs/python/utils.go @@ -1,7 +1,5 @@ package python -// TODO: move this package into the libs - import ( "context" "os" diff --git a/python/utils_test.go b/libs/python/utils_test.go similarity index 100% rename from python/utils_test.go rename to libs/python/utils_test.go diff --git a/libs/python/venv.go b/libs/python/venv.go new file mode 100644 index 00000000..2af1bcdd --- /dev/null +++ b/libs/python/venv.go @@ -0,0 +1,35 @@ +package python + +import ( + "errors" + "os" + "path/filepath" +) + +var ErrNoVirtualEnvDetected = errors.New("no Python virtual environment detected") + +// DetectVirtualEnvPath scans direct subfolders in path to find a valid +// Virtual Environment installation, which is marked by a pyvenv.cfg file.
+// +// See: https://packaging.python.org/en/latest/tutorials/packaging-projects/ +func DetectVirtualEnvPath(path string) (string, error) { + files, err := os.ReadDir(path) + if err != nil { + return "", err + } + for _, v := range files { + if !v.IsDir() { + continue + } + candidate := filepath.Join(path, v.Name()) + _, err = os.Stat(filepath.Join(candidate, "pyvenv.cfg")) + if errors.Is(err, os.ErrNotExist) { + continue + } + if err != nil { + return "", err + } + return candidate, nil + } + return "", ErrNoVirtualEnvDetected +} diff --git a/libs/python/venv_test.go b/libs/python/venv_test.go new file mode 100644 index 00000000..2b3d94c3 --- /dev/null +++ b/libs/python/venv_test.go @@ -0,0 +1,33 @@ +package python + +import ( + "runtime" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDetectVirtualEnvPath_NoVirtualEnvDetected(t *testing.T) { + _, err := DetectVirtualEnvPath("testdata") + assert.Equal(t, ErrNoVirtualEnvDetected, err) +} + +func TestDetectVirtualEnvPath_invalid(t *testing.T) { + _, err := DetectVirtualEnvPath("testdata/__invalid__") + assert.Error(t, err) +} + +func TestDetectVirtualEnvPath_wrongDir(t *testing.T) { + _, err := DetectVirtualEnvPath("testdata/other-binaries-filtered") + assert.Error(t, err) +} + +func TestDetectVirtualEnvPath_happy(t *testing.T) { + venv, err := DetectVirtualEnvPath("testdata/some-dir-with-venv") + assert.NoError(t, err) + found := "testdata/some-dir-with-venv/.venv" + if runtime.GOOS == "windows" { + found = "testdata\\some-dir-with-venv\\.venv" + } + assert.Equal(t, found, venv) +} diff --git a/python/env.go b/python/env.go deleted file mode 100644 index 8a9e4330..00000000 --- a/python/env.go +++ /dev/null @@ -1,101 +0,0 @@ -package python - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/databricks/cli/libs/log" - "golang.org/x/mod/semver" -) - -type Dependency struct { - Name string - Operator string - Version string - Location string // @ file:///usr/loca -} - -func (d Dependency) CanonicalVersion() string { - return semver.Canonical(fmt.Sprintf("v%s", d.Version)) -} - -type Environment []Dependency - -func (e Environment) Has(name string) bool { - for _, d := range e { - if d.Name == name { - return true - } - } - return false -} - -func Freeze(ctx context.Context) (Environment, error) { - out, err := Py(ctx, "-m", "pip", "freeze") - if err != nil { - return nil, err - } - env := Environment{} - deps := strings.Split(out, "\n") - for _, raw := range deps { - env = append(env, DependencyFromSpec(raw)) - } - return env, nil -} - -func DependencyFromSpec(raw string) (d Dependency) { - // TODO: write a normal parser for this - rawSplit := strings.Split(raw, "==") - if len(rawSplit) != 2 { - log.Debugf(context.Background(), "Skipping invalid dep: %s", raw) - return - } - d.Name = rawSplit[0] - d.Operator = "==" - d.Version = rawSplit[1] - return -} - -// Distribution holds part of PEP426 metadata -// See https://peps.python.org/pep-0426/ -type Distribution struct { - Name string `json:"name"` - Version string `json:"version"` - Packages []string `json:"packages"` - InstallRequires []string `json:"install_requires,omitempty"` -} - -// InstallEnvironment returns only direct install dependencies -func (d Distribution) InstallEnvironment() (env Environment) { - for _, raw := range d.InstallRequires { - env = append(env, DependencyFromSpec(raw)) - } - return -} - -// NormalizedName returns PEP503-compatible Python Package Index project name. 
-// As per PEP 426 the only valid characters in a name are the ASCII alphabet, -// ASCII numbers, ., -, and _. The name should be lowercased with all runs of -// the characters ., -, or _ replaced with a single - character. -func (d Distribution) NormalizedName() string { - // TODO: implement https://peps.python.org/pep-0503/#normalized-names - return d.Name -} - -// ReadDistribution "parses" metadata from setup.py file. -func ReadDistribution(ctx context.Context) (d Distribution, err error) { - out, err := PyInline(ctx, ` - import setuptools, json, sys - setup_config = {} # actual args for setuptools.dist.Distribution - def capture(**kwargs): global setup_config; setup_config = kwargs - setuptools.setup = capture - import setup - json.dump(setup_config, sys.stdout)`) - if err != nil { - return - } - err = json.Unmarshal([]byte(out), &d) - return -} diff --git a/python/env_test.go b/python/env_test.go deleted file mode 100644 index 487e15b1..00000000 --- a/python/env_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package python - -import ( - "context" - "runtime" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFreeze(t *testing.T) { - t.Skip("Skipping test until fixing Python installation on GitHub Windows environment") - - // remove this once equivalent tests for windows have been set up - // or this test has been fixed for windows - // date: 28 Nov 2022 - if runtime.GOOS == "windows" { - t.Skip("skipping temperorilty to make windows unit tests green") - } - - // remove this once equivalent tests for macos have been set up - // or this test has been fixed for mac os - // date: 28 Nov 2022 - if runtime.GOOS == "darwin" { - t.Skip("skipping temperorilty to make macos unit tests green") - } - - env, err := Freeze(context.Background()) - assert.NoError(t, err) - assert.Greater(t, len(env), 1) - assert.True(t, env.Has("urllib3")) -} - -func TestPyInlineX(t *testing.T) { - defer chdirAndBack("testdata/simple-python-wheel")() - dist, err := ReadDistribution(context.Background()) - assert.NoError(t, err) - assert.Equal(t, "dummy", dist.Name) - assert.Equal(t, "dummy", dist.Packages[0]) - assert.True(t, dist.InstallEnvironment().Has("requests")) -} diff --git a/python/runner.go b/python/runner.go deleted file mode 100644 index ebf24717..00000000 --- a/python/runner.go +++ /dev/null @@ -1,149 +0,0 @@ -package python - -import ( - "context" - "errors" - "fmt" - "os" - "os/exec" - "runtime" - "strings" - - "github.com/databricks/cli/libs/process" -) - -func PyInline(ctx context.Context, inlinePy string) (string, error) { - return Py(ctx, "-c", TrimLeadingWhitespace(inlinePy)) -} - -func Py(ctx context.Context, script string, args ...string) (string, error) { - py, err := DetectExecutable(ctx) - if err != nil { - return "", err - } - out, err := execAndPassErr(ctx, py, append([]string{script}, args...)...) - if err != nil { - // current error message chain is longer: - // failed to call {pyExec} __non_existing__.py: {pyExec}: can't open - // ... 
file '{pwd}/__non_existing__.py': [Errno 2] No such file or directory" - // probably we'll need to make it shorter: - // can't open file '$PWD/__non_existing__.py': [Errno 2] No such file or directory - return "", err - } - return trimmedS(out), nil -} - -func createVirtualEnv(ctx context.Context) error { - _, err := Py(context.Background(), "-m", "venv", ".venv") - return err -} - -// python3 -m build -w -// https://packaging.python.org/en/latest/tutorials/packaging-projects/ -func detectVirtualEnv() (string, error) { - wd, err := os.Getwd() - if err != nil { - return "", err - } - wdf, err := os.Open(wd) - if err != nil { - return "", err - } - files, err := wdf.ReadDir(0) - if err != nil { - return "", err - } - for _, v := range files { - if !v.IsDir() { - continue - } - candidate := fmt.Sprintf("%s/%s", wd, v.Name()) - _, err = os.Stat(fmt.Sprintf("%s/pyvenv.cfg", candidate)) - if errors.Is(err, os.ErrNotExist) { - continue - } - if err != nil { - return "", err - } - return candidate, nil - } - return "", nil -} - -var pyExec string - -func DetectExecutable(ctx context.Context) (string, error) { - if pyExec != "" { - return pyExec, nil - } - detector := "which" - if runtime.GOOS == "windows" { - detector = "where.exe" - } - out, err := execAndPassErr(ctx, detector, "python3") - if err != nil { - return "", err - } - pyExec = getFirstMatch(string(out)) - return pyExec, nil -} - -func execAndPassErr(ctx context.Context, name string, args ...string) ([]byte, error) { - // TODO: move out to a separate package, once we have Maven integration - out, err := process.Background(ctx, append([]string{name}, args...)) - return []byte(out), nicerErr(err) -} - -func getFirstMatch(out string) string { - res := strings.Split(out, "\n") - return strings.Trim(res[0], "\n\r") -} - -func nicerErr(err error) error { - if err == nil { - return nil - } - if ee, ok := err.(*exec.ExitError); ok { - errMsg := trimmedS(ee.Stderr) - if errMsg == "" { - errMsg = err.Error() - } - return errors.New(errMsg) - } - return err -} - -func trimmedS(bytes []byte) string { - return strings.Trim(string(bytes), "\n\r") -} - -// TrimLeadingWhitespace removes leading whitespace -// function copied from Databricks Terraform provider -func TrimLeadingWhitespace(commandStr string) (newCommand string) { - lines := strings.Split(strings.ReplaceAll(commandStr, "\t", " "), "\n") - leadingWhitespace := 1<<31 - 1 - for _, line := range lines { - for pos, char := range line { - if char == ' ' || char == '\t' { - continue - } - // first non-whitespace character - if pos < leadingWhitespace { - leadingWhitespace = pos - } - // is not needed further - break - } - } - for i := 0; i < len(lines); i++ { - if lines[i] == "" || strings.Trim(lines[i], " \t") == "" { - continue - } - if len(lines[i]) < leadingWhitespace { - newCommand += lines[i] + "\n" // or not.. 
- } else { - newCommand += lines[i][leadingWhitespace:] + "\n" - } - } - return -} diff --git a/python/runner_test.go b/python/runner_test.go deleted file mode 100644 index fc8f2508..00000000 --- a/python/runner_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package python - -import ( - "context" - "fmt" - "os" - "runtime" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestExecAndPassError(t *testing.T) { - - // remove this once equivalent tests for windows have been set up - // or this test has been fixed for windows - // date: 28 Nov 2022 - if runtime.GOOS == "windows" { - t.Skip("skipping temperorilty to make windows unit tests green") - } - - _, err := execAndPassErr(context.Background(), "which", "__non_existing__") - assert.EqualError(t, err, "which __non_existing__: exit status 1") -} - -func TestDetectPython(t *testing.T) { - pyExec = "" - py, err := DetectExecutable(context.Background()) - assert.NoError(t, err) - assert.Contains(t, py, "python3") -} - -func TestDetectPythonCache(t *testing.T) { - pyExec = "abc" - py, err := DetectExecutable(context.Background()) - assert.NoError(t, err) - assert.Equal(t, "abc", py) - pyExec = "" -} - -func TestDetectVirtualEnvFalse(t *testing.T) { - venvDir, err := detectVirtualEnv() - assert.NoError(t, err) - assert.Equal(t, "", venvDir) -} - -func TestGetFirstMatch(t *testing.T) { - matches := "C:\\hostedtoolcache\\windows\\Python\\3.9.13\\x64\\python3.exe\r\nC:\\ProgramData\\Chocolatey\\bin\\python3.exe" - assert.Equal(t, getFirstMatch(matches), "C:\\hostedtoolcache\\windows\\Python\\3.9.13\\x64\\python3.exe") -} - -func TestMakeDetectableVenv(t *testing.T) { - var temp string - defer testTempdir(t, &temp)() - - // TODO: rewrite with t.TempDir() and arguments - err := createVirtualEnv(context.Background()) - assert.NoError(t, err) - - venv, err := detectVirtualEnv() - assert.NoError(t, err) - assert.Equal(t, fmt.Sprintf("%s/.venv", temp), venv) -} - -func testTempdir(t *testing.T, dir *string) func() { - wd, _ := os.Getwd() - temp, err := os.MkdirTemp(os.TempDir(), "brickstest") - assert.NoError(t, err) - os.Chdir(temp) - wd2, _ := os.Getwd() - *dir = wd2 - return func() { - os.Chdir(wd) - os.RemoveAll(temp) - } -} - -func TestPyError(t *testing.T) { - _, err := Py(context.Background(), "__non_existing__.py") - assert.Contains(t, err.Error(), "exit status 2") -} - -func TestPyInline(t *testing.T) { - hello, err := PyInline(context.Background(), "print('Hello, world!')") - assert.NoError(t, err) - assert.Equal(t, "Hello, world!", hello) -} - -func TestPyInlineStderr(t *testing.T) { - DetectExecutable(context.Background()) - inline := "import sys; sys.stderr.write('___msg___'); sys.exit(1)" - _, err := PyInline(context.Background(), inline) - assert.ErrorContains(t, err, "___msg___") -} diff --git a/python/testdata/simple-python-wheel/databricks.yml b/python/testdata/simple-python-wheel/databricks.yml deleted file mode 100644 index 3b8eb81f..00000000 --- a/python/testdata/simple-python-wheel/databricks.yml +++ /dev/null @@ -1,4 +0,0 @@ -name: dev -profile: demo -dev_cluster: - cluster_name: Shared Autoscaling \ No newline at end of file diff --git a/python/testdata/simple-python-wheel/dummy/__init__.py b/python/testdata/simple-python-wheel/dummy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/testdata/simple-python-wheel/dummy/transforms.py b/python/testdata/simple-python-wheel/dummy/transforms.py deleted file mode 100644 index d61605cb..00000000 --- 
a/python/testdata/simple-python-wheel/dummy/transforms.py +++ /dev/null @@ -1 +0,0 @@ -def something(): return True \ No newline at end of file diff --git a/python/testdata/simple-python-wheel/setup.py b/python/testdata/simple-python-wheel/setup.py deleted file mode 100644 index 53d795bc..00000000 --- a/python/testdata/simple-python-wheel/setup.py +++ /dev/null @@ -1,8 +0,0 @@ -from setuptools import setup, find_packages - -setup( - name='dummy', - version='0.0.1', - packages=find_packages(exclude=['tests', 'tests.*']), - install_requires=['requests==2.31.1'] -) diff --git a/python/wheel.go b/python/wheel.go deleted file mode 100644 index 39c3d4cb..00000000 --- a/python/wheel.go +++ /dev/null @@ -1,93 +0,0 @@ -package python - -import ( - "context" - "fmt" - "io" - "os" - "path" - - "github.com/databricks/cli/libs/log" - "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/service/files" -) - -func BuildWheel(ctx context.Context, dir string) (string, error) { - defer chdirAndBack(dir)() - // remove previous dist leak - os.RemoveAll("dist") - // remove all other irrelevant traces - CleanupWheelFolder(".") - // call simple wheel builder. we may need to pip install wheel as well - out, err := Py(ctx, "setup.py", "bdist_wheel") - if err != nil { - return "", err - } - log.Debugf(ctx, "Built wheel: %s", out) - - // and cleanup afterwards - CleanupWheelFolder(".") - - wheels := FindFilesWithSuffixInPath("dist", ".whl") - if len(wheels) == 0 { - return "", fmt.Errorf("cannot find built wheel in %s", dir) - } - if len(wheels) != 1 { - return "", fmt.Errorf("more than 1 wheel file found in %s", dir) - } - return path.Join(dir, wheels[0]), nil -} - -const DBFSWheelLocation = "dbfs:/FileStore/wheels/simple" - -// TODO: research deeper if we make new data resource for terraform, like `databricks_latest_wheel` (preferred), -// or do we bypass the environment variable into terraform deployer. And make a decision. -// -// Whatever this method gets refactored to is intended to be used for two purposes: -// - uploading project's wheel archives: one per project or one per project/developer, depending on isolation -// - synchronising enterprise artifactories, jfrogs, azdo feeds, so that we fix the gap of private code artifact -// repository integration. -func UploadWheelToDBFSWithPEP503(ctx context.Context, dir string) (string, error) { - wheel, err := BuildWheel(ctx, dir) - if err != nil { - return "", err - } - defer chdirAndBack(dir)() - dist, err := ReadDistribution(ctx) - if err != nil { - return "", err - } - // TODO: figure out wheel naming criteria for Soft project isolation to allow multiple - // people workin on the same project to upload wheels and let them be deployed as independent jobs. - // we should also consider multiple PEP503 index stacking: per enterprise, per project, per developer. - // PEP503 indexes can be rolled out to clusters via checksummed global init script, that creates - // a driver/worker `/etc/pip.conf` with FUSE-mounted file:///dbfs/FileStore/wheels/simple/.. - // extra index URLs. 
See more pointers at https://stackoverflow.com/q/30889494/277035 - dbfsLoc := fmt.Sprintf("%s/%s/%s", DBFSWheelLocation, dist.NormalizedName(), path.Base(wheel)) - - wsc, err := databricks.NewWorkspaceClient(&databricks.Config{}) - if err != nil { - return "", err - } - wf, err := os.Open(wheel) - if err != nil { - return "", err - } - defer wf.Close() - h, err := wsc.Dbfs.Open(ctx, dbfsLoc, files.FileModeOverwrite|files.FileModeWrite) - if err != nil { - return "", err - } - _, err = io.Copy(h, wf) - // TODO: maintain PEP503 compliance and update meta-files: - // ${DBFSWheelLocation}/index.html and ${DBFSWheelLocation}/${NormalizedName}/index.html - return dbfsLoc, err -} - -func chdirAndBack(dir string) func() { - wd, _ := os.Getwd() - os.Chdir(dir) - return func() { - os.Chdir(wd) - } -} diff --git a/python/wheel_test.go b/python/wheel_test.go deleted file mode 100644 index 5524dfb8..00000000 --- a/python/wheel_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package python - -import ( - "context" - "os" - "runtime" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestWheel(t *testing.T) { - - // remove this once equivalent tests for windows have been set up - // or this test has been fixed for windows - // date: 28 Nov 2022 - if runtime.GOOS == "windows" { - t.Skip("skipping temperorilty to make windows unit tests green") - } - - // remove this once equivalent tests for macos have been set up - // or this test has been fixed for mac os - // date: 28 Nov 2022 - if runtime.GOOS == "darwin" { - t.Skip("skipping temperorilty to make macos unit tests green") - } - - wheel, err := BuildWheel(context.Background(), "testdata/simple-python-wheel") - assert.NoError(t, err) - assert.Equal(t, "testdata/simple-python-wheel/dist/dummy-0.0.1-py3-none-any.whl", wheel) - - noFile(t, "testdata/simple-python-wheel/dummy.egg-info") - noFile(t, "testdata/simple-python-wheel/__pycache__") - noFile(t, "testdata/simple-python-wheel/build") -} - -func noFile(t *testing.T, name string) { - _, err := os.Stat(name) - assert.Error(t, err, "file %s should exist", name) -} From 452565cbd30568e33ab50e319bab410b6af3c6aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 12:48:40 +0200 Subject: [PATCH 125/310] Bump github.com/google/uuid from 1.3.0 to 1.3.1 (#825) Bumps [github.com/google/uuid](https://github.com/google/uuid) from 1.3.0 to 1.3.1.
Release notes

Sourced from github.com/google/uuid's releases.

v1.3.1 (2023-08-18)

Bug Fixes

- Use .EqualFold() to parse urn prefixed UUIDs (#118) (574e687)
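For context, a minimal sketch of the code path this fix touches (an illustration assuming only the public `uuid.Parse` API: `Parse` accepts the RFC 4122 URN form, and the prefix match is case-insensitive):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Both spellings of the urn prefix parse to the same UUID;
	// the prefix comparison is the spot #118 switched to .EqualFold().
	for _, s := range []string{
		"urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479",
		"URN:UUID:f47ac10b-58cc-4372-a567-0e02b2c3d479",
	} {
		u, err := uuid.Parse(s)
		fmt.Println(u, err)
	}
}
```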
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/google/uuid&package-manager=go_modules&previous-version=1.3.0&new-version=1.3.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2894f2a9..9f999035 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/databricks/databricks-sdk-go v0.19.3-0.20230914130855-dacb7f4fc878 // Apache 2.0 github.com/fatih/color v1.15.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE - github.com/google/uuid v1.3.0 // BSD-3-Clause + github.com/google/uuid v1.3.1 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.0 // MPL 2.0 github.com/hashicorp/terraform-exec v0.19.0 // MPL 2.0 diff --git a/go.sum b/go.sum index 77d7eebe..99cfefe7 100644 --- a/go.sum +++ b/go.sum @@ -97,8 +97,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= From 79e271f8592b5951578dfd185ddda82c3de6be28 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 3 Oct 2023 13:18:55 +0200 Subject: [PATCH 126/310] Added test to submit and run various Python tasks on multiple DBR versions (#806) ## Changes These tests allow us to get information about the execution context (PYTHONPATH, CWD) for various Python tasks and different cluster setups. Note: this test won't be executed automatically as part of nightly builds since it requires the RUN_PYTHON_TASKS_TEST environment variable to be set. ## Tests Integration tests run successfully.
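For reference, a plausible manual invocation is sketched below; the environment variable names match the test code, while the CLOUD_ENV value and the workspace credentials are assumptions, since any supported cloud works:

```sh
# Run the otherwise-skipped Python task tests against a real workspace.
# Workspace auth (e.g. DATABRICKS_HOST/DATABRICKS_TOKEN) must also be set.
RUN_PYTHON_TASKS_TEST=1 CLOUD_ENV=aws go test ./internal/python/ -run TestAccRunPythonTask -v
```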
--------- Co-authored-by: Pieter Noordhuis --- internal/filer_test.go | 61 +--- internal/fs_cat_test.go | 4 +- internal/fs_cp_test.go | 4 +- internal/fs_ls_test.go | 8 +- internal/fs_mkdir_test.go | 8 +- internal/fs_rm_test.go | 8 +- internal/helpers.go | 161 +++++++++++ internal/locker_test.go | 2 +- internal/python/python_tasks_test.go | 267 ++++++++++++++++++ .../my_test_code-0.0.1-py3-none-any.whl | Bin 0 -> 1933 bytes internal/python/testdata/spark.py | 7 + internal/python/testdata/test.py | 8 + internal/sync_test.go | 2 +- internal/workspace_test.go | 4 +- 14 files changed, 465 insertions(+), 79 deletions(-) create mode 100644 internal/python/python_tasks_test.go create mode 100644 internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl create mode 100644 internal/python/testdata/spark.py create mode 100644 internal/python/testdata/test.py diff --git a/internal/filer_test.go b/internal/filer_test.go index 0e126abc..b1af6886 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "io" "io/fs" "net/http" @@ -15,8 +14,6 @@ import ( "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" - "github.com/databricks/databricks-sdk-go/service/files" - "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -209,41 +206,12 @@ func runFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.False(t, entries[0].IsDir()) } -func temporaryWorkspaceDir(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - me, err := w.CurrentUser.Me(ctx) - require.NoError(t, err) - - path := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName("integration-test-wsfs-")) - - // Ensure directory exists, but doesn't exist YET! - // Otherwise we could inadvertently remove a directory that already exists on cleanup. - t.Logf("mkdir %s", path) - err = w.Workspace.MkdirsByPath(ctx, path) - require.NoError(t, err) - - // Remove test directory on test completion. - t.Cleanup(func() { - t.Logf("rm -rf %s", path) - err := w.Workspace.Delete(ctx, workspace.Delete{ - Path: path, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("unable to remove temporary workspace directory %s: %#v", path, err) - }) - - return path -} - func setupWorkspaceFilesTest(t *testing.T) (context.Context, filer.Filer) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := temporaryWorkspaceDir(t, w) + tmpdir := TemporaryWorkspaceDir(t, w) f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -267,37 +235,12 @@ func TestAccFilerWorkspaceFilesReadDir(t *testing.T) { runFilerReadDirTest(t, ctx, f) } -func temporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { - ctx := context.Background() - path := fmt.Sprintf("/tmp/%s", RandomName("integration-test-dbfs-")) - - // This call fails if the path already exists. - t.Logf("mkdir dbfs:%s", path) - err := w.Dbfs.MkdirsByPath(ctx, path) - require.NoError(t, err) - - // Remove test directory on test completion. 
- t.Cleanup(func() { - t.Logf("rm -rf dbfs:%s", path) - err := w.Dbfs.Delete(ctx, files.Delete{ - Path: path, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("unable to remove temporary dbfs directory %s: %#v", path, err) - }) - - return path -} - func setupFilerDbfsTest(t *testing.T) (context.Context, filer.Filer) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := temporaryDbfsDir(t, w) + tmpdir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpdir) require.NoError(t, err) return ctx, f diff --git a/internal/fs_cat_test.go b/internal/fs_cat_test.go index f3c8e59c..2c979ea7 100644 --- a/internal/fs_cat_test.go +++ b/internal/fs_cat_test.go @@ -20,7 +20,7 @@ func TestAccFsCatForDbfs(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -54,7 +54,7 @@ func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) diff --git a/internal/fs_cp_test.go b/internal/fs_cp_test.go index 766d6a59..3b73b48d 100644 --- a/internal/fs_cp_test.go +++ b/internal/fs_cp_test.go @@ -75,7 +75,7 @@ func setupDbfsFiler(t *testing.T) (filer.Filer, string) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -256,7 +256,7 @@ func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) _, _, err = RequireErrorRun(t, "fs", "cp", "dbfs:"+tmpDir, "dbfs:/tmp") assert.Equal(t, fmt.Sprintf("source path %s is a directory. 
Please specify the --recursive flag", tmpDir), err.Error()) diff --git a/internal/fs_ls_test.go b/internal/fs_ls_test.go index d2181728..9e02b09c 100644 --- a/internal/fs_ls_test.go +++ b/internal/fs_ls_test.go @@ -23,7 +23,7 @@ func TestAccFsLsForDbfs(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -58,7 +58,7 @@ func TestAccFsLsForDbfsWithAbsolutePaths(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -94,7 +94,7 @@ func TestAccFsLsForDbfsOnFile(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -114,7 +114,7 @@ func TestAccFsLsForDbfsOnEmptyDir(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json") assert.Equal(t, "", stderr.String()) diff --git a/internal/fs_mkdir_test.go b/internal/fs_mkdir_test.go index 25117d53..af0e9d18 100644 --- a/internal/fs_mkdir_test.go +++ b/internal/fs_mkdir_test.go @@ -20,7 +20,7 @@ func TestAccFsMkdirCreatesDirectory(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -44,7 +44,7 @@ func TestAccFsMkdirCreatesMultipleDirectories(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -80,7 +80,7 @@ func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) // create directory "a" f, err := filer.NewDbfsClient(w, tmpDir) @@ -101,7 +101,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) // create file hello f, err := filer.NewDbfsClient(w, tmpDir) diff --git a/internal/fs_rm_test.go b/internal/fs_rm_test.go index 1bee06c7..d70827d1 100644 --- a/internal/fs_rm_test.go +++ b/internal/fs_rm_test.go @@ -20,7 +20,7 @@ func TestAccFsRmForFile(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -52,7 +52,7 @@ func TestAccFsRmForEmptyDirectory(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) @@ -84,7 +84,7 @@ func TestAccFsRmForNonEmptyDirectory(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) 
require.NoError(t, err) @@ -120,7 +120,7 @@ func TestAccFsRmForNonEmptyDirectoryWithRecursiveFlag(t *testing.T) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryDbfsDir(t, w) + tmpDir := TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) diff --git a/internal/helpers.go b/internal/helpers.go index 68c00019..5a7e59e8 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -17,6 +17,12 @@ import ( "github.com/databricks/cli/cmd" _ "github.com/databricks/cli/cmd/version" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/files" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/stretchr/testify/require" @@ -272,3 +278,158 @@ func writeFile(t *testing.T, name string, body string) string { f.Close() return f.Name() } + +func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")), + NotebookTask: &jobs.NotebookTask{ + NotebookPath: notebookPath, + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + } + tasks = append(tasks, task) + } + + return tasks +} + +func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")), + SparkPythonTask: &jobs.SparkPythonTask{ + PythonFile: notebookPath, + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + } + tasks = append(tasks, task) + } + + return tasks +} + +func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")), + PythonWheelTask: &jobs.PythonWheelTask{ + PackageName: "my_test_code", + EntryPoint: "run", + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + Libraries: []compute.Library{ + {Whl: wheelPath}, + }, + } + tasks = append(tasks, task) + } + + return tasks +} + +func TemporaryWorkspaceDir(t *testing.T, w *databricks.WorkspaceClient) string { + ctx := context.Background() + me, err := w.CurrentUser.Me(ctx) + require.NoError(t, err) + + basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName("integration-test-wsfs-")) + + t.Logf("Creating %s", basePath) + err = w.Workspace.MkdirsByPath(ctx, basePath) + require.NoError(t, err) + + // Remove test directory on test completion. 
+ t.Cleanup(func() { + t.Logf("Removing %s", basePath) + err := w.Workspace.Delete(ctx, workspace.Delete{ + Path: basePath, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) + }) + + return basePath +} + +func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { + ctx := context.Background() + path := fmt.Sprintf("/tmp/%s", RandomName("integration-test-dbfs-")) + + t.Logf("Creating DBFS folder:%s", path) + err := w.Dbfs.MkdirsByPath(ctx, path) + require.NoError(t, err) + + t.Cleanup(func() { + t.Logf("Removing DBFS folder:%s", path) + err := w.Dbfs.Delete(ctx, files.Delete{ + Path: path, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("unable to remove temporary dbfs directory %s: %#v", path, err) + }) + + return path +} + +func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { + ctx := context.Background() + me, err := w.CurrentUser.Me(ctx) + require.NoError(t, err) + + repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("integration-test-repo-")) + + t.Logf("Creating repo:%s", repoPath) + repoInfo, err := w.Repos.Create(ctx, workspace.CreateRepo{ + Url: "https://github.com/databricks/cli", + Provider: "github", + Path: repoPath, + }) + require.NoError(t, err) + + t.Cleanup(func() { + t.Logf("Removing repo: %s", repoPath) + err := w.Repos.Delete(ctx, workspace.DeleteRepoRequest{ + RepoId: repoInfo.Id, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("unable to remove repo %s: %#v", repoPath, err) + }) + + return repoPath +} + +func GetNodeTypeId(env string) string { + if env == "gcp" { + return "n1-standard-4" + } else if env == "aws" { + return "i3.xlarge" + } + return "Standard_DS4_v2" +} diff --git a/internal/locker_test.go b/internal/locker_test.go index 661838ec..21e08f73 100644 --- a/internal/locker_test.go +++ b/internal/locker_test.go @@ -169,7 +169,7 @@ func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer. 
require.NoError(t, err) // create temp wsfs dir - tmpDir := temporaryWorkspaceDir(t, w) + tmpDir := TemporaryWorkspaceDir(t, w) f, err := filer.NewWorkspaceFilesClient(w, tmpDir) require.NoError(t, err) diff --git a/internal/python/python_tasks_test.go b/internal/python/python_tasks_test.go new file mode 100644 index 00000000..fde9b37f --- /dev/null +++ b/internal/python/python_tasks_test.go @@ -0,0 +1,267 @@ +package python + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "path" + "slices" + "strings" + "testing" + "time" + + "github.com/databricks/cli/bundle/run/output" + "github.com/databricks/cli/internal" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/require" +) + +const PY_CONTENT = `# Databricks notebook source +import os +import sys +import json + +out = {"PYTHONPATH": sys.path, "CWD": os.getcwd()} +json_object = json.dumps(out, indent = 4) +dbutils.notebook.exit(json_object) +` + +const SPARK_PY_CONTENT = ` +import os +import sys +import json + +out = {"PYTHONPATH": sys.path, "CWD": os.getcwd()} +json_object = json.dumps(out, indent = 4) +print(json_object) +` + +type testOutput struct { + PythonPath []string `json:"PYTHONPATH"` + Cwd string `json:"CWD"` +} + +type testFiles struct { + w *databricks.WorkspaceClient + pyNotebookPath string + sparkPythonPath string + wheelPath string +} + +type testOpts struct { + name string + includeNotebookTasks bool + includeSparkPythonTasks bool + includeWheelTasks bool + wheelSparkVersions []string +} + +var sparkVersions = []string{ + "11.3.x-scala2.12", + "12.2.x-scala2.12", + "13.0.x-scala2.12", + "13.1.x-scala2.12", + "13.2.x-scala2.12", + "13.3.x-scala2.12", + "14.0.x-scala2.12", + "14.1.x-scala2.12", +} + +func TestAccRunPythonTaskWorkspace(t *testing.T) { + // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly + internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") + internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + + unsupportedSparkVersionsForWheel := []string{ + "11.3.x-scala2.12", + "12.2.x-scala2.12", + "13.0.x-scala2.12", + } + runPythonTasks(t, prepareWorkspaceFiles(t), testOpts{ + name: "Python tasks from WSFS", + includeNotebookTasks: true, + includeSparkPythonTasks: true, + includeWheelTasks: true, + wheelSparkVersions: slices.DeleteFunc(slices.Clone(sparkVersions), func(s string) bool { + return slices.Contains(unsupportedSparkVersionsForWheel, s) + }), + }) +} + +func TestAccRunPythonTaskDBFS(t *testing.T) { + // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly + internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") + internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + + runPythonTasks(t, prepareDBFSFiles(t), testOpts{ + name: "Python tasks from DBFS", + includeNotebookTasks: false, + includeSparkPythonTasks: true, + includeWheelTasks: true, + }) +} + +func TestAccRunPythonTaskRepo(t *testing.T) { + // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly + internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") + internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + + runPythonTasks(t, prepareRepoFiles(t), testOpts{ + name: "Python tasks from Repo", + includeNotebookTasks: true, + includeSparkPythonTasks: true, + includeWheelTasks: false, + }) +} + +func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { + env := 
internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + w := tw.w + + nodeTypeId := internal.GetNodeTypeId(env) + tasks := make([]jobs.SubmitTask, 0) + if opts.includeNotebookTasks { + tasks = append(tasks, internal.GenerateNotebookTasks(tw.pyNotebookPath, sparkVersions, nodeTypeId)...) + } + + if opts.includeSparkPythonTasks { + tasks = append(tasks, internal.GenerateSparkPythonTasks(tw.sparkPythonPath, sparkVersions, nodeTypeId)...) + } + + if opts.includeWheelTasks { + versions := sparkVersions + if len(opts.wheelSparkVersions) > 0 { + versions = opts.wheelSparkVersions + } + tasks = append(tasks, internal.GenerateWheelTasks(tw.wheelPath, versions, nodeTypeId)...) + } + + ctx := context.Background() + run, err := w.Jobs.Submit(ctx, jobs.SubmitRun{ + RunName: opts.name, + Tasks: tasks, + }) + require.NoError(t, err) + + _, err = w.Jobs.WaitGetRunJobTerminatedOrSkipped(ctx, run.RunId, time.Hour, nil) + require.NoError(t, err) + + output, err := output.GetJobOutput(ctx, w, run.RunId) + require.NoError(t, err) + + result := make(map[string]testOutput, 0) + for _, out := range output.TaskOutputs { + s, err := out.Output.String() + require.NoError(t, err) + + tOut := testOutput{} + err = json.Unmarshal([]byte(s), &tOut) + if err != nil { + continue + } + result[out.TaskKey] = tOut + } + + out, err := json.MarshalIndent(result, "", " ") + require.NoError(t, err) + + t.Log("==== Run output ====") + t.Log(string(out)) +} + +func prepareWorkspaceFiles(t *testing.T) *testFiles { + ctx := context.Background() + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + baseDir := internal.TemporaryWorkspaceDir(t, w) + pyNotebookPath := path.Join(baseDir, "test.py") + + err = w.Workspace.Import(ctx, workspace.Import{ + Path: pyNotebookPath, + Overwrite: true, + Language: workspace.LanguagePython, + Format: workspace.ImportFormatSource, + Content: base64.StdEncoding.EncodeToString([]byte(PY_CONTENT)), + }) + require.NoError(t, err) + + sparkPythonPath := path.Join(baseDir, "spark.py") + err = w.Workspace.Import(ctx, workspace.Import{ + Path: sparkPythonPath, + Overwrite: true, + Format: workspace.ImportFormatAuto, + Content: base64.StdEncoding.EncodeToString([]byte(SPARK_PY_CONTENT)), + }) + require.NoError(t, err) + + raw, err := os.ReadFile("./testdata/my_test_code-0.0.1-py3-none-any.whl") + require.NoError(t, err) + + wheelPath := path.Join(baseDir, "my_test_code-0.0.1-py3-none-any.whl") + err = w.Workspace.Import(ctx, workspace.Import{ + Path: path.Join(baseDir, "my_test_code-0.0.1-py3-none-any.whl"), + Overwrite: true, + Format: workspace.ImportFormatAuto, + Content: base64.StdEncoding.EncodeToString(raw), + }) + require.NoError(t, err) + + return &testFiles{ + w: w, + pyNotebookPath: pyNotebookPath, + sparkPythonPath: sparkPythonPath, + wheelPath: path.Join("/Workspace", wheelPath), + } +} + +func prepareDBFSFiles(t *testing.T) *testFiles { + ctx := context.Background() + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + baseDir := internal.TemporaryDbfsDir(t, w) + f, err := filer.NewDbfsClient(w, baseDir) + require.NoError(t, err) + + err = f.Write(ctx, "test.py", strings.NewReader(PY_CONTENT)) + require.NoError(t, err) + + err = f.Write(ctx, "spark.py", strings.NewReader(SPARK_PY_CONTENT)) + require.NoError(t, err) + + raw, err := os.ReadFile("./testdata/my_test_code-0.0.1-py3-none-any.whl") + require.NoError(t, err) + + err = f.Write(ctx, "my_test_code-0.0.1-py3-none-any.whl", bytes.NewReader(raw)) + require.NoError(t, err) + + return &testFiles{ + w: w, 
+		pyNotebookPath:  path.Join(baseDir, "test.py"),
+		sparkPythonPath: fmt.Sprintf("dbfs:%s", path.Join(baseDir, "spark.py")),
+		wheelPath:       fmt.Sprintf("dbfs:%s", path.Join(baseDir, "my_test_code-0.0.1-py3-none-any.whl")),
+	}
+}
+
+func prepareRepoFiles(t *testing.T) *testFiles {
+	w, err := databricks.NewWorkspaceClient()
+	require.NoError(t, err)
+
+	repo := internal.TemporaryRepo(t, w)
+	packagePath := "internal/python/testdata"
+	return &testFiles{
+		w:               w,
+		pyNotebookPath:  path.Join(repo, packagePath, "test"),
+		sparkPythonPath: path.Join(repo, packagePath, "spark.py"),
+		wheelPath:       path.Join(repo, packagePath, "my_test_code-0.0.1-py3-none-any.whl"),
+	}
+}
diff --git a/internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl b/internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..249263968892de5f2a7a5fc52f2df9b7dcde3838
GIT binary patch
literal 1933
[base85-encoded binary wheel payload omitted]

literal 0
HcmV?d00001

diff --git a/internal/python/testdata/spark.py b/internal/python/testdata/spark.py
new file mode 100644
index 00000000..0bc38409
--- /dev/null
+++ b/internal/python/testdata/spark.py
@@ -0,0 +1,7 @@
+import os
+import sys
+import json
+
+out = {"PYTHONPATH": sys.path, "CWD": os.getcwd()}
+json_object = json.dumps(out, indent=4)
+print(json_object)
diff --git a/internal/python/testdata/test.py b/internal/python/testdata/test.py
new file mode 100644
index 00000000..95b1f916
--- /dev/null
+++ b/internal/python/testdata/test.py
@@ -0,0 +1,8 @@
+# Databricks notebook source
+import os
+import sys
+import json
+
+out = {"PYTHONPATH": sys.path, "CWD": os.getcwd()}
+json_object = json.dumps(out, indent=4)
+dbutils.notebook.exit(json_object)
diff --git a/internal/sync_test.go b/internal/sync_test.go
index bc1cbd91..9eb1ac1b 100644
--- a/internal/sync_test.go
+++ b/internal/sync_test.go
@@ -75,7 +75,7 @@ func setupSyncTest(t *testing.T, args ...string) *syncTest {
 	w := databricks.Must(databricks.NewWorkspaceClient())
 
 	localRoot := t.TempDir()
-	remoteRoot := temporaryWorkspaceDir(t, w)
+	remoteRoot := TemporaryWorkspaceDir(t, w)
 
 	f, err := filer.NewWorkspaceFilesClient(w, remoteRoot)
 	require.NoError(t, err)
diff --git
a/internal/workspace_test.go b/internal/workspace_test.go index dd26bcf4..7110d5c9 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -45,7 +45,7 @@ func TestWorkpaceExportPrintsContents(t *testing.T) { ctx := context.Background() w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := temporaryWorkspaceDir(t, w) + tmpdir := TemporaryWorkspaceDir(t, w) f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -65,7 +65,7 @@ func setupWorkspaceImportExportTest(t *testing.T) (context.Context, filer.Filer, ctx := context.Background() w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := temporaryWorkspaceDir(t, w) + tmpdir := TemporaryWorkspaceDir(t, w) f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) From e1d1e955251aa96eb4c3ec9a5ede9e557be46f96 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 3 Oct 2023 13:46:16 +0200 Subject: [PATCH 127/310] Updated Go SDK to 0.22.0 (#831) ## Changes Updated Go SDK to 0.22.0 --- .codegen/_openapi_sha | 2 +- .gitattributes | 3 + cmd/account/cmd.go | 2 + .../custom-app-integration.go | 6 +- .../metastore-assignments.go | 2 +- .../o-auth-published-apps.go | 114 +++++++++ .../published-app-integration.go | 8 +- .../artifact-allowlists.go | 4 +- cmd/workspace/catalogs/catalogs.go | 1 - cmd/workspace/clean-rooms/clean-rooms.go | 31 ++- cmd/workspace/clusters/clusters.go | 4 +- cmd/workspace/cmd.go | 4 + cmd/workspace/connections/connections.go | 1 - .../credentials-manager.go | 108 ++++++++ cmd/workspace/jobs/jobs.go | 47 ++-- .../model-registry/model-registry.go | 4 +- cmd/workspace/pipelines/pipelines.go | 1 + .../serving-endpoints/serving-endpoints.go | 92 ++++++- cmd/workspace/settings/settings.go | 241 ++++++++++++++++++ .../system-schemas/system-schemas.go | 3 - go.mod | 14 +- go.sum | 64 +---- 22 files changed, 644 insertions(+), 112 deletions(-) create mode 100755 cmd/account/o-auth-published-apps/o-auth-published-apps.go create mode 100755 cmd/workspace/credentials-manager/credentials-manager.go create mode 100755 cmd/workspace/settings/settings.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index b59218d3..7d4ee2a6 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -09a7fa63d9ae243e5407941f200960ca14d48b07 \ No newline at end of file +bcbf6e851e3d82fd910940910dd31c10c059746c \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 3209a0f3..61527fed 100755 --- a/.gitattributes +++ b/.gitattributes @@ -13,6 +13,7 @@ cmd/account/metastores/metastores.go linguist-generated=true cmd/account/network-policy/network-policy.go linguist-generated=true cmd/account/networks/networks.go linguist-generated=true cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true +cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true cmd/account/private-access/private-access.go linguist-generated=true cmd/account/published-app-integration/published-app-integration.go linguist-generated=true cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true @@ -32,6 +33,7 @@ cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true cmd/workspace/connections/connections.go linguist-generated=true +cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true cmd/workspace/current-user/current-user.go 
linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true @@ -66,6 +68,7 @@ cmd/workspace/schemas/schemas.go linguist-generated=true cmd/workspace/secrets/secrets.go linguist-generated=true cmd/workspace/service-principals/service-principals.go linguist-generated=true cmd/workspace/serving-endpoints/serving-endpoints.go linguist-generated=true +cmd/workspace/settings/settings.go linguist-generated=true cmd/workspace/shares/shares.go linguist-generated=true cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true cmd/workspace/system-schemas/system-schemas.go linguist-generated=true diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 2b06171d..38be7314 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -19,6 +19,7 @@ import ( account_network_policy "github.com/databricks/cli/cmd/account/network-policy" networks "github.com/databricks/cli/cmd/account/networks" o_auth_enrollment "github.com/databricks/cli/cmd/account/o-auth-enrollment" + o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps" private_access "github.com/databricks/cli/cmd/account/private-access" published_app_integration "github.com/databricks/cli/cmd/account/published-app-integration" service_principal_secrets "github.com/databricks/cli/cmd/account/service-principal-secrets" @@ -52,6 +53,7 @@ func New() *cobra.Command { cmd.AddCommand(account_network_policy.New()) cmd.AddCommand(networks.New()) cmd.AddCommand(o_auth_enrollment.New()) + cmd.AddCommand(o_auth_published_apps.New()) cmd.AddCommand(private_access.New()) cmd.AddCommand(published_app_integration.New()) cmd.AddCommand(service_principal_secrets.New()) diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index e5868809..e7b56df7 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -22,11 +22,7 @@ func New() *cobra.Command { Short: `These APIs enable administrators to manage custom oauth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud.`, Long: `These APIs enable administrators to manage custom oauth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau - Cloud for Databricks in AWS cloud. - - **Note:** You can only add/use the OAuth custom application integrations when - OAuth enrollment status is enabled. 
For more details see - :method:OAuthEnrollment/create`, + Cloud for Databricks in AWS cloud.`, GroupID: "oauth2", Annotations: map[string]string{ "package": "oauth2", diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index 24c4eb69..00979f45 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -280,7 +280,7 @@ func newList() *cobra.Command { listReq.MetastoreId = args[0] - response, err := a.MetastoreAssignments.List(ctx, listReq) + response, err := a.MetastoreAssignments.ListAll(ctx, listReq) if err != nil { return err } diff --git a/cmd/account/o-auth-published-apps/o-auth-published-apps.go b/cmd/account/o-auth-published-apps/o-auth-published-apps.go new file mode 100755 index 00000000..640e8a4c --- /dev/null +++ b/cmd/account/o-auth-published-apps/o-auth-published-apps.go @@ -0,0 +1,114 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package o_auth_published_apps + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "o-auth-published-apps", + Short: `These APIs enable administrators to view all the available published OAuth applications in Databricks.`, + Long: `These APIs enable administrators to view all the available published OAuth + applications in Databricks. Administrators can add the published OAuth + applications to their account through the OAuth Published App Integration + APIs.`, + GroupID: "oauth2", + Annotations: map[string]string{ + "package": "oauth2", + }, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *oauth2.ListOAuthPublishedAppsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq oauth2.ListOAuthPublishedAppsRequest + var listJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Int64Var(&listReq.PageSize, "page-size", listReq.PageSize, `The max number of OAuth published apps to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`) + + cmd.Use = "list" + cmd.Short = `Get all the published OAuth apps.` + cmd.Long = `Get all the published OAuth apps. 
+ + Get all the available published OAuth apps in Databricks.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = listJson.Unmarshal(&listReq) + if err != nil { + return err + } + } else { + } + + response, err := a.OAuthPublishedApps.ListAll(ctx, listReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newList()) + }) +} + +// end service OAuthPublishedApps diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index b367ad71..9b29d53d 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -17,14 +17,10 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "published-app-integration", - Short: `These APIs enable administrators to manage published oauth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Cloud for Databricks in AWS cloud.`, + Short: `These APIs enable administrators to manage published oauth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud.`, Long: `These APIs enable administrators to manage published oauth app integrations, which is required for adding/using Published OAuth App Integration like - Tableau Cloud for Databricks in AWS cloud. - - **Note:** You can only add/use the OAuth published application integrations - when OAuth enrollment status is enabled. For more details see - :method:OAuthEnrollment/create`, + Tableau Desktop for Databricks in AWS cloud.`, GroupID: "oauth2", Annotations: map[string]string{ "package": "oauth2", diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go index 9f9b9be1..ad6e58b4 100755 --- a/cmd/workspace/artifact-allowlists/artifact-allowlists.go +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -58,7 +58,7 @@ func newGet() *cobra.Command { cmd.Long = `Get an artifact allowlist. Get the artifact allowlist of a certain artifact type. The caller must be a - metastore admin.` + metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore.` cmd.Annotations = make(map[string]string) @@ -126,7 +126,7 @@ func newUpdate() *cobra.Command { Set the artifact allowlist of a certain artifact type. The whole artifact allowlist is replaced with the new allowlist. 
The caller must be a metastore - admin.` + admin or have the **MANAGE ALLOWLIST** privilege on the metastore.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 5e06977c..5896c5cb 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -322,7 +322,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of catalog.`) - // TODO: map via StringToStringVar: options cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of catalog.`) // TODO: map via StringToStringVar: properties diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 5aa704fa..1eab2fb3 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -243,11 +243,21 @@ func init() { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *sharing.ListCleanRoomsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq sharing.ListCleanRoomsRequest + var listJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of clean rooms to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Pagination token to go to next page based on previous query.`) + cmd.Use = "list" cmd.Short = `List clean rooms.` cmd.Long = `List clean rooms. @@ -258,11 +268,28 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(0) + } + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.CleanRooms.ListAll(ctx) + + if cmd.Flags().Changed("json") { + err = listJson.Unmarshal(&listReq) + if err != nil { + return err + } + } else { + } + + response, err := w.CleanRooms.ListAll(ctx, listReq) if err != nil { return err } @@ -275,7 +302,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. 
for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index dce6753d..f14864f0 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -169,7 +169,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) cmd.Flags().Var(&createReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.`) // TODO: map via StringToStringVar: custom_tags - cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `This describes an enum.`) + cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster.`) // TODO: complex arg: docker_image cmd.Flags().StringVar(&createReq.DriverInstancePoolId, "driver-instance-pool-id", createReq.DriverInstancePoolId, `The optional ID of the instance pool for the driver of the cluster belongs.`) cmd.Flags().StringVar(&createReq.DriverNodeTypeId, "driver-node-type-id", createReq.DriverNodeTypeId, `The node type of the Spark driver.`) @@ -396,7 +396,7 @@ func newEdit() *cobra.Command { cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`) cmd.Flags().Var(&editReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.`) // TODO: map via StringToStringVar: custom_tags - cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `This describes an enum.`) + cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster.`) // TODO: complex arg: docker_image cmd.Flags().StringVar(&editReq.DriverInstancePoolId, "driver-instance-pool-id", editReq.DriverInstancePoolId, `The optional ID of the instance pool for the driver of the cluster belongs.`) cmd.Flags().StringVar(&editReq.DriverNodeTypeId, "driver-node-type-id", editReq.DriverNodeTypeId, `The node type of the Spark driver.`) diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 495d8066..dc3f6798 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -10,6 +10,7 @@ import ( cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" + credentials_manager "github.com/databricks/cli/cmd/workspace/credentials-manager" current_user "github.com/databricks/cli/cmd/workspace/current-user" dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" dashboards "github.com/databricks/cli/cmd/workspace/dashboards" @@ -44,6 +45,7 @@ import ( secrets "github.com/databricks/cli/cmd/workspace/secrets" service_principals "github.com/databricks/cli/cmd/workspace/service-principals" serving_endpoints "github.com/databricks/cli/cmd/workspace/serving-endpoints" + settings "github.com/databricks/cli/cmd/workspace/settings" shares "github.com/databricks/cli/cmd/workspace/shares" storage_credentials "github.com/databricks/cli/cmd/workspace/storage-credentials" system_schemas 
"github.com/databricks/cli/cmd/workspace/system-schemas" @@ -70,6 +72,7 @@ func All() []*cobra.Command { out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) + out = append(out, credentials_manager.New()) out = append(out, current_user.New()) out = append(out, dashboard_widgets.New()) out = append(out, dashboards.New()) @@ -104,6 +107,7 @@ func All() []*cobra.Command { out = append(out, secrets.New()) out = append(out, service_principals.New()) out = append(out, serving_endpoints.New()) + out = append(out, settings.New()) out = append(out, shares.New()) out = append(out, storage_credentials.New()) out = append(out, system_schemas.New()) diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index c25825c9..917aeda9 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -64,7 +64,6 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) - cmd.Flags().StringVar(&createReq.Owner, "owner", createReq.Owner, `Username of current owner of the connection.`) // TODO: map via StringToStringVar: properties cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `If the connection is read only.`) diff --git a/cmd/workspace/credentials-manager/credentials-manager.go b/cmd/workspace/credentials-manager/credentials-manager.go new file mode 100755 index 00000000..30b33f7b --- /dev/null +++ b/cmd/workspace/credentials-manager/credentials-manager.go @@ -0,0 +1,108 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package credentials_manager + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "credentials-manager", + Short: `Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens.`, + Long: `Credentials manager interacts with with Identity Providers to to perform token + exchanges using stored credentials and refresh tokens.`, + GroupID: "settings", + Annotations: map[string]string{ + "package": "settings", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start exchange-token command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var exchangeTokenOverrides []func(
+	*cobra.Command,
+	*settings.ExchangeTokenRequest,
+)
+
+func newExchangeToken() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var exchangeTokenReq settings.ExchangeTokenRequest
+	var exchangeTokenJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&exchangeTokenJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Use = "exchange-token"
+	cmd.Short = `Exchange token.`
+	cmd.Long = `Exchange token.
+
+  Exchange tokens with an Identity Provider to get a new access token. It
+  allows specifying scopes to determine token permissions.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = exchangeTokenJson.Unmarshal(&exchangeTokenReq)
+			if err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}
+
+		response, err := w.CredentialsManager.ExchangeToken(ctx, exchangeTokenReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range exchangeTokenOverrides {
+		fn(cmd, &exchangeTokenReq)
+	}
+
+	return cmd
+}
+
+func init() {
+	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
+		cmd.AddCommand(newExchangeToken())
+	})
+}
+
+// end service CredentialsManager
diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go
index 7670ebb7..9edebb66 100755
--- a/cmd/workspace/jobs/jobs.go
+++ b/cmd/workspace/jobs/jobs.go
@@ -71,7 +71,10 @@ func newCancelAllRuns() *cobra.Command {
 	// TODO: short flags
 	cmd.Flags().Var(&cancelAllRunsJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
-	cmd.Use = "cancel-all-runs JOB_ID"
+	cmd.Flags().BoolVar(&cancelAllRunsReq.AllQueuedRuns, "all-queued-runs", cancelAllRunsReq.AllQueuedRuns, `Optional boolean parameter to cancel all queued runs.`)
+	cmd.Flags().Int64Var(&cancelAllRunsReq.JobId, "job-id", cancelAllRunsReq.JobId, `The canonical identifier of the job to cancel all runs of.`)
+
+	cmd.Use = "cancel-all-runs"
 	cmd.Short = `Cancel all runs of a job.`
 	cmd.Long = `Cancel all runs of a job.
 
@@ -80,6 +83,14 @@ func newCancelAllRuns() *cobra.Command {
 
 	cmd.Annotations = make(map[string]string)
 
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(0)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	}
+
 	cmd.PreRunE = root.MustWorkspaceClient
 	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
@@ -91,27 +102,6 @@ func newCancelAllRuns() *cobra.Command {
 			return err
 		}
 	} else {
-		if len(args) == 0 {
-			promptSpinner := cmdio.Spinner(ctx)
-			promptSpinner <- "No JOB_ID argument specified. Loading names for Jobs drop-down."
-			names, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
-			close(promptSpinner)
-			if err != nil {
-				return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments.
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "The canonical identifier of the job to cancel all runs of") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have the canonical identifier of the job to cancel all runs of") - } - _, err = fmt.Sscan(args[0], &cancelAllRunsReq.JobId) - if err != nil { - return fmt.Errorf("invalid JOB_ID: %s", args[0]) - } } err = w.Jobs.CancelAllRuns(ctx, cancelAllRunsReq) @@ -163,11 +153,11 @@ func newCancelRun() *cobra.Command { cmd.Flags().Var(&cancelRunJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Use = "cancel-run RUN_ID" - cmd.Short = `Cancel a job run.` - cmd.Long = `Cancel a job run. + cmd.Short = `Cancel a run.` + cmd.Long = `Cancel a run. - Cancels a job run. The run is canceled asynchronously, so it may still be - running when this request completes.` + Cancels a job run or a task run. The run is canceled asynchronously, so it may + still be running when this request completes.` cmd.Annotations = make(map[string]string) @@ -1147,6 +1137,7 @@ func newRepairRun() *cobra.Command { // TODO: array: dbt_commands // TODO: array: jar_params + // TODO: map via StringToStringVar: job_parameters cmd.Flags().Int64Var(&repairRunReq.LatestRepairId, "latest-repair-id", repairRunReq.LatestRepairId, `The ID of the latest repair.`) // TODO: map via StringToStringVar: notebook_params // TODO: complex arg: pipeline_params @@ -1338,11 +1329,12 @@ func newRunNow() *cobra.Command { // TODO: array: dbt_commands cmd.Flags().StringVar(&runNowReq.IdempotencyToken, "idempotency-token", runNowReq.IdempotencyToken, `An optional token to guarantee the idempotency of job run requests.`) // TODO: array: jar_params - // TODO: array: job_parameters + // TODO: map via StringToStringVar: job_parameters // TODO: map via StringToStringVar: notebook_params // TODO: complex arg: pipeline_params // TODO: map via StringToStringVar: python_named_params // TODO: array: python_params + // TODO: complex arg: queue // TODO: array: spark_submit_params // TODO: map via StringToStringVar: sql_params @@ -1545,6 +1537,7 @@ func newSubmit() *cobra.Command { // TODO: complex arg: health cmd.Flags().StringVar(&submitReq.IdempotencyToken, "idempotency-token", submitReq.IdempotencyToken, `An optional token that can be used to guarantee the idempotency of job run requests.`) // TODO: complex arg: notification_settings + // TODO: complex arg: queue cmd.Flags().StringVar(&submitReq.RunName, "run-name", submitReq.RunName, `An optional name for the run.`) // TODO: array: tasks cmd.Flags().IntVar(&submitReq.TimeoutSeconds, "timeout-seconds", submitReq.TimeoutSeconds, `An optional timeout applied to each run of this job.`) diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index e2e55225..4a84bca6 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -457,7 +457,7 @@ func newCreateWebhook() *cobra.Command { // TODO: complex arg: http_url_spec // TODO: complex arg: job_spec cmd.Flags().StringVar(&createWebhookReq.ModelName, "model-name", createWebhookReq.ModelName, `Name of the model whose events would trigger this webhook.`) - cmd.Flags().Var(&createWebhookReq.Status, "status", `This describes an enum.`) + cmd.Flags().Var(&createWebhookReq.Status, "status", `Enable or disable triggering the webhook, or put the webhook into test mode.`) cmd.Use = "create-webhook" 
cmd.Short = `Create a webhook.` @@ -2581,7 +2581,7 @@ func newUpdateWebhook() *cobra.Command { // TODO: array: events // TODO: complex arg: http_url_spec // TODO: complex arg: job_spec - cmd.Flags().Var(&updateWebhookReq.Status, "status", `This describes an enum.`) + cmd.Flags().Var(&updateWebhookReq.Status, "status", `Enable or disable triggering the webhook, or put the webhook into test mode.`) cmd.Use = "update-webhook ID" cmd.Short = `Update a webhook.` diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 10bcc226..06d904d3 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -1102,6 +1102,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Unique identifier for this pipeline.`) // TODO: array: libraries cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Friendly identifier for this pipeline.`) + // TODO: array: notifications cmd.Flags().BoolVar(&updateReq.Photon, "photon", updateReq.Photon, `Whether Photon is enabled for this pipeline.`) cmd.Flags().StringVar(&updateReq.PipelineId, "pipeline-id", updateReq.PipelineId, `Unique identifier for this pipeline.`) cmd.Flags().BoolVar(&updateReq.Serverless, "serverless", updateReq.Serverless, `Whether serverless compute is enabled for this pipeline.`) diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index e22a3844..67614b72 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -134,6 +134,8 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: array: tags + cmd.Use = "create" cmd.Short = `Create a new serving endpoint.` cmd.Long = `Create a new serving endpoint.` @@ -606,21 +608,101 @@ func init() { }) } +// start patch command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var patchOverrides []func( + *cobra.Command, + *serving.PatchServingEndpointTags, +) + +func newPatch() *cobra.Command { + cmd := &cobra.Command{} + + var patchReq serving.PatchServingEndpointTags + var patchJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&patchJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: add_tags + // TODO: array: delete_tags + + cmd.Use = "patch NAME" + cmd.Short = `Patch the tags of a serving endpoint.` + cmd.Long = `Patch the tags of a serving endpoint. + + Used to batch add and delete tags from a serving endpoint with a single API + call.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = patchJson.Unmarshal(&patchReq) + if err != nil { + return err + } + } + patchReq.Name = args[0] + + response, err := w.ServingEndpoints.Patch(ctx, patchReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range patchOverrides { + fn(cmd, &patchReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newPatch()) + }) +} + // start query command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. var queryOverrides []func( *cobra.Command, - *serving.QueryRequest, + *serving.QueryEndpointInput, ) func newQuery() *cobra.Command { cmd := &cobra.Command{} - var queryReq serving.QueryRequest + var queryReq serving.QueryEndpointInput + var queryJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&queryJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: dataframe_records + // TODO: complex arg: dataframe_split + // TODO: any: inputs + // TODO: array: instances cmd.Use = "query NAME" cmd.Short = `Query a serving endpoint with provided model input.` @@ -638,6 +720,12 @@ func newQuery() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = queryJson.Unmarshal(&queryReq) + if err != nil { + return err + } + } queryReq.Name = args[0] response, err := w.ServingEndpoints.Query(ctx, queryReq) diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go new file mode 100755 index 00000000..71a682a4 --- /dev/null +++ b/cmd/workspace/settings/settings.go @@ -0,0 +1,241 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package settings + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "settings", + Short: `// TODO(yuyuan.tang) to add the description for the setting.`, + Long: `// TODO(yuyuan.tang) to add the description for the setting`, + GroupID: "settings", + Annotations: map[string]string{ + "package": "settings", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete-default-workspace-namespace command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteDefaultWorkspaceNamespaceOverrides []func( + *cobra.Command, + *settings.DeleteDefaultWorkspaceNamespaceRequest, +) + +func newDeleteDefaultWorkspaceNamespace() *cobra.Command { + cmd := &cobra.Command{} + + var deleteDefaultWorkspaceNamespaceReq settings.DeleteDefaultWorkspaceNamespaceRequest + + // TODO: short flags + + cmd.Use = "delete-default-workspace-namespace ETAG" + cmd.Short = `Delete the default namespace.` + cmd.Long = `Delete the default namespace. 
+ + Deletes the default namespace.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteDefaultWorkspaceNamespaceReq.Etag = args[0] + + response, err := w.Settings.DeleteDefaultWorkspaceNamespace(ctx, deleteDefaultWorkspaceNamespaceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteDefaultWorkspaceNamespaceOverrides { + fn(cmd, &deleteDefaultWorkspaceNamespaceReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDeleteDefaultWorkspaceNamespace()) + }) +} + +// start read-default-workspace-namespace command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var readDefaultWorkspaceNamespaceOverrides []func( + *cobra.Command, + *settings.ReadDefaultWorkspaceNamespaceRequest, +) + +func newReadDefaultWorkspaceNamespace() *cobra.Command { + cmd := &cobra.Command{} + + var readDefaultWorkspaceNamespaceReq settings.ReadDefaultWorkspaceNamespaceRequest + + // TODO: short flags + + cmd.Use = "read-default-workspace-namespace ETAG" + cmd.Short = `Get the default namespace.` + cmd.Long = `Get the default namespace. + + Gets the default namespace.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + readDefaultWorkspaceNamespaceReq.Etag = args[0] + + response, err := w.Settings.ReadDefaultWorkspaceNamespace(ctx, readDefaultWorkspaceNamespaceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range readDefaultWorkspaceNamespaceOverrides { + fn(cmd, &readDefaultWorkspaceNamespaceReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newReadDefaultWorkspaceNamespace()) + }) +} + +// start update-default-workspace-namespace command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateDefaultWorkspaceNamespaceOverrides []func(
+	*cobra.Command,
+	*settings.UpdateDefaultWorkspaceNamespaceRequest,
+)
+
+func newUpdateDefaultWorkspaceNamespace() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var updateDefaultWorkspaceNamespaceReq settings.UpdateDefaultWorkspaceNamespaceRequest
+	var updateDefaultWorkspaceNamespaceJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&updateDefaultWorkspaceNamespaceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Flags().BoolVar(&updateDefaultWorkspaceNamespaceReq.AllowMissing, "allow-missing", updateDefaultWorkspaceNamespaceReq.AllowMissing, `This should always be set to true for Settings RPCs.`)
+	cmd.Flags().StringVar(&updateDefaultWorkspaceNamespaceReq.FieldMask, "field-mask", updateDefaultWorkspaceNamespaceReq.FieldMask, `Field mask required to be passed into the PATCH request.`)
+	// TODO: complex arg: setting
+
+	cmd.Use = "update-default-workspace-namespace"
+	cmd.Short = `Updates the default namespace setting.`
+	cmd.Long = `Updates the default namespace setting.
+
+  Updates the default namespace setting for the workspace. A fresh etag needs to
+  be provided in PATCH requests (as part of the setting field). The etag can be
+  retrieved by making a GET request before the PATCH request. Note that if the
+  setting does not exist, GET will return a NOT_FOUND error and the etag will be
+  present in the error response, which should be set in the PATCH request.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(0)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = updateDefaultWorkspaceNamespaceJson.Unmarshal(&updateDefaultWorkspaceNamespaceReq)
+			if err != nil {
+				return err
+			}
+		} else {
+		}
+
+		response, err := w.Settings.UpdateDefaultWorkspaceNamespace(ctx, updateDefaultWorkspaceNamespaceReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range updateDefaultWorkspaceNamespaceOverrides {
+		fn(cmd, &updateDefaultWorkspaceNamespaceReq)
+	}
+
+	return cmd
+}
+
+func init() {
+	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
+		cmd.AddCommand(newUpdateDefaultWorkspaceNamespace())
+	})
+}
+
+// end service Settings
diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go
index 2dd729f1..f4347098 100755
--- a/cmd/workspace/system-schemas/system-schemas.go
+++ b/cmd/workspace/system-schemas/system-schemas.go
@@ -26,9 +26,6 @@ func New() *cobra.Command {
 		Annotations: map[string]string{
 			"package": "catalog",
 		},
-
-		// This service is being previewed; hide from help output.
-		Hidden: true,
 	}
 
 	// Apply optional overrides to this command.
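For reference, the regenerated `settings` commands above are thin wrappers over the workspace client in the Go SDK pinned below. A minimal sketch of the same default-namespace read driven directly through databricks-sdk-go — assuming SDK v0.22.0 as required by go.mod, with error handling reduced to a panic, and an empty etag assumed to fetch the latest value (the generated command instead passes its ETAG positional argument through):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Read the current default namespace setting. The response carries the
	// etag that a later update must send back; per the command help above,
	// a NOT_FOUND error response also carries a usable etag.
	setting, err := w.Settings.ReadDefaultWorkspaceNamespace(ctx, settings.ReadDefaultWorkspaceNamespaceRequest{
		Etag: "", // assumed: empty etag to fetch the latest value
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("default namespace setting: %+v\n", setting)
}

The generated `databricks settings read-default-workspace-namespace ETAG` command performs this same call with the etag taken from its positional argument.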
diff --git a/go.mod b/go.mod index 9f999035..918d3ce2 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.19.3-0.20230914130855-dacb7f4fc878 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.22.0 // Apache 2.0 github.com/fatih/color v1.15.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.3.1 // BSD-3-Clause @@ -42,8 +42,8 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/s2a-go v0.1.5 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -54,10 +54,10 @@ require ( golang.org/x/net v0.15.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/api v0.138.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect - google.golang.org/grpc v1.57.0 // indirect + google.golang.org/api v0.143.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 99cfefe7..13eed5a3 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,4 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= @@ -13,14 +12,12 @@ github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwC github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10 
h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= @@ -31,14 +28,9 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/databricks/databricks-sdk-go v0.19.3-0.20230914130855-dacb7f4fc878 h1:BteIFhP/8wlfEF3CMX8YFMb4fRD4T0dvcROmzZTeyWw= -github.com/databricks/databricks-sdk-go v0.19.3-0.20230914130855-dacb7f4fc878/go.mod h1:Bt/3i3ry/rQdE6Y+psvkAENlp+LzJHaQK5PsLIstQb4= +github.com/databricks/databricks-sdk-go v0.22.0 h1:CIwNZcOV7wYZmRLl1NWA+07f2j6H9h5L6MhR5O/4dRw= +github.com/databricks/databricks-sdk-go v0.22.0/go.mod h1:COiklTN3IdieazXcs4TnMou5GQFwIM7uhMGrz7nEAAk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -47,8 +39,6 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= @@ -67,16 +57,13 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -94,16 +81,15 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= -github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= @@ -141,7 +127,6 @@ github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzL github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= @@ -156,9 +141,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -173,11 +156,9 @@ github.com/zclconf/go-cty v1.14.0 h1:/Xrd39K7DXbHzlisFP9c4pHao4yyf+/Ug9LEz+Y/yhc github.com/zclconf/go-cty v1.14.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= @@ -194,16 +175,12 @@ golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -211,12 +188,10 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -228,10 +203,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -251,9 +224,7 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -274,29 +245,24 @@ golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0= -google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY= +google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA= +google.golang.org/api v0.143.0/go.mod h1:FoX9DO9hT7DLNn97OuoZAGSDuNAXdJRuGK98rSUgurk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod 
h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -316,8 +282,6 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 40ae23bb33cb8babb590f9bf21fd1d357b54d902 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 3 Oct 2023 15:47:46 +0200 Subject: [PATCH 128/310] Refactor change computation for sync (#785) ## Changes This PR pays some tech debt by refactoring sync diff computation into interfaces that are more robust. Specifically: 1. Refactor the single diff computation function into a `SnapshotState` class that computes the target state only based on the current local files making it more robust and not carrying over state from previous iterations. 2. Adds new validations for the sync state which make sure that the invariants that downstream code expects are actually held true. This prevents a class of issues where these invariants break and the synchroniser behaves unexpectedly. Note, this does not change the existing schema for the snapshot, only the way the diff is computed, and thus is backwards compatible (ie does not require a schema version bump). 
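At a high level, the new `diff` computation reduces to three steps (a minimal sketch condensed from the `libs/sync/snapshot.go` changes below; `localFiles` stands in for the `all []fileset.File` argument, and error wrapping plus the in-memory state update are omitted for brevity):

```
// Build the target state purely from the files currently on disk,
// without carrying over state from previous iterations.
targetState, err := NewSnapshotState(localFiles)
if err != nil {
    return diff{}, err
}

// Check that the previously persisted state still satisfies the
// invariants downstream code depends on (a 1:1 local <-> remote name
// mapping, and a last-modified time for every tracked file).
if err := currentState.validate(); err != nil {
    return diff{}, err
}

// Compute the put/delete/mkdir/rmdir operations that bring WSFS from
// the current state to the target state.
change := computeDiff(targetState, currentState)
```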
## Tests --- internal/sync_test.go | 6 +- libs/sync/diff.go | 69 +++++++++ libs/sync/diff_test.go | 113 ++++++++++++++ libs/sync/snapshot.go | 144 +++--------------- libs/sync/snapshot_state.go | 114 ++++++++++++++ libs/sync/snapshot_state_test.go | 116 ++++++++++++++ libs/sync/snapshot_test.go | 66 ++++---- .../testdata/sync-fileset/invalid-nb.ipynb | 0 libs/sync/testdata/sync-fileset/my-nb.py | 2 + libs/sync/testdata/sync-fileset/my-script.py | 1 + .../sync/testdata/sync-fileset/valid-nb.ipynb | 21 +++ 11 files changed, 500 insertions(+), 152 deletions(-) create mode 100644 libs/sync/snapshot_state.go create mode 100644 libs/sync/snapshot_state_test.go create mode 100644 libs/sync/testdata/sync-fileset/invalid-nb.ipynb create mode 100644 libs/sync/testdata/sync-fileset/my-nb.py create mode 100644 libs/sync/testdata/sync-fileset/my-script.py create mode 100644 libs/sync/testdata/sync-fileset/valid-nb.ipynb diff --git a/internal/sync_test.go b/internal/sync_test.go index 9eb1ac1b..f970a7ce 100644 --- a/internal/sync_test.go +++ b/internal/sync_test.go @@ -222,10 +222,10 @@ func (a *syncTest) snapshotContains(files []string) { assert.Equal(a.t, s.Host, a.w.Config.Host) assert.Equal(a.t, s.RemotePath, a.remoteRoot) for _, filePath := range files { - _, ok := s.LastUpdatedTimes[filePath] - assert.True(a.t, ok, fmt.Sprintf("%s not in snapshot file: %v", filePath, s.LastUpdatedTimes)) + _, ok := s.LastModifiedTimes[filePath] + assert.True(a.t, ok, fmt.Sprintf("%s not in snapshot file: %v", filePath, s.LastModifiedTimes)) } - assert.Equal(a.t, len(files), len(s.LastUpdatedTimes)) + assert.Equal(a.t, len(files), len(s.LastModifiedTimes)) } func TestAccSyncFullFileSync(t *testing.T) { diff --git a/libs/sync/diff.go b/libs/sync/diff.go index 26e99b34..074bfc56 100644 --- a/libs/sync/diff.go +++ b/libs/sync/diff.go @@ -2,8 +2,12 @@ package sync import ( "path" + "path/filepath" + + "golang.org/x/exp/maps" ) +// List of operations to apply to synchronize local file system changes to WSFS. type diff struct { delete []string rmdir []string @@ -15,6 +19,71 @@ func (d diff) IsEmpty() bool { return len(d.put) == 0 && len(d.delete) == 0 } +// Compute operations required to make files in WSFS reflect current local files. +// Takes into account changes since the last sync iteration. +func computeDiff(after *SnapshotState, before *SnapshotState) diff { + d := &diff{ + delete: make([]string, 0), + rmdir: make([]string, 0), + mkdir: make([]string, 0), + put: make([]string, 0), + } + d.addRemovedFiles(after, before) + d.addFilesWithRemoteNameChanged(after, before) + d.addNewFiles(after, before) + d.addUpdatedFiles(after, before) + return *d +} + +// Add operations for tracked files that no longer exist. +func (d *diff) addRemovedFiles(after *SnapshotState, before *SnapshotState) { + for localName, remoteName := range before.LocalToRemoteNames { + if _, ok := after.LocalToRemoteNames[localName]; !ok { + d.delete = append(d.delete, remoteName) + } + } + + // Remove directories that would no longer contain any files. + beforeDirs := MakeDirSet(maps.Keys(before.LocalToRemoteNames)) + afterDirs := MakeDirSet(maps.Keys(after.LocalToRemoteNames)) + d.rmdir = beforeDirs.Remove(afterDirs).Slice() +} + +// Clean up previous remote files for files that had their remote targets change. For +// example, this is possible if you convert a normal Python script to a notebook.
+func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *SnapshotState) { + for localName, beforeRemoteName := range before.LocalToRemoteNames { + afterRemoteName, ok := after.LocalToRemoteNames[localName] + if ok && afterRemoteName != beforeRemoteName { + d.delete = append(d.delete, beforeRemoteName) + } + } +} + +// Add operations for files that were not being tracked before. +func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { + for localName := range after.LastModifiedTimes { + if _, ok := before.LastModifiedTimes[localName]; !ok { + d.put = append(d.put, filepath.ToSlash(localName)) + } + } + + // Add directories required for these new files. + beforeDirs := MakeDirSet(maps.Keys(before.LocalToRemoteNames)) + afterDirs := MakeDirSet(maps.Keys(after.LocalToRemoteNames)) + d.mkdir = afterDirs.Remove(beforeDirs).Slice() +} + +// Add operations for files which had their contents updated. +func (d *diff) addUpdatedFiles(after *SnapshotState, before *SnapshotState) { + for localName, modTime := range after.LastModifiedTimes { + prevModTime, ok := before.LastModifiedTimes[localName] + if ok && modTime.After(prevModTime) { + d.put = append(d.put, filepath.ToSlash(localName)) + } + } +} + // groupedMkdir returns a slice of slices of paths to create. // Because the underlying mkdir calls create intermediate directories, // we can group them together to reduce the total number of calls. diff --git a/libs/sync/diff_test.go b/libs/sync/diff_test.go index ff448872..94b6cc37 100644 --- a/libs/sync/diff_test.go +++ b/libs/sync/diff_test.go @@ -2,6 +2,7 @@ package sync import ( "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -71,3 +72,115 @@ func TestDiffGroupedRmdirWithLeafsOnly(t *testing.T) { assert.Len(t, out, 1) assert.ElementsMatch(t, d.rmdir, out[0]) } + +func TestDiffComputationForRemovedFiles(t *testing.T) { + before := &SnapshotState{ + LocalToRemoteNames: map[string]string{ + "foo/a/b/c.py": "foo/a/b/c", + }, + RemoteToLocalNames: map[string]string{ + "foo/a/b/c": "foo/a/b/c.py", + }, + } + after := &SnapshotState{} + + expected := diff{ + delete: []string{"foo/a/b/c"}, + rmdir: []string{"foo", "foo/a", "foo/a/b"}, + mkdir: []string{}, + put: []string{}, + } + assert.Equal(t, expected, computeDiff(after, before)) +} + +func TestDiffComputationWhenRemoteNameIsChanged(t *testing.T) { + tick := time.Now() + before := &SnapshotState{ + LocalToRemoteNames: map[string]string{ + "foo/a/b/c.py": "foo/a/b/c", + }, + RemoteToLocalNames: map[string]string{ + "foo/a/b/c": "foo/a/b/c.py", + }, + LastModifiedTimes: map[string]time.Time{ + "foo/a/b/c.py": tick, + }, + } + tick = tick.Add(time.Second) + after := &SnapshotState{ + LocalToRemoteNames: map[string]string{ + "foo/a/b/c.py": "foo/a/b/c.py", + }, + RemoteToLocalNames: map[string]string{ + "foo/a/b/c.py": "foo/a/b/c.py", + }, + LastModifiedTimes: map[string]time.Time{ + "foo/a/b/c.py": tick, + }, + } + + expected := diff{ + delete: []string{"foo/a/b/c"}, + rmdir: []string{}, + mkdir: []string{}, + put: []string{"foo/a/b/c.py"}, + } + assert.Equal(t, expected, computeDiff(after, before)) +} + +func TestDiffComputationForNewFiles(t *testing.T) { + after := &SnapshotState{ + LocalToRemoteNames: map[string]string{ + "foo/a/b/c.py": "foo/a/b/c", + }, + RemoteToLocalNames: map[string]string{ + "foo/a/b/c": "foo/a/b/c.py", + }, + LastModifiedTimes: map[string]time.Time{ + "foo/a/b/c.py": time.Now(), + }, + } + + expected := diff{ + delete: []string{}, + rmdir: []string{}, + mkdir: []string{"foo", 
"foo/a", "foo/a/b"}, + put: []string{"foo/a/b/c.py"}, + } + assert.Equal(t, expected, computeDiff(after, &SnapshotState{})) +} + +func TestDiffComputationForUpdatedFiles(t *testing.T) { + tick := time.Now() + before := &SnapshotState{ + LocalToRemoteNames: map[string]string{ + "foo/a/b/c": "foo/a/b/c", + }, + RemoteToLocalNames: map[string]string{ + "foo/a/b/c": "foo/a/b/c", + }, + LastModifiedTimes: map[string]time.Time{ + "foo/a/b/c": tick, + }, + } + tick = tick.Add(time.Second) + after := &SnapshotState{ + LocalToRemoteNames: map[string]string{ + "foo/a/b/c": "foo/a/b/c", + }, + RemoteToLocalNames: map[string]string{ + "foo/a/b/c": "foo/a/b/c", + }, + LastModifiedTimes: map[string]time.Time{ + "foo/a/b/c": tick, + }, + } + + expected := diff{ + delete: []string{}, + rmdir: []string{}, + mkdir: []string{}, + put: []string{"foo/a/b/c"}, + } + assert.Equal(t, expected, computeDiff(after, before)) +} diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index 1680f046..7e2130e9 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -6,7 +6,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "time" "crypto/md5" @@ -14,8 +13,6 @@ import ( "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/log" - "github.com/databricks/cli/libs/notebook" - "golang.org/x/exp/maps" ) // Bump it up every time a potentially breaking change is made to the snapshot schema @@ -51,19 +48,7 @@ type Snapshot struct { // Path in workspace for project repo RemotePath string `json:"remote_path"` - // Map of all files present in the remote repo with the: - // key: relative file path from project root - // value: last time the remote instance of this file was updated - LastUpdatedTimes map[string]time.Time `json:"last_modified_times"` - - // This map maps local file names to their remote names - // eg. notebook named "foo.py" locally would be stored as "foo", thus this - // map will contain an entry "foo.py" -> "foo" - LocalToRemoteNames map[string]string `json:"local_to_remote_names"` - - // Inverse of localToRemoteNames. 
Together the form a bijective mapping (ie - // there is a 1:1 unique mapping between local and remote name) - RemoteToLocalNames map[string]string `json:"remote_to_local_names"` + *SnapshotState } const syncSnapshotDirName = "sync-snapshots" @@ -99,12 +84,14 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { SnapshotPath: path, New: true, - Version: LatestSnapshotVersion, - Host: opts.Host, - RemotePath: opts.RemotePath, - LastUpdatedTimes: make(map[string]time.Time), - LocalToRemoteNames: make(map[string]string), - RemoteToLocalNames: make(map[string]string), + Version: LatestSnapshotVersion, + Host: opts.Host, + RemotePath: opts.RemotePath, + SnapshotState: &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + }, }, nil } @@ -173,109 +160,22 @@ func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error return snapshot, nil } -func (s *Snapshot) diff(ctx context.Context, all []fileset.File) (change diff, err error) { - lastModifiedTimes := s.LastUpdatedTimes - remoteToLocalNames := s.RemoteToLocalNames - localToRemoteNames := s.LocalToRemoteNames - - // set of files currently present in the local file system and tracked by git - localFileSet := map[string]struct{}{} - for _, f := range all { - localFileSet[f.Relative] = struct{}{} +func (s *Snapshot) diff(ctx context.Context, all []fileset.File) (diff, error) { + targetState, err := NewSnapshotState(all) + if err != nil { + return diff{}, fmt.Errorf("error while computing new sync state: %w", err) } - // Capture both previous and current set of files. - previousFiles := maps.Keys(lastModifiedTimes) - currentFiles := maps.Keys(localFileSet) - - // Build directory sets to figure out which directories to create and which to remove. - previousDirectories := MakeDirSet(previousFiles) - currentDirectories := MakeDirSet(currentFiles) - - // Create new directories; remove stale directories. - change.mkdir = currentDirectories.Remove(previousDirectories).Slice() - change.rmdir = previousDirectories.Remove(currentDirectories).Slice() - - for _, f := range all { - // get current modified timestamp - modified := f.Modified() - lastSeenModified, seen := lastModifiedTimes[f.Relative] - - if !seen || modified.After(lastSeenModified) { - lastModifiedTimes[f.Relative] = modified - - // get file metadata about whether it's a notebook - isNotebook, _, err := notebook.Detect(f.Absolute) - if err != nil { - // Ignore this file if we're unable to determine the notebook type. - // Trying to upload such a file to the workspace would fail anyway. - log.Warnf(ctx, err.Error()) - continue - } - - // change separators to '/' for file paths in remote store - unixFileName := filepath.ToSlash(f.Relative) - - // put file in databricks workspace - change.put = append(change.put, unixFileName) - - // Strip extension for notebooks. - remoteName := unixFileName - if isNotebook { - ext := filepath.Ext(remoteName) - remoteName = strings.TrimSuffix(remoteName, ext) - } - - // If the remote handle of a file changes, we want to delete the old - // remote version of that file to avoid duplicates. 
- // This can happen if a python notebook is converted to a python - // script or vice versa - oldRemoteName, ok := localToRemoteNames[f.Relative] - if ok && oldRemoteName != remoteName { - change.delete = append(change.delete, oldRemoteName) - delete(remoteToLocalNames, oldRemoteName) - } - - // We cannot allow two local files in the project to point to the same - // remote path - prevLocalName, ok := remoteToLocalNames[remoteName] - _, prevLocalFileExists := localFileSet[prevLocalName] - if ok && prevLocalName != f.Relative && prevLocalFileExists { - return change, fmt.Errorf("both %s and %s point to the same remote file location %s. Please remove one of them from your local project", prevLocalName, f.Relative, remoteName) - } - localToRemoteNames[f.Relative] = remoteName - remoteToLocalNames[remoteName] = f.Relative - } - } - // figure out files in the snapshot.lastModifiedTimes, but not on local - // filesystem. These will be deleted - for localName := range lastModifiedTimes { - _, exists := localFileSet[localName] - if exists { - continue - } - - // TODO: https://databricks.atlassian.net/browse/DECO-429 - // Add error wrapper giving instructions like this for all errors here :) - remoteName, ok := localToRemoteNames[localName] - if !ok { - return change, fmt.Errorf("missing remote path for local path: %s. Please try syncing again after deleting .databricks/sync-snapshots dir from your project root", localName) - } - - // add them to a delete batch - change.delete = append(change.delete, remoteName) + currentState := s.SnapshotState + if err := currentState.validate(); err != nil { + return diff{}, fmt.Errorf("error parsing existing sync state: %w", err) } - // and remove them from the snapshot - for _, remoteName := range change.delete { - // we do note assert that remoteName exists in remoteToLocalNames since it - // will be missing for files with remote name changed - localName := remoteToLocalNames[remoteName] + // Compute diff to apply to get from current state to new target state. + diff := computeDiff(targetState, currentState) - delete(lastModifiedTimes, localName) - delete(remoteToLocalNames, remoteName) - delete(localToRemoteNames, localName) - } - - return + // Update state to new value. This is not persisted to the file system before + // the diff is applied successfully. + s.SnapshotState = targetState + return diff, nil } diff --git a/libs/sync/snapshot_state.go b/libs/sync/snapshot_state.go new file mode 100644 index 00000000..57506352 --- /dev/null +++ b/libs/sync/snapshot_state.go @@ -0,0 +1,114 @@ +package sync + +import ( + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/notebook" +) + +// SnapshotState keeps track of files on the local filesystem and their corresponding +// entries in WSFS. +type SnapshotState struct { + // Map of local file names to their last recorded modified time. Files found + // to have a newer mtime have their content synced to their remote version. + LastModifiedTimes map[string]time.Time `json:"last_modified_times"` + + // Map of local file names to corresponding remote names. + // For example: A notebook named "foo.py" locally would be stored as "foo" + // in WSFS, and the entry would be: {"foo.py": "foo"} + LocalToRemoteNames map[string]string `json:"local_to_remote_names"` + + // Inverse of LocalToRemoteNames. Together they form a 1:1 mapping where all + // the remote names and local names are unique. 
+ RemoteToLocalNames map[string]string `json:"remote_to_local_names"` +} + +// Convert an array of files on the local file system to a SnapshotState representation. +func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { + fs := &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + } + + // Expect no files to have a duplicate entry in the input array. + seen := make(map[string]fileset.File) + for _, f := range localFiles { + if _, ok := seen[f.Relative]; !ok { + seen[f.Relative] = f + } else { + return nil, fmt.Errorf("expected only one entry per file. Found duplicate entries for file: %s", f.Relative) + } + } + + // Compute the new state. + for _, f := range localFiles { + // Compute the remote name the file will have in WSFS + remoteName := filepath.ToSlash(f.Relative) + isNotebook, _, err := notebook.Detect(f.Absolute) + if err != nil { + // Ignore this file if we're unable to determine the notebook type. + // Trying to upload such a file to the workspace would fail anyway. + continue + } + if isNotebook { + ext := filepath.Ext(remoteName) + remoteName = strings.TrimSuffix(remoteName, ext) + } + + // Add the file to snapshot state + fs.LastModifiedTimes[f.Relative] = f.Modified() + if existingLocalName, ok := fs.RemoteToLocalNames[remoteName]; ok { + return nil, fmt.Errorf("both %s and %s point to the same remote file location %s. Please remove one of them from your local project", existingLocalName, f.Relative, remoteName) + } + + fs.LocalToRemoteNames[f.Relative] = remoteName + fs.RemoteToLocalNames[remoteName] = f.Relative + } + return fs, nil +} + +// Consistency checks for the sync files state representation. These are invariants +// that downstream code for computing changes to apply to WSFS depends on. +// +// Invariants: +// 1. All entries in LastModifiedTimes have a corresponding entry in LocalToRemoteNames +// and vice versa. +// 2. LocalToRemoteNames and RemoteToLocalNames together form a 1:1 mapping of +// local <-> remote file names. +func (fs *SnapshotState) validate() error { + // Validate invariant (1) + for localName := range fs.LastModifiedTimes { + if _, ok := fs.LocalToRemoteNames[localName]; !ok { + return fmt.Errorf("invalid sync state representation. Local file %s is missing the corresponding remote file", localName) + } + } + for localName := range fs.LocalToRemoteNames { + if _, ok := fs.LastModifiedTimes[localName]; !ok { + return fmt.Errorf("invalid sync state representation. Local file %s is missing it's last modified time", localName) + } + } + + // Validate invariant (2) + for localName, remoteName := range fs.LocalToRemoteNames { + if _, ok := fs.RemoteToLocalNames[remoteName]; !ok { + return fmt.Errorf("invalid sync state representation. Remote file %s is missing the corresponding local file", remoteName) + } + if fs.RemoteToLocalNames[remoteName] != localName { + return fmt.Errorf("invalid sync state representation. Inconsistent values found. Local file %s points to %s. Remote file %s points to %s", localName, remoteName, remoteName, fs.RemoteToLocalNames[remoteName]) + } + } + for remoteName, localName := range fs.RemoteToLocalNames { + if _, ok := fs.LocalToRemoteNames[localName]; !ok { + return fmt.Errorf("invalid sync state representation. local file %s is missing the corresponding remote file", localName) + } + if fs.LocalToRemoteNames[localName] != remoteName { + return fmt.Errorf("invalid sync state representation. 
Inconsistent values found. Remote file %s points to %s. Local file %s points to %s", remoteName, localName, localName, fs.LocalToRemoteNames[localName]) + } + } + return nil +} diff --git a/libs/sync/snapshot_state_test.go b/libs/sync/snapshot_state_test.go new file mode 100644 index 00000000..bfcdbef6 --- /dev/null +++ b/libs/sync/snapshot_state_test.go @@ -0,0 +1,116 @@ +package sync + +import ( + "testing" + "time" + + "github.com/databricks/cli/libs/fileset" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSnapshotState(t *testing.T) { + fileSet := fileset.New("./testdata/sync-fileset") + files, err := fileSet.All() + require.NoError(t, err) + + // Assert initial contents of the fileset + assert.Len(t, files, 4) + assert.Equal(t, "invalid-nb.ipynb", files[0].Name()) + assert.Equal(t, "my-nb.py", files[1].Name()) + assert.Equal(t, "my-script.py", files[2].Name()) + assert.Equal(t, "valid-nb.ipynb", files[3].Name()) + + // Assert snapshot state generated from the fileset. Note that the invalid notebook + // has been ignored. + s, err := NewSnapshotState(files) + require.NoError(t, err) + assertKeysOfMap(t, s.LastModifiedTimes, []string{"valid-nb.ipynb", "my-nb.py", "my-script.py"}) + assertKeysOfMap(t, s.LocalToRemoteNames, []string{"valid-nb.ipynb", "my-nb.py", "my-script.py"}) + assertKeysOfMap(t, s.RemoteToLocalNames, []string{"valid-nb", "my-nb", "my-script.py"}) + assert.NoError(t, s.validate()) +} + +func TestSnapshotStateValidationErrors(t *testing.T) { + s := &SnapshotState{ + LastModifiedTimes: map[string]time.Time{ + "a": time.Now(), + }, + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + } + assert.EqualError(t, s.validate(), "invalid sync state representation. Local file a is missing the corresponding remote file") + + s = &SnapshotState{ + LastModifiedTimes: map[string]time.Time{}, + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: map[string]string{ + "a": "b", + }, + } + assert.EqualError(t, s.validate(), "invalid sync state representation. local file b is missing the corresponding remote file") + + s = &SnapshotState{ + LastModifiedTimes: map[string]time.Time{ + "a": time.Now(), + }, + LocalToRemoteNames: map[string]string{ + "a": "b", + }, + RemoteToLocalNames: make(map[string]string), + } + assert.EqualError(t, s.validate(), "invalid sync state representation. Remote file b is missing the corresponding local file") + + s = &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: map[string]string{ + "a": "b", + }, + RemoteToLocalNames: map[string]string{ + "b": "a", + }, + } + assert.EqualError(t, s.validate(), "invalid sync state representation. Local file a is missing it's last modified time") + + s = &SnapshotState{ + LastModifiedTimes: map[string]time.Time{ + "a": time.Now(), + }, + LocalToRemoteNames: map[string]string{ + "a": "b", + }, + RemoteToLocalNames: map[string]string{ + "b": "b", + }, + } + assert.EqualError(t, s.validate(), "invalid sync state representation. Inconsistent values found. Local file a points to b. Remote file b points to b") + + s = &SnapshotState{ + LastModifiedTimes: map[string]time.Time{ + "a": time.Now(), + "c": time.Now(), + }, + LocalToRemoteNames: map[string]string{ + "a": "b", + "c": "b", + }, + RemoteToLocalNames: map[string]string{ + "b": "a", + }, + } + assert.EqualError(t, s.validate(), "invalid sync state representation. Inconsistent values found. Local file c points to b. 
Remote file b points to a") + + s = &SnapshotState{ + LastModifiedTimes: map[string]time.Time{ + "a": time.Now(), + }, + LocalToRemoteNames: map[string]string{ + "a": "b", + }, + RemoteToLocalNames: map[string]string{ + "b": "a", + "c": "a", + }, + } + assert.EqualError(t, s.validate(), "invalid sync state representation. Inconsistent values found. Remote file c points to a. Local file a points to b") +} diff --git a/libs/sync/snapshot_test.go b/libs/sync/snapshot_test.go index c2e8f6b8..d6358d4a 100644 --- a/libs/sync/snapshot_test.go +++ b/libs/sync/snapshot_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func assertKeysOfMap(t *testing.T, m map[string]time.Time, expectedKeys []string) { +func assertKeysOfMap[T any](t *testing.T, m map[string]T, expectedKeys []string) { keys := make([]string, len(m)) i := 0 for k := range m { @@ -32,9 +32,11 @@ func TestDiff(t *testing.T) { fileSet, err := git.NewFileSet(projectDir) require.NoError(t, err) state := Snapshot{ - LastUpdatedTimes: make(map[string]time.Time), - LocalToRemoteNames: make(map[string]string), - RemoteToLocalNames: make(map[string]string), + SnapshotState: &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + }, } f1 := testfile.CreateFile(t, filepath.Join(projectDir, "hello.txt")) @@ -52,7 +54,7 @@ func TestDiff(t *testing.T) { assert.Len(t, change.put, 2) assert.Contains(t, change.put, "hello.txt") assert.Contains(t, change.put, "world.txt") - assertKeysOfMap(t, state.LastUpdatedTimes, []string{"hello.txt", "world.txt"}) + assertKeysOfMap(t, state.LastModifiedTimes, []string{"hello.txt", "world.txt"}) assert.Equal(t, map[string]string{"hello.txt": "hello.txt", "world.txt": "world.txt"}, state.LocalToRemoteNames) assert.Equal(t, map[string]string{"hello.txt": "hello.txt", "world.txt": "world.txt"}, state.RemoteToLocalNames) @@ -67,7 +69,7 @@ func TestDiff(t *testing.T) { assert.Len(t, change.delete, 0) assert.Len(t, change.put, 1) assert.Contains(t, change.put, "world.txt") - assertKeysOfMap(t, state.LastUpdatedTimes, []string{"hello.txt", "world.txt"}) + assertKeysOfMap(t, state.LastModifiedTimes, []string{"hello.txt", "world.txt"}) assert.Equal(t, map[string]string{"hello.txt": "hello.txt", "world.txt": "world.txt"}, state.LocalToRemoteNames) assert.Equal(t, map[string]string{"hello.txt": "hello.txt", "world.txt": "world.txt"}, state.RemoteToLocalNames) @@ -81,7 +83,7 @@ func TestDiff(t *testing.T) { assert.Len(t, change.delete, 1) assert.Len(t, change.put, 0) assert.Contains(t, change.delete, "hello.txt") - assertKeysOfMap(t, state.LastUpdatedTimes, []string{"world.txt"}) + assertKeysOfMap(t, state.LastModifiedTimes, []string{"world.txt"}) assert.Equal(t, map[string]string{"world.txt": "world.txt"}, state.LocalToRemoteNames) assert.Equal(t, map[string]string{"world.txt": "world.txt"}, state.RemoteToLocalNames) } @@ -94,9 +96,11 @@ func TestSymlinkDiff(t *testing.T) { fileSet, err := git.NewFileSet(projectDir) require.NoError(t, err) state := Snapshot{ - LastUpdatedTimes: make(map[string]time.Time), - LocalToRemoteNames: make(map[string]string), - RemoteToLocalNames: make(map[string]string), + SnapshotState: &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + }, } err = os.Mkdir(filepath.Join(projectDir, "foo"), os.ModePerm) @@ -123,9 +127,11 @@ func TestFolderDiff(t *testing.T) { 
fileSet, err := git.NewFileSet(projectDir) require.NoError(t, err) state := Snapshot{ - LastUpdatedTimes: make(map[string]time.Time), - LocalToRemoteNames: make(map[string]string), - RemoteToLocalNames: make(map[string]string), + SnapshotState: &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + }, } err = os.Mkdir(filepath.Join(projectDir, "foo"), os.ModePerm) @@ -166,9 +172,11 @@ func TestPythonNotebookDiff(t *testing.T) { fileSet, err := git.NewFileSet(projectDir) require.NoError(t, err) state := Snapshot{ - LastUpdatedTimes: make(map[string]time.Time), - LocalToRemoteNames: make(map[string]string), - RemoteToLocalNames: make(map[string]string), + SnapshotState: &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + }, } foo := testfile.CreateFile(t, filepath.Join(projectDir, "foo.py")) @@ -183,7 +191,7 @@ func TestPythonNotebookDiff(t *testing.T) { assert.Len(t, change.delete, 0) assert.Len(t, change.put, 1) assert.Contains(t, change.put, "foo.py") - assertKeysOfMap(t, state.LastUpdatedTimes, []string{"foo.py"}) + assertKeysOfMap(t, state.LastModifiedTimes, []string{"foo.py"}) assert.Equal(t, map[string]string{"foo.py": "foo"}, state.LocalToRemoteNames) assert.Equal(t, map[string]string{"foo": "foo.py"}, state.RemoteToLocalNames) @@ -198,7 +206,7 @@ func TestPythonNotebookDiff(t *testing.T) { assert.Len(t, change.put, 1) assert.Contains(t, change.put, "foo.py") assert.Contains(t, change.delete, "foo") - assertKeysOfMap(t, state.LastUpdatedTimes, []string{"foo.py"}) + assertKeysOfMap(t, state.LastModifiedTimes, []string{"foo.py"}) assert.Equal(t, map[string]string{"foo.py": "foo.py"}, state.LocalToRemoteNames) assert.Equal(t, map[string]string{"foo.py": "foo.py"}, state.RemoteToLocalNames) @@ -212,7 +220,7 @@ func TestPythonNotebookDiff(t *testing.T) { assert.Len(t, change.put, 1) assert.Contains(t, change.put, "foo.py") assert.Contains(t, change.delete, "foo.py") - assertKeysOfMap(t, state.LastUpdatedTimes, []string{"foo.py"}) + assertKeysOfMap(t, state.LastModifiedTimes, []string{"foo.py"}) assert.Equal(t, map[string]string{"foo.py": "foo"}, state.LocalToRemoteNames) assert.Equal(t, map[string]string{"foo": "foo.py"}, state.RemoteToLocalNames) @@ -226,7 +234,7 @@ func TestPythonNotebookDiff(t *testing.T) { assert.Len(t, change.delete, 1) assert.Len(t, change.put, 0) assert.Contains(t, change.delete, "foo") - assert.Len(t, state.LastUpdatedTimes, 0) + assert.Len(t, state.LastModifiedTimes, 0) assert.Equal(t, map[string]string{}, state.LocalToRemoteNames) assert.Equal(t, map[string]string{}, state.RemoteToLocalNames) } @@ -239,9 +247,11 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) { fileSet, err := git.NewFileSet(projectDir) require.NoError(t, err) state := Snapshot{ - LastUpdatedTimes: make(map[string]time.Time), - LocalToRemoteNames: make(map[string]string), - RemoteToLocalNames: make(map[string]string), + SnapshotState: &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + }, } // upload should work since they point to different destinations @@ -274,9 +284,11 @@ func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) { fileSet, err := git.NewFileSet(projectDir) require.NoError(t, err) state := Snapshot{ - LastUpdatedTimes: make(map[string]time.Time), - 
LocalToRemoteNames: make(map[string]string), - RemoteToLocalNames: make(map[string]string), + SnapshotState: &SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + }, } // upload should work since they point to different destinations @@ -321,7 +333,7 @@ func TestNewSnapshotDefaults(t *testing.T) { assert.Equal(t, LatestSnapshotVersion, snapshot.Version) assert.Equal(t, opts.RemotePath, snapshot.RemotePath) assert.Equal(t, opts.Host, snapshot.Host) - assert.Empty(t, snapshot.LastUpdatedTimes) + assert.Empty(t, snapshot.LastModifiedTimes) assert.Empty(t, snapshot.RemoteToLocalNames) assert.Empty(t, snapshot.LocalToRemoteNames) } diff --git a/libs/sync/testdata/sync-fileset/invalid-nb.ipynb b/libs/sync/testdata/sync-fileset/invalid-nb.ipynb new file mode 100644 index 00000000..e69de29b diff --git a/libs/sync/testdata/sync-fileset/my-nb.py b/libs/sync/testdata/sync-fileset/my-nb.py new file mode 100644 index 00000000..7bdf929b --- /dev/null +++ b/libs/sync/testdata/sync-fileset/my-nb.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("2") diff --git a/libs/sync/testdata/sync-fileset/my-script.py b/libs/sync/testdata/sync-fileset/my-script.py new file mode 100644 index 00000000..d2297561 --- /dev/null +++ b/libs/sync/testdata/sync-fileset/my-script.py @@ -0,0 +1 @@ +print("1") diff --git a/libs/sync/testdata/sync-fileset/valid-nb.ipynb b/libs/sync/testdata/sync-fileset/valid-nb.ipynb new file mode 100644 index 00000000..f36e3589 --- /dev/null +++ b/libs/sync/testdata/sync-fileset/valid-nb.ipynb @@ -0,0 +1,21 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"3\")" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 9b6a8471786a2d1c7d0a3911dfba00d7e981ab2d Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 3 Oct 2023 15:59:28 +0200 Subject: [PATCH 129/310] Mark artifacts properties as optional (#834) ## Changes Mark artifacts properties as optional Fixes #816 --- bundle/config/artifact.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index 755116eb..63ab6c48 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -37,12 +37,12 @@ type Artifact struct { // The local path to the directory with a root of artifact, // for example, where setup.py is for Python projects - Path string `json:"path"` + Path string `json:"path,omitempty"` // The relative or absolute path to the built artifact files // (Python wheel, Java jar and etc) itself - Files []ArtifactFile `json:"files"` - BuildCommand string `json:"build"` + Files []ArtifactFile `json:"files,omitempty"` + BuildCommand string `json:"build,omitempty"` paths.Paths } From aa54a8665a66ddd397a4a357c7099ffe3906f38e Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 4 Oct 2023 15:23:13 +0200 Subject: [PATCH 130/310] Added support for glob patterns in pipeline libraries section (#833) ## Changes Now it's possible to specify glob pattern in pipeline libraries section and DAB will add all matched files as libraries ``` pipelines: dummy: name: " DLT with Python files" target: "dlt_python_files" libraries: - file: path: ./*.py ``` ## Tests Added unit test --- .../mutator/expand_pipeline_glob_paths.go | 89 ++++++++++ .../expand_pipeline_glob_paths_test.go | 154 
++++++++++++++++++ bundle/libraries/libraries.go | 4 + bundle/phases/initialize.go | 1 + 4 files changed, 248 insertions(+) create mode 100644 bundle/config/mutator/expand_pipeline_glob_paths.go create mode 100644 bundle/config/mutator/expand_pipeline_glob_paths_test.go diff --git a/bundle/config/mutator/expand_pipeline_glob_paths.go b/bundle/config/mutator/expand_pipeline_glob_paths.go new file mode 100644 index 00000000..5fa203a0 --- /dev/null +++ b/bundle/config/mutator/expand_pipeline_glob_paths.go @@ -0,0 +1,89 @@ +package mutator + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/databricks-sdk-go/service/pipelines" +) + +type expandPipelineGlobPaths struct{} + +func ExpandPipelineGlobPaths() bundle.Mutator { + return &expandPipelineGlobPaths{} +} + +func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) error { + for key, pipeline := range b.Config.Resources.Pipelines { + dir, err := pipeline.ConfigFileDirectory() + if err != nil { + return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) + } + + expandedLibraries := make([]pipelines.PipelineLibrary, 0) + for i := 0; i < len(pipeline.Libraries); i++ { + + library := &pipeline.Libraries[i] + path := getGlobPatternToExpand(library) + if path == "" || !libraries.IsLocalPath(path) { + expandedLibraries = append(expandedLibraries, *library) + continue + } + + matches, err := filepath.Glob(filepath.Join(dir, path)) + if err != nil { + return err + } + + for _, match := range matches { + m, err := filepath.Rel(dir, match) + if err != nil { + return err + } + expandedLibraries = append(expandedLibraries, cloneWithPath(library, m)) + } + } + pipeline.Libraries = expandedLibraries + } + + return nil +} + +func getGlobPatternToExpand(library *pipelines.PipelineLibrary) string { + if library.File != nil { + return library.File.Path + } + + if library.Notebook != nil { + return library.Notebook.Path + } + + return "" +} + +func cloneWithPath(library *pipelines.PipelineLibrary, path string) pipelines.PipelineLibrary { + if library.File != nil { + return pipelines.PipelineLibrary{ + File: &pipelines.FileLibrary{ + Path: path, + }, + } + } + + if library.Notebook != nil { + return pipelines.PipelineLibrary{ + Notebook: &pipelines.NotebookLibrary{ + Path: path, + }, + } + } + + return pipelines.PipelineLibrary{} +} + +func (*expandPipelineGlobPaths) Name() string { + return "ExpandPipelineGlobPaths" +} diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go new file mode 100644 index 00000000..ef99e716 --- /dev/null +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -0,0 +1,154 @@ +package mutator + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/require" +) + +func touchEmptyFile(t *testing.T, path string) { + err := os.MkdirAll(filepath.Dir(path), 0700) + require.NoError(t, err) + f, err := os.Create(path) + require.NoError(t, err) + f.Close() +} + +func TestExpandGlobPathsInPipelines(t *testing.T) { + dir := t.TempDir() + + touchEmptyFile(t, filepath.Join(dir, 
"test1.ipynb")) + touchEmptyFile(t, filepath.Join(dir, "test/test2.ipynb")) + touchEmptyFile(t, filepath.Join(dir, "test/test3.ipynb")) + touchEmptyFile(t, filepath.Join(dir, "test1.jar")) + touchEmptyFile(t, filepath.Join(dir, "test/test2.jar")) + touchEmptyFile(t, filepath.Join(dir, "test/test3.jar")) + touchEmptyFile(t, filepath.Join(dir, "test1.py")) + touchEmptyFile(t, filepath.Join(dir, "test/test2.py")) + touchEmptyFile(t, filepath.Join(dir, "test/test3.py")) + + b := &bundle.Bundle{ + Config: config.Root{ + Path: dir, + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "pipeline": { + Paths: paths.Paths{ + ConfigFilePath: filepath.Join(dir, "resource.yml"), + }, + PipelineSpec: &pipelines.PipelineSpec{ + Libraries: []pipelines.PipelineLibrary{ + { + Notebook: &pipelines.NotebookLibrary{ + Path: "./**/*.ipynb", + }, + }, + { + Jar: "./*.jar", + }, + { + File: &pipelines.FileLibrary{ + Path: "./**/*.py", + }, + }, + { + Maven: &compute.MavenLibrary{ + Coordinates: "org.jsoup:jsoup:1.7.2", + }, + }, + { + Notebook: &pipelines.NotebookLibrary{ + Path: "./test1.ipynb", + }, + }, + { + Notebook: &pipelines.NotebookLibrary{ + Path: "/Workspace/Users/me@company.com/test.ipynb", + }, + }, + { + Notebook: &pipelines.NotebookLibrary{ + Path: "dbfs:/me@company.com/test.ipynb", + }, + }, + }, + }, + }, + }, + }, + }, + } + + m := ExpandPipelineGlobPaths() + err := bundle.Apply(context.Background(), b, m) + require.NoError(t, err) + + libraries := b.Config.Resources.Pipelines["pipeline"].Libraries + require.Len(t, libraries, 9) + + // Making sure glob patterns are expanded correctly + require.True(t, containsNotebook(libraries, filepath.Join("test", "test2.ipynb"))) + require.True(t, containsNotebook(libraries, filepath.Join("test", "test3.ipynb"))) + require.True(t, containsFile(libraries, filepath.Join("test", "test2.py"))) + require.True(t, containsFile(libraries, filepath.Join("test", "test3.py"))) + + // Making sure exact file references work as well + require.True(t, containsNotebook(libraries, "test1.ipynb")) + + // Making sure absolute pass to remote FS file references work as well + require.True(t, containsNotebook(libraries, "/Workspace/Users/me@company.com/test.ipynb")) + require.True(t, containsNotebook(libraries, "dbfs:/me@company.com/test.ipynb")) + + // Making sure other libraries are not replaced + require.True(t, containsJar(libraries, "./*.jar")) + require.True(t, containsMaven(libraries, "org.jsoup:jsoup:1.7.2")) +} + +func containsNotebook(libraries []pipelines.PipelineLibrary, path string) bool { + for _, l := range libraries { + if l.Notebook != nil && l.Notebook.Path == path { + return true + } + } + + return false +} + +func containsJar(libraries []pipelines.PipelineLibrary, path string) bool { + for _, l := range libraries { + if l.Jar == path { + return true + } + } + + return false +} + +func containsMaven(libraries []pipelines.PipelineLibrary, coordinates string) bool { + for _, l := range libraries { + if l.Maven != nil && l.Maven.Coordinates == coordinates { + return true + } + } + + return false +} + +func containsFile(libraries []pipelines.PipelineLibrary, path string) bool { + for _, l := range libraries { + if l.File != nil && l.File.Path == path { + return true + } + } + + return false +} diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 8e2e504c..f973642f 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -161,6 +161,10 @@ func isLocalLibrary(library *compute.Library) bool 
{ return false } + return IsLocalPath(path) +} + +func IsLocalPath(path string) bool { if isExplicitFileScheme(path) { return true } diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 818886db..e03a6336 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -31,6 +31,7 @@ func Initialize() bundle.Mutator { ), mutator.OverrideCompute(), mutator.ProcessTargetMode(), + mutator.ExpandPipelineGlobPaths(), mutator.TranslatePaths(), python.WrapperWarning(), terraform.Initialize(), From 706393b64f6e026ff011aadebb0923be530c0649 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 4 Oct 2023 16:03:37 +0200 Subject: [PATCH 131/310] Create a release PR in setup-cli repo on tag push (#827) ## Changes Create a release PR in setup-cli repo on tag push --- .github/workflows/release.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index cbab15ec..8d09c963 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,3 +29,27 @@ jobs: args: release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + create-release-prs: + needs: goreleaser + runs-on: ubuntu-latest + steps: + - name: Set VERSION variable from tag + run: | + VERSION=${{ github.ref_name }} + echo "VERSION=${VERSION:1}" >> $GITHUB_ENV + + - name: Update setup-cli + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.DECO_GITHUB_TOKEN }} + script: | + await github.rest.actions.createWorkflowDispatch({ + owner: 'databricks', + repo: 'setup-cli', + workflow_id: 'release-pr.yml', + ref: 'main', + inputs: { + version: "${{ env.VERSION }}", + } + }); From 2ce4f74f9c827b47d9686b719065dc87af5ff231 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 4 Oct 2023 16:34:59 +0200 Subject: [PATCH 132/310] Release v0.207.0 (#839) CLI: * Refactor change computation for sync ([#785](https://github.com/databricks/cli/pull/785)). Bundles: * Allow digits in the generated short name ([#820](https://github.com/databricks/cli/pull/820)). * Emit an error when incompatible all purpose cluster used with Python wheel tasks ([#823](https://github.com/databricks/cli/pull/823)). * Use normalized short name for tag value in development mode ([#821](https://github.com/databricks/cli/pull/821)). * Added `python.DetectInterpreters` and other utils ([#805](https://github.com/databricks/cli/pull/805)). * Mark artifacts properties as optional ([#834](https://github.com/databricks/cli/pull/834)). * Added support for glob patterns in pipeline libraries section ([#833](https://github.com/databricks/cli/pull/833)). Internal: * Run tests to verify backend tag validation behavior ([#814](https://github.com/databricks/cli/pull/814)). * Library to validate and normalize cloud specific tags ([#819](https://github.com/databricks/cli/pull/819)). * Added test to submit and run various Python tasks on multiple DBR versions ([#806](https://github.com/databricks/cli/pull/806)). * Create a release PR in setup-cli repo on tag push ([#827](https://github.com/databricks/cli/pull/827)). API Changes: * Changed `databricks account metastore-assignments list` command to return . * Changed `databricks jobs cancel-all-runs` command with new required argument order. * Added `databricks account o-auth-published-apps` command group. * Changed `databricks serving-endpoints query` command . New request type is . * Added `databricks serving-endpoints patch` command. * Added `databricks credentials-manager` command group. 
* Added `databricks settings` command group. * Changed `databricks clean-rooms list` command to require request of . * Changed `databricks statement-execution execute-statement` command with new required argument order. OpenAPI commit bcbf6e851e3d82fd910940910dd31c10c059746c (2023-10-02) Dependency updates: * Bump github.com/google/uuid from 1.3.0 to 1.3.1 ([#825](https://github.com/databricks/cli/pull/825)). * Updated Go SDK to 0.22.0 ([#831](https://github.com/databricks/cli/pull/831)). --- CHANGELOG.md | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17e88159..5b4740c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,40 @@ # Version changelog +## 0.207.0 + +CLI: + * Refactor change computation for sync ([#785](https://github.com/databricks/cli/pull/785)). + +Bundles: + * Allow digits in the generated short name ([#820](https://github.com/databricks/cli/pull/820)). + * Emit an error when incompatible all purpose cluster used with Python wheel tasks ([#823](https://github.com/databricks/cli/pull/823)). + * Use normalized short name for tag value in development mode ([#821](https://github.com/databricks/cli/pull/821)). + * Added `python.DetectInterpreters` and other utils ([#805](https://github.com/databricks/cli/pull/805)). + * Mark artifacts properties as optional ([#834](https://github.com/databricks/cli/pull/834)). + * Added support for glob patterns in pipeline libraries section ([#833](https://github.com/databricks/cli/pull/833)). + +Internal: + * Run tests to verify backend tag validation behavior ([#814](https://github.com/databricks/cli/pull/814)). + * Library to validate and normalize cloud specific tags ([#819](https://github.com/databricks/cli/pull/819)). + * Added test to submit and run various Python tasks on multiple DBR versions ([#806](https://github.com/databricks/cli/pull/806)). + * Create a release PR in setup-cli repo on tag push ([#827](https://github.com/databricks/cli/pull/827)). + +API Changes: + * Changed `databricks account metastore-assignments list` command to return . + * Changed `databricks jobs cancel-all-runs` command with new required argument order. + * Added `databricks account o-auth-published-apps` command group. + * Changed `databricks serving-endpoints query` command . New request type is . + * Added `databricks serving-endpoints patch` command. + * Added `databricks credentials-manager` command group. + * Added `databricks settings` command group. + * Changed `databricks clean-rooms list` command to require request of . + * Changed `databricks statement-execution execute-statement` command with new required argument order. + +OpenAPI commit bcbf6e851e3d82fd910940910dd31c10c059746c (2023-10-02) +Dependency updates: + * Bump github.com/google/uuid from 1.3.0 to 1.3.1 ([#825](https://github.com/databricks/cli/pull/825)). + * Updated Go SDK to 0.22.0 ([#831](https://github.com/databricks/cli/pull/831)). + ## 0.206.0 Bundles: From 089458413223695714f5b3f453ff644a3384b92e Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 4 Oct 2023 17:27:09 +0200 Subject: [PATCH 133/310] Minor template tweaks (#832) ## Changes Minor tweaks to the template. 
--- .../templates/default-python/databricks_template_schema.json | 2 +- .../template/templates/default-python/template/__preamble.tmpl | 1 + .../resources/{{.project_name}}_pipeline.yml.tmpl | 3 --- .../template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl | 1 - 4 files changed, 2 insertions(+), 5 deletions(-) diff --git a/libs/template/templates/default-python/databricks_template_schema.json b/libs/template/templates/default-python/databricks_template_schema.json index 8784841e..59697a61 100644 --- a/libs/template/templates/default-python/databricks_template_schema.json +++ b/libs/template/templates/default-python/databricks_template_schema.json @@ -26,7 +26,7 @@ "type": "string", "default": "yes", "enum": ["yes", "no"], - "description": "Include a stub (sample) Python package 'my_project/src'", + "description": "Include a stub (sample) Python package in '{{.project_name}}/src'", "order": 4 } } diff --git a/libs/template/templates/default-python/template/__preamble.tmpl b/libs/template/templates/default-python/template/__preamble.tmpl index 54732493..a919a269 100644 --- a/libs/template/templates/default-python/template/__preamble.tmpl +++ b/libs/template/templates/default-python/template/__preamble.tmpl @@ -13,6 +13,7 @@ This file only template directives; it is skipped for the actual output. {{skip "{{.project_name}}/tests/main_test.py"}} {{skip "{{.project_name}}/setup.py"}} {{skip "{{.project_name}}/pytest.ini"}} + {{skip "{{.project_name}}/requirements-dev.txt"}} {{end}} {{if $notDLT}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl index 4b8f74d1..498604f6 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl @@ -7,6 +7,3 @@ resources: libraries: - notebook: path: ../src/dlt_pipeline.ipynb - - configuration: - bundle.sourcePath: /Workspace/${workspace.file_path}/src diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl index 4f50294f..8c85e97e 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl @@ -35,7 +35,6 @@ "# Import DLT and src/{{.project_name}}\n", "import dlt\n", "import sys\n", - "sys.path.append(spark.conf.get(\"bundle.sourcePath\", \".\"))\n", "from pyspark.sql.functions import expr\n", "from {{.project_name}} import main" {{else}} From caade735e32d95ff941adb2ff95371b5d8d30fa3 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 5 Oct 2023 14:48:59 +0200 Subject: [PATCH 134/310] Improve `workspace import` command by allowing references to local files for content (#793) ## Changes This PR makes a few really important QOL improvements to the `workspace import` command. They are: 1. Adds the `--file` flag, which allows a user to specify a file to read the content from. 2. Wraps the most common error first time users of this command will run into with a helpful hint. 3. 
Minor changes to the command's `Use` string, changing `PATH` -> `TARGET_PATH` ## Tests Integration tests. The newly added integration tests check that the --file flag works as expected for both `SOURCE` and `AUTO` formats. Skipped the other formats because the API behaviour for them is straightforward. --- cmd/workspace/workspace/overrides.go | 44 +++++++++++++++ internal/workspace_test.go | 80 ++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+) diff --git a/cmd/workspace/workspace/overrides.go b/cmd/workspace/workspace/overrides.go index 9cae5bef..5c0692d5 100644 --- a/cmd/workspace/workspace/overrides.go +++ b/cmd/workspace/workspace/overrides.go @@ -1,7 +1,15 @@ package workspace import ( + "encoding/base64" + "errors" + "fmt" + "net/http" + "os" + "strings" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/spf13/cobra" ) @@ -19,7 +27,43 @@ func exportOverride(exportCmd *cobra.Command, exportReq *workspace.ExportRequest exportCmd.Annotations["template"] = `{{.Content | b64_decode}}` } +// Give better errors / hints for common API errors. +func wrapImportAPIErrors(err error, importReq *workspace.Import) error { + apiErr := &apierr.APIError{} + if !errors.As(err, &apiErr) { + return err + } + isFormatSource := importReq.Format == workspace.ImportFormatSource || importReq.Format == "" + if isFormatSource && apiErr.StatusCode == http.StatusBadRequest && + strings.Contains(apiErr.Message, "The zip file may not be valid or may be an unsupported version.") { + return fmt.Errorf("%w Hint: Objects imported using format=SOURCE are expected to be zip encoded databricks source notebook(s) by default. Please specify a language using the --language flag if you are trying to import a single uncompressed notebook", err) + } + return err +} + +func importOverride(importCmd *cobra.Command, importReq *workspace.Import) { + var filePath string + importCmd.Use = "import TARGET_PATH" + importCmd.Flags().StringVar(&filePath, "file", "", `Path of local file to import`) + importCmd.MarkFlagsMutuallyExclusive("content", "file") + + originalRunE := importCmd.RunE + importCmd.RunE = func(cmd *cobra.Command, args []string) error { + if filePath != "" { + b, err := os.ReadFile(filePath) + if err != nil { + return err + } + importReq.Content = base64.StdEncoding.EncodeToString(b) + } + err := originalRunE(cmd, args) + return wrapImportAPIErrors(err, importReq) + } + +} + func init() { listOverrides = append(listOverrides, listOverride) exportOverrides = append(exportOverrides, exportOverride) + importOverrides = append(importOverrides, importOverride) } diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 7110d5c9..6513300e 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -2,6 +2,7 @@ package internal import ( "context" + "encoding/base64" "errors" "io" "net/http" @@ -14,6 +15,7 @@ import ( "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -95,6 +97,12 @@ func assertFilerFileContents(t *testing.T, ctx context.Context, f filer.Filer, p assert.Contains(t, string(b), content) } +func assertWorkspaceFileType(t *testing.T, ctx context.Context, f filer.Filer, path string, fileType workspace.ObjectType) { + info, err := f.Stat(ctx, path) +
require.NoError(t, err) + assert.Equal(t, fileType, info.Sys().(workspace.ObjectInfo).ObjectType) +} + func TestAccExportDir(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -239,3 +247,75 @@ func TestAccImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "hello, world") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") } + +func TestAccImportFileUsingContentFormatSource(t *testing.T) { + ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) + + // Content = `print(1)`. Uploaded as a notebook by default + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "pyScript"), + "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--language=PYTHON") + assertFilerFileContents(t, ctx, workspaceFiler, "pyScript", "print(1)") + assertWorkspaceFileType(t, ctx, workspaceFiler, "pyScript", workspace.ObjectTypeNotebook) + + // Import with content = `# Databricks notebook source\nprint(1)`. Uploaded as a notebook with the content just being print(1) + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "pyNb"), + "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), + "--language=PYTHON") + assertFilerFileContents(t, ctx, workspaceFiler, "pyNb", "print(1)") + assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNb", workspace.ObjectTypeNotebook) +} + +func TestAccImportFileUsingContentFormatAuto(t *testing.T) { + ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) + + // Content = `# Databricks notebook source\nprint(1)`. Upload as file if path has no extension. + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "py-nb-as-file"), + "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") + assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(1)") + assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) + + // Content = `# Databricks notebook source\nprint(1)`. Upload as notebook if path has py extension + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "py-nb-as-notebook.py"), + "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") + assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(1)") + assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) + + // Content = `print(1)`. 
Upload as file if content is not notebook (even if path has .py extension) + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "not-a-notebook.py"), "--content", + base64.StdEncoding.EncodeToString([]byte("print(1)")), "--format=AUTO") + assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "print(1)") + assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) +} + +func TestAccImportFileFormatSource(t *testing.T) { + ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") + assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") + assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNotebook", workspace.ObjectTypeNotebook) + + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") + assertFilerFileContents(t, ctx, workspaceFiler, "scalaNotebook", "// Databricks notebook source\nprintln(\"scala\")") + assertWorkspaceFileType(t, ctx, workspaceFiler, "scalaNotebook", workspace.ObjectTypeNotebook) + + _, _, err := RequireErrorRun(t, "workspace", "import", filepath.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") + assert.ErrorContains(t, err, "The zip file may not be valid or may be an unsupported version. Hint: Objects imported using format=SOURCE are expected to be zip encoded databricks source notebook(s) by default. Please specify a language using the --language flag if you are trying to import a single uncompressed notebook") +} + +func TestAccImportFileFormatAuto(t *testing.T) { + ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) + + // Upload as file if path has no extension + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(\"python\")") + assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) + + // Upload as notebook if path has extension + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(\"python\")") + assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) + + // Upload as file if content is not notebook (even if path has .py extension) + RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") + assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "hello, world\n") + assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) +} From 1e9dbcfa2ae069ac5e84bf5ce84d05a666902381 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 5 Oct 2023 15:20:33 +0200 Subject: [PATCH 135/310] Add `--file` flag to workspace export command (#794) This PR: 1. Adds the `--file` flag to the workspace export command. 
This allows you to specify an output file to write to. 2. Adds e2e integration tests for the workspace export command --- cmd/workspace/workspace/overrides.go | 28 +++++++++++++ internal/workspace_test.go | 63 ++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/cmd/workspace/workspace/overrides.go b/cmd/workspace/workspace/overrides.go index 5c0692d5..1cac6741 100644 --- a/cmd/workspace/workspace/overrides.go +++ b/cmd/workspace/workspace/overrides.go @@ -8,6 +8,7 @@ import ( "os" "strings" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/workspace" @@ -25,6 +26,33 @@ func listOverride(listCmd *cobra.Command, listReq *workspace.ListWorkspaceReques func exportOverride(exportCmd *cobra.Command, exportReq *workspace.ExportRequest) { // The export command prints the contents of the file to stdout by default. exportCmd.Annotations["template"] = `{{.Content | b64_decode}}` + exportCmd.Use = "export SOURCE_PATH" + + var filePath string + exportCmd.Flags().StringVar(&filePath, "file", "", `Path on the local file system to save exported file at.`) + + exportCmd.RunE = func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + if len(args) != 1 { + return fmt.Errorf("expected to have the absolute path of the object or directory") + } + exportReq.Path = args[0] + + response, err := w.Workspace.Export(ctx, *exportReq) + if err != nil { + return err + } + // Render file content to stdout if no file path is specified. + if filePath == "" { + return cmdio.Render(ctx, response) + } + b, err := base64.StdEncoding.DecodeString(response.Content) + if err != nil { + return err + } + return os.WriteFile(filePath, b, 0755) + } } // Give better errors / hints for common API errors.
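For reference, a hypothetical invocation of the two new flags added in this patch and in #793 above (the workspace and local paths are illustrative, not taken from the tests):

```
# Export a workspace notebook to a local file (--file added in this patch).
databricks workspace export /Users/someone@example.com/pyNotebook --file ./pyNotebook.py

# Import a local file as a workspace notebook (--file added in #793).
databricks workspace import /Users/someone@example.com/pyNotebook --file ./pyNotebook.py --language=PYTHON
```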
diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 6513300e..39760ec5 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -248,6 +248,69 @@ func TestAccImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") } +func TestAccExport(t *testing.T) { + ctx, f, sourceDir := setupWorkspaceImportExportTest(t) + + var err error + + // Export vanilla file + err = f.Write(ctx, "file-a", strings.NewReader("abc")) + require.NoError(t, err) + stdout, _ := RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "file-a")) + b, err := io.ReadAll(&stdout) + require.NoError(t, err) + assert.Equal(t, "abc", string(b)) + + // Export python notebook + err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) + require.NoError(t, err) + stdout, _ = RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "pyNotebook")) + b, err = io.ReadAll(&stdout) + require.NoError(t, err) + assert.Equal(t, "# Databricks notebook source\n", string(b)) + + // Export python notebook as jupyter + stdout, _ = RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") + b, err = io.ReadAll(&stdout) + require.NoError(t, err) + assert.Contains(t, string(b), `"cells":`, "jupyter notebooks contain the cells field") + assert.Contains(t, string(b), `"metadata":`, "jupyter notebooks contain the metadata field") +} + +func TestAccExportWithFileFlag(t *testing.T) { + ctx, f, sourceDir := setupWorkspaceImportExportTest(t) + localTmpDir := t.TempDir() + + var err error + + // Export vanilla file + err = f.Write(ctx, "file-a", strings.NewReader("abc")) + require.NoError(t, err) + stdout, _ := RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) + b, err := io.ReadAll(&stdout) + require.NoError(t, err) + // Expect nothing to be printed to stdout + assert.Equal(t, "", string(b)) + assertLocalFileContents(t, filepath.Join(localTmpDir, "file.txt"), "abc") + + // Export python notebook + err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) + require.NoError(t, err) + stdout, _ = RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) + b, err = io.ReadAll(&stdout) + require.NoError(t, err) + assert.Equal(t, "", string(b)) + assertLocalFileContents(t, filepath.Join(localTmpDir, "pyNb.py"), "# Databricks notebook source\n") + + // Export python notebook as jupyter + stdout, _ = RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) + b, err = io.ReadAll(&stdout) + require.NoError(t, err) + assert.Equal(t, "", string(b)) + assertLocalFileContents(t, filepath.Join(localTmpDir, "jupyterNb.ipynb"), `"cells":`) + assertLocalFileContents(t, filepath.Join(localTmpDir, "jupyterNb.ipynb"), `"metadata":`) +} + func TestAccImportFileUsingContentFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) From 847b6f4bc342529be5c9d088ae891d1bb7c82c33 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 6 Oct 2023 12:28:18 +0200 Subject: [PATCH 136/310] Fix import export integration tests on windows (#842) We should be 
using the path package here because they are paths in WSFS --- internal/workspace_test.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 39760ec5..21ef8231 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -256,7 +256,7 @@ func TestAccExport(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "file-a")) + stdout, _ := RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a")) b, err := io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "abc", string(b)) @@ -264,13 +264,13 @@ func TestAccExport(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "pyNotebook")) + stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "# Databricks notebook source\n", string(b)) // Export python notebook as jupyter - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") + stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Contains(t, string(b), `"cells":`, "jupyter notebooks contain the cells field") @@ -286,7 +286,7 @@ func TestAccExportWithFileFlag(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) + stdout, _ := RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) b, err := io.ReadAll(&stdout) require.NoError(t, err) // Expect nothing to be printed to stdout @@ -296,14 +296,14 @@ func TestAccExportWithFileFlag(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) + stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) assertLocalFileContents(t, filepath.Join(localTmpDir, "pyNb.py"), "# Databricks notebook source\n") // Export python notebook as jupyter - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", filepath.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) + stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) @@ -315,13 +315,13 @@ func TestAccImportFileUsingContentFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := 
setupWorkspaceImportExportTest(t) // Content = `print(1)`. Uploaded as a notebook by default - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "pyScript"), + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyScript"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyScript", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyScript", workspace.ObjectTypeNotebook) // Import with content = `# Databricks notebook source\nprint(1)`. Uploaded as a notebook with the content just being print(1) - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "pyNb"), + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNb"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNb", "print(1)") @@ -332,19 +332,19 @@ func TestAccImportFileUsingContentFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `# Databricks notebook source\nprint(1)`. Upload as file if path has no extension. - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "py-nb-as-file"), + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Content = `# Databricks notebook source\nprint(1)`. Upload as notebook if path has py extension - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "py-nb-as-notebook.py"), + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Content = `print(1)`. 
Upload as file if content is not notebook (even if path has .py extension) - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "not-a-notebook.py"), "--content", + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) @@ -352,15 +352,15 @@ func TestAccImportFileUsingContentFormatAuto(t *testing.T) { func TestAccImportFileFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNotebook", workspace.ObjectTypeNotebook) - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") assertFilerFileContents(t, ctx, workspaceFiler, "scalaNotebook", "// Databricks notebook source\nprintln(\"scala\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "scalaNotebook", workspace.ObjectTypeNotebook) - _, _, err := RequireErrorRun(t, "workspace", "import", filepath.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") + _, _, err := RequireErrorRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") assert.ErrorContains(t, err, "The zip file may not be valid or may be an unsupported version. Hint: Objects imported using format=SOURCE are expected to be zip encoded databricks source notebook(s) by default. 
Please specify a language using the --language flag if you are trying to import a single uncompressed notebook") } @@ -368,17 +368,17 @@ func TestAccImportFileFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Upload as file if path has no extension - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Upload as notebook if path has extension - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Upload as file if content is not notebook (even if path has .py extension) - RequireSuccessfulRun(t, "workspace", "import", filepath.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") + RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "hello, world\n") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } From 054df2b58b5d68ffb7aa3bdd2db2a50343e73bf6 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 6 Oct 2023 20:09:56 +0200 Subject: [PATCH 137/310] Fix workspace import test (#844) Windows and unix have different new line characters. Separating the string assertions here to make the test pass. 
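A minimal sketch of the failure mode (assuming the local test fixture is checked out with CRLF line endings on Windows; the strings are illustrative):

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// On Windows, git may check the test fixture out with \r\n line endings.
	got := "# Databricks notebook source\r\nprint(\"python\")"

	// A single assertion that embeds \n fails, because \r\n != \n.
	fmt.Println(strings.Contains(got, "# Databricks notebook source\nprint(\"python\")")) // false

	// Asserting on each line separately passes on both platforms.
	fmt.Println(strings.Contains(got, "# Databricks notebook source")) // true
	fmt.Println(strings.Contains(got, "print(\"python\")"))            // true
}
```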
--- internal/workspace_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 21ef8231..a6e641b6 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -369,7 +369,8 @@ func TestAccImportFileFormatAuto(t *testing.T) { // Upload as file if path has no extension RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") - assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(\"python\")") + assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source") + assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "print(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Upload as notebook if path has extension @@ -379,6 +380,6 @@ func TestAccImportFileFormatAuto(t *testing.T) { // Upload as file if content is not notebook (even if path has .py extension) RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") - assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "hello, world\n") + assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "hello, world") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } From 8d8de3f5095e01efea55e9310a5e0789da88e095 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 9 Oct 2023 12:10:28 +0200 Subject: [PATCH 138/310] Fixed using repo files as pipeline libraries (#847) ## Changes Fixed using repo files as pipeline libraries ## Tests Added regression test --- bundle/config/mutator/expand_pipeline_glob_paths_test.go | 8 +++++++- bundle/libraries/libraries.go | 8 +++++++- bundle/libraries/libraries_test.go | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index ef99e716..48cd52a0 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -80,6 +80,11 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { Path: "dbfs:/me@company.com/test.ipynb", }, }, + { + Notebook: &pipelines.NotebookLibrary{ + Path: "/Repos/somerepo/test.ipynb", + }, + }, }, }, }, @@ -93,7 +98,7 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { require.NoError(t, err) libraries := b.Config.Resources.Pipelines["pipeline"].Libraries - require.Len(t, libraries, 9) + require.Len(t, libraries, 10) // Making sure glob patterns are expanded correctly require.True(t, containsNotebook(libraries, filepath.Join("test", "test2.ipynb"))) @@ -107,6 +112,7 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { // Making sure absolute pass to remote FS file references work as well require.True(t, containsNotebook(libraries, "/Workspace/Users/me@company.com/test.ipynb")) require.True(t, containsNotebook(libraries, "dbfs:/me@company.com/test.ipynb")) + require.True(t, containsNotebook(libraries, "/Repos/somerepo/test.ipynb")) // Making sure other libraries are not replaced require.True(t, containsJar(libraries, "./*.jar")) diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index f973642f..548d5ef1 100644 --- a/bundle/libraries/libraries.go +++ 
b/bundle/libraries/libraries.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/url" + "path" "path/filepath" "strings" @@ -173,7 +174,7 @@ func IsLocalPath(path string) bool { return false } - return !isWorkspacePath(path) + return !isAbsoluteRemotePath(path) } func isExplicitFileScheme(path string) bool { @@ -200,3 +201,8 @@ func isWorkspacePath(path string) bool { strings.HasPrefix(path, "/Users/") || strings.HasPrefix(path, "/Shared/") } + +func isAbsoluteRemotePath(p string) bool { + // If path for library starts with /, it's a remote absolute path + return path.IsAbs(p) +} diff --git a/bundle/libraries/libraries_test.go b/bundle/libraries/libraries_test.go index 7ff1609a..41609bd4 100644 --- a/bundle/libraries/libraries_test.go +++ b/bundle/libraries/libraries_test.go @@ -10,7 +10,7 @@ import ( var testCases map[string]bool = map[string]bool{ "./some/local/path": true, - "/some/full/path": true, + "/some/full/path": false, "/Workspace/path/to/package": false, "/Users/path/to/package": false, "file://path/to/package": true, From ad4b476270a779511e997e9b67ba30fbcfda2837 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 9 Oct 2023 12:37:18 +0200 Subject: [PATCH 139/310] Ensure profile flag is respected for sync command (#837) ## Changes Fixes #836 ## Tests Manually running `sync` command with and without the flag Integration tests pass as well ``` --- PASS: TestAccSyncFullFileSync (13.38s) PASS coverage: 39.1% of statements in ./... ok github.com/databricks/cli/internal 14.148s coverage: 39.1% of statements in ./... --- PASS: TestAccSyncIncrementalFileSync (11.38s) PASS coverage: 39.1% of statements in ./... ok github.com/databricks/cli/internal 11.674s coverage: 39.1% of statements in ./... ``` --- cmd/root/auth.go | 18 +++++++++++++++++- cmd/sync/sync.go | 11 ++++++----- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/cmd/root/auth.go b/cmd/root/auth.go index de5648c6..ed91090e 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -92,7 +92,8 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { } } - allowPrompt := !hasProfileFlag + noPrompt, ok := cmd.Context().Value(noPromptKey).(bool) + allowPrompt := !hasProfileFlag && (!ok || !noPrompt) a, err := accountClientOrPrompt(cmd.Context(), cfg, allowPrompt) if err != nil { return err @@ -102,6 +103,21 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { return nil } +type noPrompt int + +var noPromptKey noPrompt + +// NoPrompt allows to skip prompt for profile configuration in MustWorkspaceClient. +// +// When calling MustWorkspaceClient we want to be able to customise if to show prompt or not. +// Since we can't change function interface, in the code we only have an access to `cmd“ object. +// Command struct does not have any state flag which indicates that it's being called in completion mode and +// thus the Context object seems to be the only viable option for us to configure prompt behaviour based on +// the context it's executed from. +func NoPrompt(ctx context.Context) context.Context { + return context.WithValue(ctx, noPromptKey, true) +} + // Helper function to create a workspace client or prompt once if the given configuration is not valid. 
func workspaceClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt bool) (*databricks.WorkspaceClient, error) { w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg)) diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 5fdfb169..7cfc1f29 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -90,6 +90,7 @@ func New() *cobra.Command { cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes") cmd.Flags().Var(&f.output, "output", "type of output format") + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) error { var opts *sync.SyncOptions var err error @@ -149,7 +150,10 @@ func New() *cobra.Command { } cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - err := root.TryConfigureBundle(cmd, args) + ctx := cmd.Context() + cmd.SetContext(root.NoPrompt(ctx)) + + err := root.MustWorkspaceClient(cmd, args) if err != nil { return nil, cobra.ShellCompDirectiveError } @@ -165,10 +169,7 @@ func New() *cobra.Command { case 0: return nil, cobra.ShellCompDirectiveFilterDirs case 1: - wsc, err := databricks.NewWorkspaceClient() - if err != nil { - return nil, cobra.ShellCompDirectiveError - } + wsc := root.WorkspaceClient(cmd.Context()) return completeRemotePath(cmd.Context(), wsc, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp From fa8ce65d291c3b0b79ec8e642363cff1b7b446ac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 14:43:45 +0200 Subject: [PATCH 140/310] Bump golang.org/x/term from 0.12.0 to 0.13.0 (#852) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.12.0 to 0.13.0.

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.12.0&new-version=0.13.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 918d3ce2..362bb009 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/mod v0.12.0 golang.org/x/oauth2 v0.12.0 golang.org/x/sync v0.3.0 - golang.org/x/term v0.12.0 + golang.org/x/term v0.13.0 golang.org/x/text v0.13.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) @@ -52,7 +52,7 @@ require ( go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.13.0 // indirect golang.org/x/net v0.15.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.143.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index 13eed5a3..bfe4089d 100644 --- a/go.sum +++ b/go.sum @@ -214,15 +214,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From 8dcba1810ab0c9f9eea249e4b8681d00ce762069 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 14:44:05 +0200 Subject: [PATCH 141/310] Bump golang.org/x/mod from 0.12.0 to 0.13.0 (#851) Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.12.0 to 0.13.0.
Commits:
- 5b69280 modfile: use new go version string format in error message
- 273ef6c go.mod: update to go 1.18 and x/tools v0.13.0
- See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/mod&package-manager=go_modules&previous-version=0.12.0&new-version=0.13.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 362bb009..78b2c784 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/stretchr/testify v1.8.4 // MIT github.com/whilp/git-urls v1.0.0 // MIT golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 - golang.org/x/mod v0.12.0 + golang.org/x/mod v0.13.0 golang.org/x/oauth2 v0.12.0 golang.org/x/sync v0.3.0 golang.org/x/term v0.13.0 diff --git a/go.sum b/go.sum index bfe4089d..9543eb7e 100644 --- a/go.sum +++ b/go.sum @@ -171,8 +171,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -241,8 +241,9 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA= From b4086e4f1dd565fb8e5b4f4d75bdd377c7c437a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 12:58:22 +0000 Subject: [PATCH 142/310] Bump golang.org/x/sync from 0.3.0 to 0.4.0 (#849) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.3.0 to 0.4.0.

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/sync&package-manager=go_modules&previous-version=0.3.0&new-version=0.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 78b2c784..31293ea3 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 golang.org/x/mod v0.13.0 golang.org/x/oauth2 v0.12.0 - golang.org/x/sync v0.3.0 + golang.org/x/sync v0.4.0 golang.org/x/term v0.13.0 golang.org/x/text v0.13.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 diff --git a/go.sum b/go.sum index 9543eb7e..2e05563a 100644 --- a/go.sum +++ b/go.sum @@ -195,8 +195,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 043e54950d542ec738bb3e7787862b23796174d8 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 9 Oct 2023 17:26:46 +0200 Subject: [PATCH 143/310] Add hint to delete sync snapshot if parsing fails (#853) ## Changes Example error: ``` Error: error parsing existing sync state. Please delete your existing sync snapshot file (.databricks/sync-snapshots/f3c00bc127903f9b.json) and retry: invalid sync state representation. Remote file footxt is missing the corresponding local file ``` ## Tests Manually --- libs/sync/snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index 7e2130e9..f9956962 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -168,7 +168,7 @@ func (s *Snapshot) diff(ctx context.Context, all []fileset.File) (diff, error) { currentState := s.SnapshotState if err := currentState.validate(); err != nil { - return diff{}, fmt.Errorf("error parsing existing sync state: %w", err) + return diff{}, fmt.Errorf("error parsing existing sync state. Please delete your existing sync snapshot file (%s) and retry: %w", s.SnapshotPath, err) } // Compute diff to apply to get from current state to new target state. From 8131a8b5faeac2512701f8735147a24ef781e7dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 17:27:15 +0200 Subject: [PATCH 144/310] Bump golang.org/x/oauth2 from 0.12.0 to 0.13.0 (#850) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.12.0 to 0.13.0.
Commits:
- 3c5dbf0 go.mod: update golang.org/x dependencies
- 11625cc google: add authorized_user conditional to Credentials.UniverseDomain
- 8d6d45b google: add Credentials.UniverseDomain to support TPC
- 43b6a7b google: adding support for external account authorized user
- 14b275c oauth2: workaround misspelling of verification_uri
- 18352fc google/internal/externalaccount: adding BYOID Metrics
- 9095a51 oauth2: clarify error if endpoint missing DeviceAuthURL
- 2d9e4a2 oauth2/google: remove meta validations for aws external credentials
- 55cd552 oauth2: support PKCE
- e3fb0fb oauth2: support device flow
- See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/oauth2&package-manager=go_modules&previous-version=0.12.0&new-version=0.13.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
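Two entries in the list above are upstream feature additions rather than fixes: PKCE and device-flow support. A minimal sketch of how those helpers fit together, assuming the golang.org/x/oauth2 v0.13.0 API (illustrative only; this repository's code does not use these calls in this PR):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

// deviceFlowToken combines the two additions: a PKCE verifier/challenge pair
// and the RFC 8628 device authorization flow.
func deviceFlowToken(ctx context.Context, cfg *oauth2.Config) (*oauth2.Token, error) {
	verifier := oauth2.GenerateVerifier()

	// Ask the authorization server for a device/user code pair.
	da, err := cfg.DeviceAuth(ctx, oauth2.S256ChallengeOption(verifier))
	if err != nil {
		return nil, err
	}
	fmt.Printf("visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

	// Poll the token endpoint until the user approves or the code expires.
	return cfg.DeviceAccessToken(ctx, da, oauth2.VerifierOption(verifier))
}

func main() {
	// Hypothetical client and endpoints, for illustration only.
	cfg := &oauth2.Config{
		ClientID: "my-client-id",
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://auth.example.com/oauth2/device",
			TokenURL:      "https://auth.example.com/oauth2/token",
		},
	}
	tok, err := deviceFlowToken(context.Background(), cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("token type:", tok.TokenType)
}
```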
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 31293ea3..d6e72e47 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/whilp/git-urls v1.0.0 // MIT golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 golang.org/x/mod v0.13.0 - golang.org/x/oauth2 v0.12.0 + golang.org/x/oauth2 v0.13.0 golang.org/x/sync v0.4.0 golang.org/x/term v0.13.0 golang.org/x/text v0.13.0 @@ -50,8 +50,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/zclconf/go-cty v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/net v0.15.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.16.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.143.0 // indirect diff --git a/go.sum b/go.sum index 2e05563a..96eef68c 100644 --- a/go.sum +++ b/go.sum @@ -161,8 +161,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw= golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= @@ -185,11 +185,11 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
From 8c1441ff71e06ea49de38cc401e413f325be051c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Oct 2023 10:45:15 +0200 Subject: [PATCH 145/310] Support .gitignore syntax in sync section and make sure it works recursively (#854) Fixes #815 --- libs/fileset/glob.go | 43 ++++-------------- libs/fileset/glob_test.go | 94 +++++++++++++++++++++++++++++++++++++++ libs/fileset/ignorer.go | 38 ++++++++++++++++ libs/sync/sync.go | 4 +- libs/sync/sync_test.go | 55 ++++++++++++++++++++--- 5 files changed, 192 insertions(+), 42 deletions(-) diff --git a/libs/fileset/glob.go b/libs/fileset/glob.go index 7a9f130b..9d8626e5 100644 --- a/libs/fileset/glob.go +++ b/libs/fileset/glob.go @@ -1,49 +1,22 @@ package fileset import ( - "io/fs" - "os" "path/filepath" ) -type GlobSet struct { - root string - patterns []string -} - -func NewGlobSet(root string, includes []string) (*GlobSet, error) { +func NewGlobSet(root string, includes []string) (*FileSet, error) { absRoot, err := filepath.Abs(root) if err != nil { return nil, err } + for k := range includes { - includes[k] = filepath.Join(absRoot, filepath.FromSlash(includes[k])) - } - return &GlobSet{absRoot, includes}, nil -} - -// Return all files which matches defined glob patterns -func (s *GlobSet) All() ([]File, error) { - files := make([]File, 0) - for _, pattern := range s.patterns { - matches, err := filepath.Glob(pattern) - if err != nil { - return files, err - } - - for _, match := range matches { - matchRel, err := filepath.Rel(s.root, match) - if err != nil { - return files, err - } - - stat, err := os.Stat(match) - if err != nil { - return files, err - } - files = append(files, File{fs.FileInfoToDirEntry(stat), match, matchRel}) - } + includes[k] = filepath.ToSlash(filepath.Clean(includes[k])) } - return files, nil + fs := &FileSet{ + absRoot, + newIncluder(includes), + } + return fs, nil } diff --git a/libs/fileset/glob_test.go b/libs/fileset/glob_test.go index f6ac7e19..e8d3696c 100644 --- a/libs/fileset/glob_test.go +++ b/libs/fileset/glob_test.go @@ -5,6 +5,7 @@ import ( "os" "path/filepath" "slices" + "strings" "testing" "github.com/stretchr/testify/require" @@ -63,3 +64,96 @@ func TestGlobFilesetWithRelativeRoot(t *testing.T) { require.True(t, filepath.IsAbs(f.Absolute)) } } + +func TestGlobFilesetRecursively(t *testing.T) { + cwd, err := os.Getwd() + require.NoError(t, err) + root := filepath.Join(cwd, "..", "git") + + entries := make([]string, 0) + err = filepath.Walk(filepath.Join(root, "testdata"), func(path string, info fs.FileInfo, err error) error { + if !info.IsDir() { + entries = append(entries, path) + } + return nil + }) + require.NoError(t, err) + + g, err := NewGlobSet(root, []string{ + "testdata/*", + }) + require.NoError(t, err) + + files, err := g.All() + require.NoError(t, err) + + require.Equal(t, len(files), len(entries)) + for _, f := range files { + exists := slices.ContainsFunc(entries, func(path string) bool { + return path == f.Absolute + }) + require.True(t, exists) + } +} + +func TestGlobFilesetDir(t *testing.T) { + cwd, err := os.Getwd() + require.NoError(t, err) + root := filepath.Join(cwd, "..", "git") + + entries := make([]string, 0) + err = filepath.Walk(filepath.Join(root, "testdata", "a"), func(path string, info fs.FileInfo, err error) error { + if !info.IsDir() { + entries = append(entries, path) + } + return nil + }) + require.NoError(t, err) + + g, err := NewGlobSet(root, []string{ + "testdata/a", + }) + require.NoError(t, err) + + files, err := g.All() + require.NoError(t, err) + + 
require.Equal(t, len(files), len(entries))
+	for _, f := range files {
+		exists := slices.ContainsFunc(entries, func(path string) bool {
+			return path == f.Absolute
+		})
+		require.True(t, exists)
+	}
+}
+
+func TestGlobFilesetDoubleQuotesWithFilePatterns(t *testing.T) {
+	cwd, err := os.Getwd()
+	require.NoError(t, err)
+	root := filepath.Join(cwd, "..", "git")
+
+	entries := make([]string, 0)
+	err = filepath.Walk(filepath.Join(root, "testdata"), func(path string, info fs.FileInfo, err error) error {
+		if strings.HasSuffix(path, ".txt") {
+			entries = append(entries, path)
+		}
+		return nil
+	})
+	require.NoError(t, err)
+
+	g, err := NewGlobSet(root, []string{
+		"testdata/**/*.txt",
+	})
+	require.NoError(t, err)
+
+	files, err := g.All()
+	require.NoError(t, err)
+
+	require.Equal(t, len(files), len(entries))
+	for _, f := range files {
+		exists := slices.ContainsFunc(entries, func(path string) bool {
+			return path == f.Absolute
+		})
+		require.True(t, exists)
+	}
+}
diff --git a/libs/fileset/ignorer.go b/libs/fileset/ignorer.go
index ba066f41..eb87682f 100644
--- a/libs/fileset/ignorer.go
+++ b/libs/fileset/ignorer.go
@@ -1,5 +1,9 @@
 package fileset
 
+import (
+	ignore "github.com/sabhiram/go-gitignore"
+)
+
 // Ignorer is the interface for what determines if a path
 // in the [FileSet] must be ignored or not.
 type Ignorer interface {
@@ -17,3 +21,37 @@ func (nopIgnorer) IgnoreFile(path string) (bool, error) {
 func (nopIgnorer) IgnoreDirectory(path string) (bool, error) {
 	return false, nil
 }
+
+type includer struct {
+	matcher *ignore.GitIgnore
+}
+
+func newIncluder(includes []string) *includer {
+	matcher := ignore.CompileIgnoreLines(includes...)
+	return &includer{
+		matcher,
+	}
+}
+
+func (i *includer) IgnoreFile(path string) (bool, error) {
+	return i.ignore(path), nil
+}
+
+// In the context of 'include' functionality, the Ignorer logic is reversed:
+// for patterns like 'foo/bar/', which are intended to match directories only, we still need to traverse into the directory for potential file matches.
+// Ignoring the directory entirely isn't an option, especially when dealing with patterns like 'foo/bar/*.go'.
+// While this pattern doesn't target directories, it does match all Go files within them, and ignoring directories that don't match the pattern
+// would result in missed file matches.
+// During tree traversal, we call 'IgnoreDirectory' on ".", "./foo", and "./foo/bar",
+// all while applying the 'foo/bar/*.go' pattern. Handling this correctly would require more complex code,
+// such as generating various prefix patterns to exclude directories from traversal.
+// In this particular case, we opt for the simpler logic and accept the performance trade-off.
+func (i *includer) IgnoreDirectory(path string) (bool, error) { + return false, nil +} + +func (i *includer) ignore(path string) bool { + matched := i.matcher.MatchesPath(path) + // If matched, do not ignore the file because we want to include it + return !matched +} diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 8be478fc..beb3f6a3 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -37,8 +37,8 @@ type Sync struct { *SyncOptions fileSet *git.FileSet - includeFileSet *fileset.GlobSet - excludeFileSet *fileset.GlobSet + includeFileSet *fileset.FileSet + excludeFileSet *fileset.FileSet snapshot *Snapshot filer filer.Filer diff --git a/libs/sync/sync_test.go b/libs/sync/sync_test.go index 99c7e04b..0f1ad61b 100644 --- a/libs/sync/sync_test.go +++ b/libs/sync/sync_test.go @@ -48,8 +48,25 @@ func setupFiles(t *testing.T) string { err = createFile(dbDir, "e.go") require.NoError(t, err) - return dir + testDir := filepath.Join(dir, "test") + err = os.Mkdir(testDir, 0755) + require.NoError(t, err) + sub1 := filepath.Join(testDir, "sub1") + err = os.Mkdir(sub1, 0755) + require.NoError(t, err) + + err = createFile(sub1, "f.go") + require.NoError(t, err) + + sub2 := filepath.Join(sub1, "sub2") + err = os.Mkdir(sub2, 0755) + require.NoError(t, err) + + err = createFile(sub2, "g.go") + require.NoError(t, err) + + return dir } func TestGetFileSet(t *testing.T) { @@ -78,7 +95,7 @@ func TestGetFileSet(t *testing.T) { fileList, err := getFileList(ctx, s) require.NoError(t, err) - require.Equal(t, len(fileList), 7) + require.Equal(t, len(fileList), 9) inc, err = fileset.NewGlobSet(dir, []string{}) require.NoError(t, err) @@ -98,7 +115,7 @@ func TestGetFileSet(t *testing.T) { require.NoError(t, err) require.Equal(t, len(fileList), 1) - inc, err = fileset.NewGlobSet(dir, []string{".databricks/*.*"}) + inc, err = fileset.NewGlobSet(dir, []string{".databricks/*"}) require.NoError(t, err) excl, err = fileset.NewGlobSet(dir, []string{}) @@ -114,6 +131,34 @@ func TestGetFileSet(t *testing.T) { fileList, err = getFileList(ctx, s) require.NoError(t, err) - require.Equal(t, len(fileList), 8) - + require.Equal(t, len(fileList), 10) +} + +func TestRecursiveExclude(t *testing.T) { + ctx := context.Background() + + dir := setupFiles(t) + fileSet, err := git.NewFileSet(dir) + require.NoError(t, err) + + err = fileSet.EnsureValidGitIgnoreExists() + require.NoError(t, err) + + inc, err := fileset.NewGlobSet(dir, []string{}) + require.NoError(t, err) + + excl, err := fileset.NewGlobSet(dir, []string{"test/**"}) + require.NoError(t, err) + + s := &Sync{ + SyncOptions: &SyncOptions{}, + + fileSet: fileSet, + includeFileSet: inc, + excludeFileSet: excl, + } + + fileList, err := getFileList(ctx, s) + require.NoError(t, err) + require.Equal(t, len(fileList), 7) } From 803ecb5efd1c6553492cba104e804cb7632a1faa Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Oct 2023 12:58:02 +0200 Subject: [PATCH 146/310] Automatically create a release PR in homebrew-tap repo (#841) ## Changes Automatically create a release PR in homebrew-tap repo ## Tests Ran ` act -j create-homebrew-tap-release-pr -s DECO_GITHUB_TOKEN="$(gh auth token)"` Result: https://github.com/databricks/homebrew-tap/pull/29 --- .github/workflows/release.yml | 39 ++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8d09c963..9184893d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -30,7 +30,7 @@ jobs: env: 
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - create-release-prs: + create-setup-cli-release-pr: needs: goreleaser runs-on: ubuntu-latest steps: @@ -53,3 +53,40 @@ jobs: version: "${{ env.VERSION }}", } }); + + create-homebrew-tap-release-pr: + needs: goreleaser + runs-on: ubuntu-latest + steps: + - name: Set VERSION variable from tag + run: | + VERSION=${{ github.ref_name }} + echo "VERSION=${VERSION:1}" >> $GITHUB_ENV + + - name: Update homebrew-tap + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.DECO_GITHUB_TOKEN }} + script: | + let artifacts = JSON.parse('${{ needs.goreleaser.outputs.artifacts }}') + artifacts = artifacts.filter(a => a.type == "Archive") + artifacts = new Map( + artifacts.map(a => [ + a.goos + "_" + a.goarch, + a.extra.Checksum.replace("sha256:", "") + ]) + ) + + await github.rest.actions.createWorkflowDispatch({ + owner: 'databricks', + repo: 'homebrew-tap', + workflow_id: 'release-pr.yml', + ref: 'main', + inputs: { + version: "${{ env.VERSION }}", + darwin_amd64_sha: artifacts.get('darwin_amd64'), + darwin_arm64_sha: artifacts.get('darwin_arm64'), + linux_amd64_sha: artifacts.get('linux_amd64'), + linux_arm64_sha: artifacts.get('linux_arm64') + } + }); From 943ea89728e18c068584aabb0abbcf351e4eba65 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Oct 2023 17:18:18 +0200 Subject: [PATCH 147/310] Allow target overrides for sync section (#856) ## Changes Allow target overrides for sync section ## Tests Added tests --- bundle/config/root.go | 7 +++ bundle/config/target.go | 2 + bundle/tests/override_sync/databricks.yml | 26 +++++++++++ .../override_sync_no_root/databricks.yml | 22 ++++++++++ bundle/tests/override_sync_test.go | 43 +++++++++++++++++++ 5 files changed, 100 insertions(+) create mode 100644 bundle/tests/override_sync/databricks.yml create mode 100644 bundle/tests/override_sync_no_root/databricks.yml create mode 100644 bundle/tests/override_sync_test.go diff --git a/bundle/config/root.go b/bundle/config/root.go index 3c79fb0b..bf203833 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -280,5 +280,12 @@ func (r *Root) MergeTargetOverrides(target *Target) error { git.OriginURL = target.Git.OriginURL } + if target.Sync != nil { + err = mergo.Merge(&r.Sync, target.Sync, mergo.WithAppendSlice) + if err != nil { + return err + } + } + return nil } diff --git a/bundle/config/target.go b/bundle/config/target.go index 2489efc3..fc776c7b 100644 --- a/bundle/config/target.go +++ b/bundle/config/target.go @@ -35,6 +35,8 @@ type Target struct { Git Git `json:"git,omitempty"` RunAs *jobs.JobRunAs `json:"run_as,omitempty"` + + Sync *Sync `json:"sync,omitempty"` } const ( diff --git a/bundle/tests/override_sync/databricks.yml b/bundle/tests/override_sync/databricks.yml new file mode 100644 index 00000000..1417b864 --- /dev/null +++ b/bundle/tests/override_sync/databricks.yml @@ -0,0 +1,26 @@ +bundle: + name: override_sync + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: + include: + - src/* + +targets: + development: + sync: + include: + - tests/* + exclude: + - dist + + staging: + sync: + include: + - fixtures/* + + prod: + workspace: + host: https://acme-prod.cloud.databricks.com/ diff --git a/bundle/tests/override_sync_no_root/databricks.yml b/bundle/tests/override_sync_no_root/databricks.yml new file mode 100644 index 00000000..109d8da1 --- /dev/null +++ b/bundle/tests/override_sync_no_root/databricks.yml @@ -0,0 +1,22 @@ +bundle: + name: override_sync + +workspace: + host: https://acme.cloud.databricks.com/ 
+ +targets: + development: + sync: + include: + - tests/* + exclude: + - dist + + staging: + sync: + include: + - fixtures/* + + prod: + workspace: + host: https://acme-prod.cloud.databricks.com/ diff --git a/bundle/tests/override_sync_test.go b/bundle/tests/override_sync_test.go new file mode 100644 index 00000000..a2d3a05f --- /dev/null +++ b/bundle/tests/override_sync_test.go @@ -0,0 +1,43 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOverrideSyncTarget(t *testing.T) { + b := load(t, "./override_sync") + assert.ElementsMatch(t, []string{"src/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./override_sync", "development") + assert.ElementsMatch(t, []string{"src/*", "tests/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{"dist"}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./override_sync", "staging") + assert.ElementsMatch(t, []string{"src/*", "fixtures/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./override_sync", "prod") + assert.ElementsMatch(t, []string{"src/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) +} + +func TestOverrideSyncTargetNoRootSync(t *testing.T) { + b := load(t, "./override_sync_no_root") + assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./override_sync_no_root", "development") + assert.ElementsMatch(t, []string{"tests/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{"dist"}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./override_sync_no_root", "staging") + assert.ElementsMatch(t, []string{"fixtures/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./override_sync_no_root", "prod") + assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) +} From 77101c9b8567b17fd543f7c44cec7cd8c167bc68 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Wed, 11 Oct 2023 13:12:18 +0200 Subject: [PATCH 148/310] Use profile information when getting a token using the CLI (#855) ## Changes Use stored profile information when the user provides the profile flag when using the `databricks auth token` command. 
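As a rough illustration, the host now comes from the matching profile in `~/.databrickscfg` when only `--profile` is given. A hypothetical profile (the host below is an example, not taken from this change):

```
[DEFAULT]
host = https://my-workspace.cloud.databricks.com/
```

With such a profile, `./cli auth token --profile DEFAULT` resolves the host from the config file instead of prompting for it; passing both a profile and a host argument is rejected.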
## Tests Run the command with and without the profile flag ``` ./cli auth token Databricks Host: https://e2-dogfood.staging.cloud.databricks.com/ { "access_token": "****", "token_type": "Bearer", "expiry": "2023-10-10T14:24:11.85617+02:00" }% ./cli auth token --profile DEFAULT { "access_token": "*****", "token_type": "Bearer", "expiry": "2023-10-10T14:24:11.85617+02:00" }% ./cli auth token https://e2-dogfood.staging.cloud.databricks.com/ { "access_token": "*****", "token_type": "Bearer", "expiry": "2023-10-11T09:24:55.046029+02:00" }% ./cli auth token --profile DEFAULT https://e2-dogfood.staging.cloud.databricks.com/ Error: providing both a profile and a host parameters is not supported ``` --- cmd/auth/login.go | 30 +++++++++++++++++++----------- cmd/auth/token.go | 17 +++++++++++++++-- 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/cmd/auth/login.go b/cmd/auth/login.go index a14c5ebe..3a3f3a6d 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -60,20 +60,10 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { profileName = profile } - // If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile. - _, profiles, err := databrickscfg.LoadProfiles(func(p databrickscfg.Profile) bool { - return p.Name == profileName - }) + err := setHost(ctx, profileName, persistentAuth, args) if err != nil { return err } - if persistentAuth.Host == "" { - if len(profiles) > 0 && profiles[0].Host != "" { - persistentAuth.Host = profiles[0].Host - } else { - configureHost(ctx, persistentAuth, args, 0) - } - } defer persistentAuth.Close() // We need the config without the profile before it's used to initialise new workspace client below. @@ -135,3 +125,21 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { return cmd } + +func setHost(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { + // If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile. 
+ _, profiles, err := databrickscfg.LoadProfiles(func(p databrickscfg.Profile) bool { + return p.Name == profileName + }) + if err != nil { + return err + } + if persistentAuth.Host == "" { + if len(profiles) > 0 && profiles[0].Host != "" { + persistentAuth.Host = profiles[0].Host + } else { + configureHost(ctx, persistentAuth, args, 0) + } + } + return nil +} diff --git a/cmd/auth/token.go b/cmd/auth/token.go index 242a3dab..d763b956 100644 --- a/cmd/auth/token.go +++ b/cmd/auth/token.go @@ -3,6 +3,7 @@ package auth import ( "context" "encoding/json" + "errors" "time" "github.com/databricks/cli/libs/auth" @@ -21,8 +22,20 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - if persistentAuth.Host == "" { - configureHost(ctx, persistentAuth, args, 0) + + var profileName string + profileFlag := cmd.Flag("profile") + if profileFlag != nil { + profileName = profileFlag.Value.String() + // If a profile is provided we read the host from the .databrickscfg file + if profileName != "" && len(args) > 0 { + return errors.New("providing both a profile and a host parameters is not supported") + } + } + + err := setHost(ctx, profileName, persistentAuth, args) + if err != nil { + return err } defer persistentAuth.Close() From 420a01e67fcbb78a326b108d549f0f8e270c00bd Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 11 Oct 2023 14:29:41 +0200 Subject: [PATCH 149/310] Release v0.207.1 (#860) CLI: * Improve `workspace import` command by allowing references to local files for content ([#793](https://github.com/databricks/cli/pull/793)). * Add `--file` flag to workspace export command ([#794](https://github.com/databricks/cli/pull/794)). * Ensure profile flag is respected for sync command ([#837](https://github.com/databricks/cli/pull/837)). * Add hint to delete sync snapshot if parsing fails ([#853](https://github.com/databricks/cli/pull/853)). * Use profile information when getting a token using the CLI ([#855](https://github.com/databricks/cli/pull/855)). Bundles: * Minor template tweaks ([#832](https://github.com/databricks/cli/pull/832)). * Fixed using repo files as pipeline libraries ([#847](https://github.com/databricks/cli/pull/847)). * Support .gitignore syntax in sync section and make sure it works recursively ([#854](https://github.com/databricks/cli/pull/854)). * Allow target overrides for sync section ([#856](https://github.com/databricks/cli/pull/856)). Internal: * Fix import export integration tests on windows ([#842](https://github.com/databricks/cli/pull/842)). * Fix workspace import test ([#844](https://github.com/databricks/cli/pull/844)). * Automatically create a release PR in homebrew-tap repo ([#841](https://github.com/databricks/cli/pull/841)). Dependency updates: * Bump golang.org/x/term from 0.12.0 to 0.13.0 ([#852](https://github.com/databricks/cli/pull/852)). * Bump golang.org/x/mod from 0.12.0 to 0.13.0 ([#851](https://github.com/databricks/cli/pull/851)). * Bump golang.org/x/sync from 0.3.0 to 0.4.0 ([#849](https://github.com/databricks/cli/pull/849)). * Bump golang.org/x/oauth2 from 0.12.0 to 0.13.0 ([#850](https://github.com/databricks/cli/pull/850)). 
--- CHANGELOG.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b4740c5..5f1227f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Version changelog +## 0.207.1 + +CLI: + * Improve `workspace import` command by allowing references to local files for content ([#793](https://github.com/databricks/cli/pull/793)). + * Add `--file` flag to workspace export command ([#794](https://github.com/databricks/cli/pull/794)). + * Ensure profile flag is respected for sync command ([#837](https://github.com/databricks/cli/pull/837)). + * Add hint to delete sync snapshot if parsing fails ([#853](https://github.com/databricks/cli/pull/853)). + * Use profile information when getting a token using the CLI ([#855](https://github.com/databricks/cli/pull/855)). + +Bundles: + * Minor template tweaks ([#832](https://github.com/databricks/cli/pull/832)). + * Fixed using repo files as pipeline libraries ([#847](https://github.com/databricks/cli/pull/847)). + * Support .gitignore syntax in sync section and make sure it works recursively ([#854](https://github.com/databricks/cli/pull/854)). + * Allow target overrides for sync section ([#856](https://github.com/databricks/cli/pull/856)). + +Internal: + * Fix import export integration tests on windows ([#842](https://github.com/databricks/cli/pull/842)). + * Fix workspace import test ([#844](https://github.com/databricks/cli/pull/844)). + * Automatically create a release PR in homebrew-tap repo ([#841](https://github.com/databricks/cli/pull/841)). + + +Dependency updates: + * Bump golang.org/x/term from 0.12.0 to 0.13.0 ([#852](https://github.com/databricks/cli/pull/852)). + * Bump golang.org/x/mod from 0.12.0 to 0.13.0 ([#851](https://github.com/databricks/cli/pull/851)). + * Bump golang.org/x/sync from 0.3.0 to 0.4.0 ([#849](https://github.com/databricks/cli/pull/849)). + * Bump golang.org/x/oauth2 from 0.12.0 to 0.13.0 ([#850](https://github.com/databricks/cli/pull/850)). + ## 0.207.0 CLI: From bb92e0c0f00f18a3ab66db404d1af813a54b652a Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 11 Oct 2023 15:50:55 +0200 Subject: [PATCH 150/310] Define goreleaser job output (#861) ## Changes Define goreleaser job output --- .github/workflows/release.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9184893d..a7901dae 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,6 +9,8 @@ on: jobs: goreleaser: + outputs: + artifacts: ${{ steps.releaser.outputs.artifacts }} runs-on: ubuntu-latest steps: - name: Checkout repository and submodules @@ -23,6 +25,7 @@ jobs: go-version: 1.21.0 - name: Run GoReleaser + id: releaser uses: goreleaser/goreleaser-action@v4 with: version: latest From c0903b8b7bb1b71734c490c75fff89f7d26f0b93 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 10:01:42 +0200 Subject: [PATCH 151/310] Bump golang.org/x/net from 0.16.0 to 0.17.0 (#863) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.16.0 to 0.17.0.
Commits:
  • b225e7c http2: limit maximum handler goroutines to MaxConcurrentStreams

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d6e72e47..fe9bf9cc 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/zclconf/go-cty v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.14.0 // indirect - golang.org/x/net v0.16.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.143.0 // indirect diff --git a/go.sum b/go.sum index 96eef68c..b040d0dc 100644 --- a/go.sum +++ b/go.sum @@ -185,8 +185,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= From ff01898b617a25f7e0235ac08f96dc1acd73cb48 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 13 Oct 2023 15:04:15 +0200 Subject: [PATCH 152/310] Use already instantiated WorkspaceClient in sync command (#867) ## Changes Since we use `root.MustWorkspaceClient` now, we should use already initialised version of WorkspaceClient instead of instantiating a new one. Fixes #836 --- cmd/sync/sync.go | 3 +-- cmd/sync/sync_test.go | 6 +++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 7cfc1f29..5416b573 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -13,7 +13,6 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/sync" - "github.com/databricks/databricks-sdk-go" "github.com/spf13/cobra" ) @@ -70,7 +69,7 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn // The sync code will automatically create this directory if it doesn't // exist and add it to the `.gitignore` file in the root. 
SnapshotBasePath: filepath.Join(args[0], ".databricks"), - WorkspaceClient: databricks.Must(databricks.NewWorkspaceClient()), + WorkspaceClient: root.WorkspaceClient(cmd.Context()), } return &opts, nil } diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index 06e97540..14f641ff 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -1,12 +1,14 @@ package sync import ( + "context" "flag" "path/filepath" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/cmd/root" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -49,7 +51,9 @@ func TestSyncOptionsFromArgsRequiredTwoArgs(t *testing.T) { func TestSyncOptionsFromArgs(t *testing.T) { f := syncFlags{} - opts, err := f.syncOptionsFromArgs(New(), []string{"/local", "/remote"}) + cmd := New() + cmd.SetContext(root.SetWorkspaceClient(context.Background(), nil)) + opts, err := f.syncOptionsFromArgs(cmd, []string{"/local", "/remote"}) require.NoError(t, err) assert.Equal(t, "/local", opts.LocalPath) assert.Equal(t, "/remote", opts.RemotePath) From 36f30c8b47418ed94455ba78066f784bb5a24df7 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Mon, 16 Oct 2023 08:56:06 +0200 Subject: [PATCH 153/310] Update Go SDK to 0.23.0 and use custom marshaller (#772) ## Changes Update Go SDK to 0.23.0 and use custom marshaller. ## Tests * Run unit tests * Run nightly * Manual test: ``` ./cli jobs create --json @myjob.json ``` with ``` { "name": "my-job-marshal-test-go", "tasks": [{ "task_key": "testgomarshaltask", "new_cluster": { "num_workers": 0, "spark_version": "10.4.x-scala2.12", "node_type_id": "Standard_DS3_v2" }, "libraries": [ { "jar": "dbfs:/max/jars/exampleJarTask.jar" } ], "spark_jar_task": { "main_class_name": "com.databricks.quickstart.exampleTask" } }] } ``` Main branch: ``` Error: Cluster validation error: Missing required field: settings.cluster_spec.new_cluster.size ``` This branch: ``` { "job_id": } ``` --------- Co-authored-by: Miles Yucht --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 - bundle/config/resources/job.go | 9 + bundle/config/resources/mlflow_experiment.go | 9 + bundle/config/resources/mlflow_model.go | 9 + .../resources/model_serving_endpoint.go | 9 + bundle/config/resources/pipeline.go | 9 + bundle/config/workspace.go | 9 + cmd/account/billable-usage/billable-usage.go | 5 +- cmd/account/cmd.go | 2 - cmd/account/network-policy/network-policy.go | 243 ------------------ .../storage-credentials.go | 12 +- cmd/workspace/clusters/clusters.go | 4 +- cmd/workspace/connections/connections.go | 2 + .../instance-pools/instance-pools.go | 7 - cmd/workspace/jobs/jobs.go | 8 +- cmd/workspace/pipelines/pipelines.go | 10 +- .../storage-credentials.go | 13 +- .../workspace-bindings/workspace-bindings.go | 161 +++++++++++- cmd/workspace/workspace/workspace.go | 6 +- go.mod | 4 +- go.sum | 8 +- internal/acc/debug.go | 3 +- internal/jobs_test.go | 25 ++ .../testjsons/create_job_without_workers.json | 35 +++ 25 files changed, 307 insertions(+), 298 deletions(-) delete mode 100755 cmd/account/network-policy/network-policy.go create mode 100644 internal/jobs_test.go create mode 100644 internal/testjsons/create_job_without_workers.json diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 7d4ee2a6..e36ae531 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -bcbf6e851e3d82fd910940910dd31c10c059746c \ No newline at end of file +493a76554afd3afdd15dc858773d01643f80352a \ No newline at end of file diff --git 
a/.gitattributes b/.gitattributes index 61527fed..f50218fe 100755 --- a/.gitattributes +++ b/.gitattributes @@ -10,7 +10,6 @@ cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true cmd/account/metastores/metastores.go linguist-generated=true -cmd/account/network-policy/network-policy.go linguist-generated=true cmd/account/networks/networks.go linguist-generated=true cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 7fc5b761..edda8a92 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -2,6 +2,7 @@ package resources import ( "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/imdario/mergo" ) @@ -15,6 +16,14 @@ type Job struct { *jobs.JobSettings } +func (s *Job) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Job) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // MergeJobClusters merges job clusters with the same key. // The job clusters field is a slice, and as such, overrides are appended to it. // We can identify a job cluster by its key, however, so we can use this key diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index d843cf22..e4a9a8a8 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -2,6 +2,7 @@ package resources import ( "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -12,3 +13,11 @@ type MlflowExperiment struct { *ml.Experiment } + +func (s *MlflowExperiment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MlflowExperiment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 92617c95..51fb0e08 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -2,6 +2,7 @@ package resources import ( "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -12,3 +13,11 @@ type MlflowModel struct { *ml.Model } + +func (s *MlflowModel) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MlflowModel) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index dccecaa6..3847e6a6 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -2,6 +2,7 @@ package resources import ( "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/serving" ) @@ -22,3 +23,11 @@ type ModelServingEndpoint struct { // Implementation could be different based on the resource type. 
Permissions []Permission `json:"permissions,omitempty"` } + +func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ModelServingEndpoint) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index 94c0f2b0..5c741f8a 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -4,6 +4,7 @@ import ( "strings" "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/imdario/mergo" ) @@ -17,6 +18,14 @@ type Pipeline struct { *pipelines.PipelineSpec } +func (s *Pipeline) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Pipeline) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // MergeClusters merges cluster definitions with same label. // The clusters field is a slice, and as such, overrides are appended to it. // We can identify a cluster by its label, however, so we can use this label diff --git a/bundle/config/workspace.go b/bundle/config/workspace.go index 90cd59c6..f29d7c56 100644 --- a/bundle/config/workspace.go +++ b/bundle/config/workspace.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/iam" ) @@ -69,6 +70,14 @@ type User struct { *iam.User } +func (s *User) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s User) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { cfg := databricks.Config{ // Generic diff --git a/cmd/account/billable-usage/billable-usage.go b/cmd/account/billable-usage/billable-usage.go index b5b9749d..1dde38f6 100755 --- a/cmd/account/billable-usage/billable-usage.go +++ b/cmd/account/billable-usage/billable-usage.go @@ -4,6 +4,7 @@ package billable_usage import ( "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/databricks-sdk-go/service/billing" "github.com/spf13/cobra" ) @@ -80,11 +81,11 @@ func newDownload() *cobra.Command { downloadReq.StartMonth = args[0] downloadReq.EndMonth = args[1] - err = a.BillableUsage.Download(ctx, downloadReq) + response, err := a.BillableUsage.Download(ctx, downloadReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 38be7314..744b3670 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -16,7 +16,6 @@ import ( log_delivery "github.com/databricks/cli/cmd/account/log-delivery" account_metastore_assignments "github.com/databricks/cli/cmd/account/metastore-assignments" account_metastores "github.com/databricks/cli/cmd/account/metastores" - account_network_policy "github.com/databricks/cli/cmd/account/network-policy" networks "github.com/databricks/cli/cmd/account/networks" o_auth_enrollment "github.com/databricks/cli/cmd/account/o-auth-enrollment" o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps" @@ -50,7 +49,6 @@ func New() *cobra.Command { cmd.AddCommand(log_delivery.New()) cmd.AddCommand(account_metastore_assignments.New()) cmd.AddCommand(account_metastores.New()) - cmd.AddCommand(account_network_policy.New()) cmd.AddCommand(networks.New()) cmd.AddCommand(o_auth_enrollment.New()) cmd.AddCommand(o_auth_published_apps.New()) diff --git a/cmd/account/network-policy/network-policy.go b/cmd/account/network-policy/network-policy.go deleted file mode 100755 index 60db933a..00000000 --- a/cmd/account/network-policy/network-policy.go +++ /dev/null @@ -1,243 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package network_policy - -import ( - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/settings" - "github.com/spf13/cobra" -) - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cmdOverrides []func(*cobra.Command) - -func New() *cobra.Command { - cmd := &cobra.Command{ - Use: "network-policy", - Short: `Network policy is a set of rules that defines what can be accessed from your Databricks network.`, - Long: `Network policy is a set of rules that defines what can be accessed from your - Databricks network. E.g.: You can choose to block your SQL UDF to access - internet from your Databricks serverless clusters. - - There is only one instance of this setting per account. Since this setting has - a default value, this setting is present on all accounts even though it's - never set on a given account. Deletion reverts the value of the setting back - to the default value.`, - GroupID: "settings", - Annotations: map[string]string{ - "package": "settings", - }, - - // This service is being previewed; hide from help output. - Hidden: true, - } - - // Apply optional overrides to this command. - for _, fn := range cmdOverrides { - fn(cmd) - } - - return cmd -} - -// start delete-account-network-policy command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deleteAccountNetworkPolicyOverrides []func( - *cobra.Command, - *settings.DeleteAccountNetworkPolicyRequest, -) - -func newDeleteAccountNetworkPolicy() *cobra.Command { - cmd := &cobra.Command{} - - var deleteAccountNetworkPolicyReq settings.DeleteAccountNetworkPolicyRequest - - // TODO: short flags - - cmd.Use = "delete-account-network-policy ETAG" - cmd.Short = `Delete Account Network Policy.` - cmd.Long = `Delete Account Network Policy. 
- - Reverts back all the account network policies back to default.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - deleteAccountNetworkPolicyReq.Etag = args[0] - - response, err := a.NetworkPolicy.DeleteAccountNetworkPolicy(ctx, deleteAccountNetworkPolicyReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteAccountNetworkPolicyOverrides { - fn(cmd, &deleteAccountNetworkPolicyReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteAccountNetworkPolicy()) - }) -} - -// start read-account-network-policy command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var readAccountNetworkPolicyOverrides []func( - *cobra.Command, - *settings.ReadAccountNetworkPolicyRequest, -) - -func newReadAccountNetworkPolicy() *cobra.Command { - cmd := &cobra.Command{} - - var readAccountNetworkPolicyReq settings.ReadAccountNetworkPolicyRequest - - // TODO: short flags - - cmd.Use = "read-account-network-policy ETAG" - cmd.Short = `Get Account Network Policy.` - cmd.Long = `Get Account Network Policy. - - Gets the value of Account level Network Policy.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - readAccountNetworkPolicyReq.Etag = args[0] - - response, err := a.NetworkPolicy.ReadAccountNetworkPolicy(ctx, readAccountNetworkPolicyReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range readAccountNetworkPolicyOverrides { - fn(cmd, &readAccountNetworkPolicyReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReadAccountNetworkPolicy()) - }) -} - -// start update-account-network-policy command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var updateAccountNetworkPolicyOverrides []func( - *cobra.Command, - *settings.UpdateAccountNetworkPolicyRequest, -) - -func newUpdateAccountNetworkPolicy() *cobra.Command { - cmd := &cobra.Command{} - - var updateAccountNetworkPolicyReq settings.UpdateAccountNetworkPolicyRequest - var updateAccountNetworkPolicyJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateAccountNetworkPolicyJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().BoolVar(&updateAccountNetworkPolicyReq.AllowMissing, "allow-missing", updateAccountNetworkPolicyReq.AllowMissing, `This should always be set to true for Settings RPCs.`) - // TODO: complex arg: setting - - cmd.Use = "update-account-network-policy" - cmd.Short = `Update Account Network Policy.` - cmd.Long = `Update Account Network Policy. - - Updates the policy content of Account level Network Policy.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - if cmd.Flags().Changed("json") { - err = updateAccountNetworkPolicyJson.Unmarshal(&updateAccountNetworkPolicyReq) - if err != nil { - return err - } - } else { - } - - response, err := a.NetworkPolicy.UpdateAccountNetworkPolicy(ctx, updateAccountNetworkPolicyReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range updateAccountNetworkPolicyOverrides { - fn(cmd, &updateAccountNetworkPolicyReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateAccountNetworkPolicy()) - }) -} - -// end service AccountNetworkPolicy diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index 451b7112..670bb26d 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -128,7 +128,7 @@ func newDelete() *cobra.Command { cmd.Flags().BoolVar(&deleteReq.Force, "force", deleteReq.Force, `Force deletion even if the Storage Credential is not empty.`) - cmd.Use = "delete METASTORE_ID NAME" + cmd.Use = "delete METASTORE_ID STORAGE_CREDENTIAL_NAME" cmd.Short = `Delete a storage credential.` cmd.Long = `Delete a storage credential. @@ -148,7 +148,7 @@ func newDelete() *cobra.Command { a := root.AccountClient(ctx) deleteReq.MetastoreId = args[0] - deleteReq.Name = args[1] + deleteReq.StorageCredentialName = args[1] err = a.StorageCredentials.Delete(ctx, deleteReq) if err != nil { @@ -191,7 +191,7 @@ func newGet() *cobra.Command { // TODO: short flags - cmd.Use = "get METASTORE_ID NAME" + cmd.Use = "get METASTORE_ID STORAGE_CREDENTIAL_NAME" cmd.Short = `Gets the named storage credential.` cmd.Long = `Gets the named storage credential. 
@@ -212,7 +212,7 @@ func newGet() *cobra.Command { a := root.AccountClient(ctx) getReq.MetastoreId = args[0] - getReq.Name = args[1] + getReq.StorageCredentialName = args[1] response, err := a.StorageCredentials.Get(ctx, getReq) if err != nil { @@ -321,7 +321,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: credential_info - cmd.Use = "update METASTORE_ID NAME" + cmd.Use = "update METASTORE_ID STORAGE_CREDENTIAL_NAME" cmd.Short = `Updates a storage credential.` cmd.Long = `Updates a storage credential. @@ -348,7 +348,7 @@ func newUpdate() *cobra.Command { } } updateReq.MetastoreId = args[0] - updateReq.Name = args[1] + updateReq.StorageCredentialName = args[1] response, err := a.StorageCredentials.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index f14864f0..07effe09 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -160,7 +160,7 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, `Note: This field won't be true for webapp requests.`) + cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, ``) // TODO: complex arg: autoscale cmd.Flags().IntVar(&createReq.AutoterminationMinutes, "autotermination-minutes", createReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`) // TODO: complex arg: aws_attributes @@ -387,7 +387,7 @@ func newEdit() *cobra.Command { // TODO: short flags cmd.Flags().Var(&editJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, `Note: This field won't be true for webapp requests.`) + cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, ``) // TODO: complex arg: autoscale cmd.Flags().IntVar(&editReq.AutoterminationMinutes, "autotermination-minutes", editReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`) // TODO: complex arg: aws_attributes diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index 917aeda9..26a5eec9 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -330,6 +330,8 @@ func newUpdate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the connection.`) + cmd.Use = "update" cmd.Short = `Update a connection.` cmd.Long = `Update a connection. 
diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 9e7805ae..b03542c0 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -239,17 +239,10 @@ func newEdit() *cobra.Command { // TODO: short flags cmd.Flags().Var(&editJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: complex arg: aws_attributes - // TODO: complex arg: azure_attributes // TODO: map via StringToStringVar: custom_tags - // TODO: complex arg: disk_spec - cmd.Flags().BoolVar(&editReq.EnableElasticDisk, "enable-elastic-disk", editReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.`) - // TODO: complex arg: gcp_attributes cmd.Flags().IntVar(&editReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", editReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`) cmd.Flags().IntVar(&editReq.MaxCapacity, "max-capacity", editReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`) cmd.Flags().IntVar(&editReq.MinIdleInstances, "min-idle-instances", editReq.MinIdleInstances, `Minimum number of idle instances to keep in the instance pool.`) - // TODO: array: preloaded_docker_images - // TODO: array: preloaded_spark_versions cmd.Use = "edit INSTANCE_POOL_ID INSTANCE_POOL_NAME NODE_TYPE_ID" cmd.Short = `Edit an existing instance pool.` diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 9edebb66..be0df694 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -1256,11 +1256,11 @@ func newReset() *cobra.Command { cmd.Flags().Var(&resetJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Use = "reset" - cmd.Short = `Overwrites all settings for a job.` - cmd.Long = `Overwrites all settings for a job. + cmd.Short = `Overwrite all settings for a job.` + cmd.Long = `Overwrite all settings for a job. - Overwrites all the settings for a specific job. Use the Update endpoint to - update job settings partially.` + Overwrite all settings for the given job. Use the Update endpoint to update + job settings partially.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 06d904d3..d24606cd 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -919,10 +919,11 @@ func newStartUpdate() *cobra.Command { // TODO: array: refresh_selection cmd.Use = "start-update PIPELINE_ID" - cmd.Short = `Queue a pipeline update.` - cmd.Long = `Queue a pipeline update. + cmd.Short = `Start a pipeline.` + cmd.Long = `Start a pipeline. - Starts or queues a pipeline update.` + Starts a new update for the pipeline. If there is already an active update for + the pipeline, the request will fail and the active update will remain running.` cmd.Annotations = make(map[string]string) @@ -1006,7 +1007,8 @@ func newStop() *cobra.Command { cmd.Short = `Stop a pipeline.` cmd.Long = `Stop a pipeline. - Stops a pipeline.` + Stops the pipeline by canceling the active update. 
If there is no active + update for the pipeline, this request is a no-op.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index b5dd5141..9754d0ff 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -77,14 +77,7 @@ func newCreate() *cobra.Command { cmd.Short = `Create a storage credential.` cmd.Long = `Create a storage credential. - Creates a new storage credential. The request object is specific to the cloud: - - * **AwsIamRole** for AWS credentials. * **AzureServicePrincipal** for Azure - credentials. * **AzureManagedIdentity** for Azure managed credentials. * - **DatabricksGcpServiceAccount** for GCP managed credentials. - - The caller must be a metastore admin and have the - **CREATE_STORAGE_CREDENTIAL** privilege on the metastore.` + Creates a new storage credential.` cmd.Annotations = make(map[string]string) @@ -371,9 +364,7 @@ func newUpdate() *cobra.Command { cmd.Short = `Update a credential.` cmd.Long = `Update a credential. - Updates a storage credential on the metastore. The caller must be the owner of - the storage credential or a metastore admin. If the caller is a metastore - admin, only the __owner__ credential can be changed.` + Updates a storage credential on the metastore.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/workspace-bindings/workspace-bindings.go b/cmd/workspace/workspace-bindings/workspace-bindings.go index 3d7fa677..2d2bb5ed 100755 --- a/cmd/workspace/workspace-bindings/workspace-bindings.go +++ b/cmd/workspace/workspace-bindings/workspace-bindings.go @@ -17,13 +17,25 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "workspace-bindings", - Short: `A catalog in Databricks can be configured as __OPEN__ or __ISOLATED__.`, - Long: `A catalog in Databricks can be configured as __OPEN__ or __ISOLATED__. An - __OPEN__ catalog can be accessed from any workspace, while an __ISOLATED__ - catalog can only be access from a configured list of workspaces. + Short: `A securable in Databricks can be configured as __OPEN__ or __ISOLATED__.`, + Long: `A securable in Databricks can be configured as __OPEN__ or __ISOLATED__. An + __OPEN__ securable can be accessed from any workspace, while an __ISOLATED__ + securable can only be accessed from a configured list of workspaces. This API + allows you to configure (bind) securables to workspaces. - A catalog's workspace bindings can be configured by a metastore admin or the - owner of the catalog.`, + NOTE: The __isolation_mode__ is configured for the securable itself (using its + Update method) and the workspace bindings are only consulted when the + securable's __isolation_mode__ is set to __ISOLATED__. + + A securable's workspace bindings can be configured by a metastore admin or the + owner of the securable. + + The original path (/api/2.1/unity-catalog/workspace-bindings/catalogs/{name}) + is deprecated. Please use the new path + (/api/2.1/unity-catalog/bindings/{securable_type}/{securable_name}) which + introduces the ability to bind a securable in READ_ONLY mode (catalogs only). + + Securables that support binding: - catalog`, GroupID: "catalog", Annotations: map[string]string{ "package": "catalog", @@ -100,6 +112,69 @@ func init() { }) } +// start get-bindings command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var getBindingsOverrides []func( + *cobra.Command, + *catalog.GetBindingsRequest, +) + +func newGetBindings() *cobra.Command { + cmd := &cobra.Command{} + + var getBindingsReq catalog.GetBindingsRequest + + // TODO: short flags + + cmd.Use = "get-bindings SECURABLE_TYPE SECURABLE_NAME" + cmd.Short = `Get securable workspace bindings.` + cmd.Long = `Get securable workspace bindings. + + Gets workspace bindings of the securable. The caller must be a metastore admin + or an owner of the securable.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getBindingsReq.SecurableType = args[0] + getBindingsReq.SecurableName = args[1] + + response, err := w.WorkspaceBindings.GetBindings(ctx, getBindingsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getBindingsOverrides { + fn(cmd, &getBindingsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetBindings()) + }) +} + // start update command // Slice with functions to override default command behavior. @@ -173,4 +248,78 @@ func init() { }) } +// start update-bindings command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateBindingsOverrides []func( + *cobra.Command, + *catalog.UpdateWorkspaceBindingsParameters, +) + +func newUpdateBindings() *cobra.Command { + cmd := &cobra.Command{} + + var updateBindingsReq catalog.UpdateWorkspaceBindingsParameters + var updateBindingsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateBindingsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: add + // TODO: array: remove + + cmd.Use = "update-bindings SECURABLE_TYPE SECURABLE_NAME" + cmd.Short = `Update securable workspace bindings.` + cmd.Long = `Update securable workspace bindings. + + Updates workspace bindings of the securable. The caller must be a metastore + admin or an owner of the securable.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateBindingsJson.Unmarshal(&updateBindingsReq) + if err != nil { + return err + } + } + updateBindingsReq.SecurableType = args[0] + updateBindingsReq.SecurableName = args[1] + + response, err := w.WorkspaceBindings.UpdateBindings(ctx, updateBindingsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
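+	// (Editorial note, not part of the original patch: a typical invocation is
+	// `update-bindings SECURABLE_TYPE SECURABLE_NAME --json ...`, for example
+	// with securable type "catalog" and the catalog's name, where the JSON
+	// body supplies the add/remove workspace lists. The exact payload shape
+	// is an assumption here; it is not shown in this diff.)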
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateBindingsOverrides { + fn(cmd, &updateBindingsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdateBindings()) + }) +} + // end service WorkspaceBindings diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 124680f0..2541c8e3 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -428,8 +428,10 @@ func newImport() *cobra.Command { Imports a workspace object (for example, a notebook or file) or the contents of an entire directory. If path already exists and overwrite is set to - false, this call returns an error RESOURCE_ALREADY_EXISTS. One can only - use DBC format to import a directory.` + false, this call returns an error RESOURCE_ALREADY_EXISTS. To import a + directory, you can use either the DBC format or the SOURCE format with the + language field unset. To import a single file as SOURCE, you must set the + language field.` cmd.Annotations = make(map[string]string) diff --git a/go.mod b/go.mod index fe9bf9cc..ffa44f74 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.22.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.23.0 // Apache 2.0 github.com/fatih/color v1.15.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.3.1 // BSD-3-Clause @@ -54,7 +54,7 @@ require ( golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/api v0.143.0 // indirect + google.golang.org/api v0.146.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect google.golang.org/grpc v1.58.2 // indirect diff --git a/go.sum b/go.sum index b040d0dc..c9e4cd1f 100644 --- a/go.sum +++ b/go.sum @@ -29,8 +29,8 @@ github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEM github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/databricks/databricks-sdk-go v0.22.0 h1:CIwNZcOV7wYZmRLl1NWA+07f2j6H9h5L6MhR5O/4dRw= -github.com/databricks/databricks-sdk-go v0.22.0/go.mod h1:COiklTN3IdieazXcs4TnMou5GQFwIM7uhMGrz7nEAAk= +github.com/databricks/databricks-sdk-go v0.23.0 h1:rdLMA7cDUPJiCSMyuUSufzDDmugqyp79SNiY/vc7kMI= +github.com/databricks/databricks-sdk-go v0.23.0/go.mod h1:a6rErRNh5bz+IJbO07nwW70iGyvtWidy1p/S5thepXI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -246,8 +246,8 @@ golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA= -google.golang.org/api v0.143.0/go.mod h1:FoX9DO9hT7DLNn97OuoZAGSDuNAXdJRuGK98rSUgurk= +google.golang.org/api v0.146.0 h1:9aBYT4vQXt9dhCuLNfwfd3zpwu8atg0yPkjBymwSrOM= +google.golang.org/api v0.146.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= diff --git a/internal/acc/debug.go b/internal/acc/debug.go index 467642e2..11663113 100644 --- a/internal/acc/debug.go +++ b/internal/acc/debug.go @@ -5,13 +5,14 @@ import ( "os" "path" "path/filepath" + "strings" "testing" ) // Detects if test is run from "debug test" feature in VS Code. func isInDebug() bool { ex, _ := os.Executable() - return path.Base(ex) == "__debug_bin" + return strings.HasPrefix(path.Base(ex), "__debug_bin") } // Loads debug environment from ~/.databricks/debug-env.json. diff --git a/internal/jobs_test.go b/internal/jobs_test.go new file mode 100644 index 00000000..8513168c --- /dev/null +++ b/internal/jobs_test.go @@ -0,0 +1,25 @@ +package internal + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/databricks/cli/internal/acc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccCreateJob(t *testing.T) { + acc.WorkspaceTest(t) + env := GetEnvOrSkipTest(t, "CLOUD_ENV") + if env != "azure" { + t.Skipf("Not running test on cloud %s", env) + } + stdout, stderr := RequireSuccessfulRun(t, "jobs", "create", "--json", "@testjsons/create_job_without_workers.json", "--log-level=debug") + assert.Empty(t, stderr.String()) + var output map[string]int + err := json.Unmarshal(stdout.Bytes(), &output) + require.NoError(t, err) + RequireSuccessfulRun(t, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") +} diff --git a/internal/testjsons/create_job_without_workers.json b/internal/testjsons/create_job_without_workers.json new file mode 100644 index 00000000..e92ce826 --- /dev/null +++ b/internal/testjsons/create_job_without_workers.json @@ -0,0 +1,35 @@ +{ + "name": "create-job-without-workers", + "job_clusters": [{ + "job_cluster_key": "create-job-without-workers-cluster", + "new_cluster": { + "num_workers": 0, + "spark_version": "10.4.x-scala2.12", + "node_type_id": "Standard_DS3_v2" + } + }], + "tasks": [{ + "job_cluster_key": "create-job-without-workers-cluster", + "task_key": "create-job-without-workers-cluster1", + "libraries": [ + { + "jar": "dbfs:/max/jars/exampleJarTask.jar" + } + ], + "spark_jar_task": { + "main_class_name": "com.databricks.quickstart.exampleTask" + } + }, + { + "job_cluster_key": "create-job-without-workers-cluster", + "task_key": "create-job-without-workers-cluster2", + "libraries": [ + { + "jar": "dbfs:/max/jars/exampleJarTask.jar" + } + ], + "spark_jar_task": { + "main_class_name": "com.databricks.quickstart.exampleTask" + } + }] +} From b2cb69198825bba3a9611984509ca895a7ecc286 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:36:01 +0200 Subject: [PATCH 154/310] Add alias for mlops-stack template URL (#869) ## Changes Allows users to initialize mlops-stack by running `bundle init mlops-stack` ## Tests Manually ``` shreyas.goenka@THW32HFW6T playground % cli bundle init 
Template to use [default-python]: mlops-stack Project Name [my-mlops-project]: ^C shreyas.goenka@THW32HFW6T playground % cli bundle init mlops-stack Project Name [my-mlops-project]: ^C ``` --- cmd/bundle/init.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 3038cb7a..3625b7a9 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -18,6 +18,10 @@ var gitUrlPrefixes = []string{ "git@", } +var aliasedTemplates = map[string]string{ + "mlops-stack": "https://github.com/databricks/mlops-stack", +} + func isRepoUrl(url string) bool { result := false for _, prefix := range gitUrlPrefixes { @@ -68,6 +72,11 @@ func newInitCommand() *cobra.Command { } } + // Expand templatePath if it's an alias for a known template + if _, ok := aliasedTemplates[templatePath]; ok { + templatePath = aliasedTemplates[templatePath] + } + if !isRepoUrl(templatePath) { // skip downloading the repo because input arg is not a URL. We assume // it's a path on the local file system in that case From 30c4d2e8a7e027bc52b33a053ea9ee5a63ca26cc Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 16 Oct 2023 10:48:32 +0200 Subject: [PATCH 155/310] Fixed merging task libraries from targets (#868) ## Changes Previously we (erroneously) kept the reference and merged into the original tasks, not into the copies which we later used to replace the existing tasks. Thus the merging of slices and references was incorrect. Fixes #864 ## Tests Added a regression test --- bundle/config/resources/job.go | 4 ++-- bundle/config/resources/job_test.go | 20 ++++++++++++++++---- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index edda8a92..bf29106a 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -41,7 +41,7 @@ func (j *Job) MergeJobClusters() error { ref, ok := keys[key] if !ok { output = append(output, j.JobClusters[i]) - keys[key] = &j.JobClusters[i] + keys[key] = &output[len(output)-1] continue } @@ -74,7 +74,7 @@ func (j *Job) MergeTasks() error { ref, ok := keys[key] if !ok { tasks = append(tasks, j.Tasks[i]) - keys[key] = &j.Tasks[i] + keys[key] = &tasks[len(tasks)-1] continue } diff --git a/bundle/config/resources/job_test.go b/bundle/config/resources/job_test.go index 818d2ac2..24b82fab 100644 --- a/bundle/config/resources/job_test.go +++ b/bundle/config/resources/job_test.go @@ -67,6 +67,9 @@ func TestJobMergeTasks(t *testing.T) { NodeTypeId: "i3.xlarge", NumWorkers: 2, }, + Libraries: []compute.Library{ + {Whl: "package1"}, + }, }, { TaskKey: "bar", @@ -80,6 +83,11 @@ NodeTypeId: "i3.2xlarge", NumWorkers: 4, }, + Libraries: []compute.Library{ + {Pypi: &compute.PythonPyPiLibrary{ + Package: "package2", + }}, + }, }, }, }, @@ -93,10 +101,14 @@ assert.Equal(t, "bar", j.Tasks[1].TaskKey) // This task was merged with a subsequent one. - task0 := j.Tasks[0].NewCluster - assert.Equal(t, "13.3.x-scala2.12", task0.SparkVersion) - assert.Equal(t, "i3.2xlarge", task0.NodeTypeId) - assert.Equal(t, 4, task0.NumWorkers) + task0 := j.Tasks[0] + cluster := task0.NewCluster + assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion) + assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId) + assert.Equal(t, 4, cluster.NumWorkers) + assert.Len(t, task0.Libraries, 2) + assert.Equal(t, task0.Libraries[0].Whl, "package1") + assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2") // This task was left untouched.
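	// (Editorial note, not part of the original diff, on why the fix above
	// works: append copies each element into the output slice, but the old
	// code kept `keys[key] = &j.Tasks[i]`, a pointer into the *source* slice.
	// Later merges for a duplicate key therefore mutated the original element
	// while the copy in `tasks` stayed stale. Pointing at `&tasks[len(tasks)-1]`
	// makes those merges write into the copy that is actually returned.)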
task1 := j.Tasks[1].NewCluster From b940c8631e2d6ade32caf82c4ffe05aad542b68a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 16 Oct 2023 14:52:16 +0200 Subject: [PATCH 156/310] Bump Terraform provider to v1.28.0 (#871) ## Changes Regenerate structs for Terraform provider v1.28.0 ([release](https://github.com/databricks/terraform-provider-databricks/releases/tag/v1.28.0)). ## Tests n/a --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../internal/tf/schema/data_source_cluster.go | 5 +++ .../tf/schema/data_source_current_user.go | 15 ++++---- .../internal/tf/schema/data_source_group.go | 1 + bundle/internal/tf/schema/data_source_job.go | 20 +++++++++-- .../schema/data_source_service_principal.go | 17 +++++----- bundle/internal/tf/schema/data_source_user.go | 19 ++++++----- bundle/internal/tf/schema/resource_catalog.go | 24 +++++++------ bundle/internal/tf/schema/resource_cluster.go | 5 +++ .../tf/schema/resource_external_location.go | 34 +++++++++++++------ bundle/internal/tf/schema/resource_grants.go | 2 ++ bundle/internal/tf/schema/resource_job.go | 20 +++++++++-- .../internal/tf/schema/resource_metastore.go | 2 ++ .../schema/resource_metastore_data_access.go | 10 ++++-- .../tf/schema/resource_mlflow_model.go | 4 +-- .../tf/schema/resource_model_serving.go | 7 ++++ .../internal/tf/schema/resource_pipeline.go | 5 +++ .../tf/schema/resource_registered_model.go | 12 +++++++ bundle/internal/tf/schema/resource_share.go | 1 + .../internal/tf/schema/resource_sql_alert.go | 27 ++++++++------- .../tf/schema/resource_sql_dashboard.go | 11 +++--- .../internal/tf/schema/resource_sql_query.go | 2 ++ .../internal/tf/schema/resource_sql_table.go | 2 +- .../tf/schema/resource_storage_credential.go | 6 +++- .../tf/schema/resource_system_schema.go | 10 ++++++ bundle/internal/tf/schema/resources.go | 6 ++++ 26 files changed, 196 insertions(+), 73 deletions(-) create mode 100644 bundle/internal/tf/schema/resource_registered_model.go create mode 100644 bundle/internal/tf/schema/resource_system_schema.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 84456731..c82218fc 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.23.0" +const ProviderVersion = "1.28.0" diff --git a/bundle/internal/tf/schema/data_source_cluster.go b/bundle/internal/tf/schema/data_source_cluster.go index 2aa6fb5d..ce1ad034 100644 --- a/bundle/internal/tf/schema/data_source_cluster.go +++ b/bundle/internal/tf/schema/data_source_cluster.go @@ -121,6 +121,10 @@ type DataSourceClusterClusterInfoInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type DataSourceClusterClusterInfoInitScriptsVolumes struct { + Destination string `json:"destination,omitempty"` +} + type DataSourceClusterClusterInfoInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -131,6 +135,7 @@ type DataSourceClusterClusterInfoInitScripts struct { File *DataSourceClusterClusterInfoInitScriptsFile `json:"file,omitempty"` Gcs *DataSourceClusterClusterInfoInitScriptsGcs `json:"gcs,omitempty"` S3 *DataSourceClusterClusterInfoInitScriptsS3 `json:"s3,omitempty"` + Volumes *DataSourceClusterClusterInfoInitScriptsVolumes `json:"volumes,omitempty"` Workspace *DataSourceClusterClusterInfoInitScriptsWorkspace `json:"workspace,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_current_user.go 
b/bundle/internal/tf/schema/data_source_current_user.go index 854a83b8..d59e6461 100644 --- a/bundle/internal/tf/schema/data_source_current_user.go +++ b/bundle/internal/tf/schema/data_source_current_user.go @@ -3,11 +3,12 @@ package schema type DataSourceCurrentUser struct { - Alphanumeric string `json:"alphanumeric,omitempty"` - ExternalId string `json:"external_id,omitempty"` - Home string `json:"home,omitempty"` - Id string `json:"id,omitempty"` - Repos string `json:"repos,omitempty"` - UserName string `json:"user_name,omitempty"` - WorkspaceUrl string `json:"workspace_url,omitempty"` + AclPrincipalId string `json:"acl_principal_id,omitempty"` + Alphanumeric string `json:"alphanumeric,omitempty"` + ExternalId string `json:"external_id,omitempty"` + Home string `json:"home,omitempty"` + Id string `json:"id,omitempty"` + Repos string `json:"repos,omitempty"` + UserName string `json:"user_name,omitempty"` + WorkspaceUrl string `json:"workspace_url,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_group.go b/bundle/internal/tf/schema/data_source_group.go index 64b1d690..862f3eac 100644 --- a/bundle/internal/tf/schema/data_source_group.go +++ b/bundle/internal/tf/schema/data_source_group.go @@ -3,6 +3,7 @@ package schema type DataSourceGroup struct { + AclPrincipalId string `json:"acl_principal_id,omitempty"` AllowClusterCreate bool `json:"allow_cluster_create,omitempty"` AllowInstancePoolCreate bool `json:"allow_instance_pool_create,omitempty"` ChildGroups []string `json:"child_groups,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index d251dfe5..569c8b81 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -155,6 +155,10 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsVolumes struct { + Destination string `json:"destination,omitempty"` +} + type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -165,6 +169,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScripts struct { File *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsFile `json:"file,omitempty"` Gcs *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsGcs `json:"gcs,omitempty"` S3 *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsVolumes `json:"volumes,omitempty"` Workspace *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } @@ -337,6 +342,10 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type DataSourceJobJobSettingsSettingsNewClusterInitScriptsVolumes struct { + Destination string `json:"destination,omitempty"` +} + type DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -347,6 +356,7 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScripts struct { File *DataSourceJobJobSettingsSettingsNewClusterInitScriptsFile `json:"file,omitempty"` Gcs *DataSourceJobJobSettingsSettingsNewClusterInitScriptsGcs `json:"gcs,omitempty"` S3 *DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 `json:"s3,omitempty"` + 
Volumes *DataSourceJobJobSettingsSettingsNewClusterInitScriptsVolumes `json:"volumes,omitempty"` Workspace *DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } @@ -421,6 +431,7 @@ type DataSourceJobJobSettingsSettingsPythonWheelTask struct { } type DataSourceJobJobSettingsSettingsQueue struct { + Enabled bool `json:"enabled"` } type DataSourceJobJobSettingsSettingsRunAs struct { @@ -429,7 +440,7 @@ type DataSourceJobJobSettingsSettingsRunAs struct { } type DataSourceJobJobSettingsSettingsRunJobTask struct { - JobId string `json:"job_id"` + JobId int `json:"job_id"` JobParameters map[string]string `json:"job_parameters,omitempty"` } @@ -616,6 +627,10 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsVolumes struct { + Destination string `json:"destination,omitempty"` +} + type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -626,6 +641,7 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScripts struct { File *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsFile `json:"file,omitempty"` Gcs *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"` S3 *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsVolumes `json:"volumes,omitempty"` Workspace *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } @@ -696,7 +712,7 @@ type DataSourceJobJobSettingsSettingsTaskPythonWheelTask struct { } type DataSourceJobJobSettingsSettingsTaskRunJobTask struct { - JobId string `json:"job_id"` + JobId int `json:"job_id"` JobParameters map[string]string `json:"job_parameters,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_service_principal.go b/bundle/internal/tf/schema/data_source_service_principal.go index 83d2d131..7d29ea42 100644 --- a/bundle/internal/tf/schema/data_source_service_principal.go +++ b/bundle/internal/tf/schema/data_source_service_principal.go @@ -3,12 +3,13 @@ package schema type DataSourceServicePrincipal struct { - Active bool `json:"active,omitempty"` - ApplicationId string `json:"application_id,omitempty"` - DisplayName string `json:"display_name,omitempty"` - ExternalId string `json:"external_id,omitempty"` - Home string `json:"home,omitempty"` - Id string `json:"id,omitempty"` - Repos string `json:"repos,omitempty"` - SpId string `json:"sp_id,omitempty"` + AclPrincipalId string `json:"acl_principal_id,omitempty"` + Active bool `json:"active,omitempty"` + ApplicationId string `json:"application_id,omitempty"` + DisplayName string `json:"display_name,omitempty"` + ExternalId string `json:"external_id,omitempty"` + Home string `json:"home,omitempty"` + Id string `json:"id,omitempty"` + Repos string `json:"repos,omitempty"` + SpId string `json:"sp_id,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_user.go b/bundle/internal/tf/schema/data_source_user.go index 3e3272e7..78981f29 100644 --- a/bundle/internal/tf/schema/data_source_user.go +++ b/bundle/internal/tf/schema/data_source_user.go @@ -3,13 +3,14 @@ package schema type DataSourceUser struct { - Alphanumeric string `json:"alphanumeric,omitempty"` - ApplicationId string `json:"application_id,omitempty"` - DisplayName string `json:"display_name,omitempty"` - ExternalId 
string `json:"external_id,omitempty"` - Home string `json:"home,omitempty"` - Id string `json:"id,omitempty"` - Repos string `json:"repos,omitempty"` - UserId string `json:"user_id,omitempty"` - UserName string `json:"user_name,omitempty"` + AclPrincipalId string `json:"acl_principal_id,omitempty"` + Alphanumeric string `json:"alphanumeric,omitempty"` + ApplicationId string `json:"application_id,omitempty"` + DisplayName string `json:"display_name,omitempty"` + ExternalId string `json:"external_id,omitempty"` + Home string `json:"home,omitempty"` + Id string `json:"id,omitempty"` + Repos string `json:"repos,omitempty"` + UserId string `json:"user_id,omitempty"` + UserName string `json:"user_name,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_catalog.go b/bundle/internal/tf/schema/resource_catalog.go index 0ae59114..a54f1c27 100644 --- a/bundle/internal/tf/schema/resource_catalog.go +++ b/bundle/internal/tf/schema/resource_catalog.go @@ -3,15 +3,17 @@ package schema type ResourceCatalog struct { - Comment string `json:"comment,omitempty"` - ForceDestroy bool `json:"force_destroy,omitempty"` - Id string `json:"id,omitempty"` - IsolationMode string `json:"isolation_mode,omitempty"` - MetastoreId string `json:"metastore_id,omitempty"` - Name string `json:"name"` - Owner string `json:"owner,omitempty"` - Properties map[string]string `json:"properties,omitempty"` - ProviderName string `json:"provider_name,omitempty"` - ShareName string `json:"share_name,omitempty"` - StorageRoot string `json:"storage_root,omitempty"` + Comment string `json:"comment,omitempty"` + ConnectionName string `json:"connection_name,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` + Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Options map[string]string `json:"options,omitempty"` + Owner string `json:"owner,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + ProviderName string `json:"provider_name,omitempty"` + ShareName string `json:"share_name,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index bb4e3582..1d5a5ef2 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -99,6 +99,10 @@ type ResourceClusterInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type ResourceClusterInitScriptsVolumes struct { + Destination string `json:"destination,omitempty"` +} + type ResourceClusterInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -109,6 +113,7 @@ type ResourceClusterInitScripts struct { File *ResourceClusterInitScriptsFile `json:"file,omitempty"` Gcs *ResourceClusterInitScriptsGcs `json:"gcs,omitempty"` S3 *ResourceClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *ResourceClusterInitScriptsVolumes `json:"volumes,omitempty"` Workspace *ResourceClusterInitScriptsWorkspace `json:"workspace,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_external_location.go b/bundle/internal/tf/schema/resource_external_location.go index 24e3fd0a..af64c677 100644 --- a/bundle/internal/tf/schema/resource_external_location.go +++ b/bundle/internal/tf/schema/resource_external_location.go @@ -2,15 +2,27 @@ package schema -type ResourceExternalLocation struct { - Comment string `json:"comment,omitempty"` - CredentialName 
string `json:"credential_name"` - ForceDestroy bool `json:"force_destroy,omitempty"` - Id string `json:"id,omitempty"` - MetastoreId string `json:"metastore_id,omitempty"` - Name string `json:"name"` - Owner string `json:"owner,omitempty"` - ReadOnly bool `json:"read_only,omitempty"` - SkipValidation bool `json:"skip_validation,omitempty"` - Url string `json:"url"` +type ResourceExternalLocationEncryptionDetailsSseEncryptionDetails struct { + Algorithm string `json:"algorithm,omitempty"` + AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"` +} + +type ResourceExternalLocationEncryptionDetails struct { + SseEncryptionDetails *ResourceExternalLocationEncryptionDetailsSseEncryptionDetails `json:"sse_encryption_details,omitempty"` +} + +type ResourceExternalLocation struct { + AccessPoint string `json:"access_point,omitempty"` + Comment string `json:"comment,omitempty"` + CredentialName string `json:"credential_name"` + ForceDestroy bool `json:"force_destroy,omitempty"` + ForceUpdate bool `json:"force_update,omitempty"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Owner string `json:"owner,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` + SkipValidation bool `json:"skip_validation,omitempty"` + Url string `json:"url"` + EncryptionDetails *ResourceExternalLocationEncryptionDetails `json:"encryption_details,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_grants.go b/bundle/internal/tf/schema/resource_grants.go index fb754cc6..09b958f8 100644 --- a/bundle/internal/tf/schema/resource_grants.go +++ b/bundle/internal/tf/schema/resource_grants.go @@ -10,10 +10,12 @@ type ResourceGrantsGrant struct { type ResourceGrants struct { Catalog string `json:"catalog,omitempty"` ExternalLocation string `json:"external_location,omitempty"` + ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` Id string `json:"id,omitempty"` MaterializedView string `json:"materialized_view,omitempty"` Metastore string `json:"metastore,omitempty"` + Model string `json:"model,omitempty"` Schema string `json:"schema,omitempty"` Share string `json:"share,omitempty"` StorageCredential string `json:"storage_credential,omitempty"` diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 50101400..7af07560 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -155,6 +155,10 @@ type ResourceJobJobClusterNewClusterInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type ResourceJobJobClusterNewClusterInitScriptsVolumes struct { + Destination string `json:"destination,omitempty"` +} + type ResourceJobJobClusterNewClusterInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -165,6 +169,7 @@ type ResourceJobJobClusterNewClusterInitScripts struct { File *ResourceJobJobClusterNewClusterInitScriptsFile `json:"file,omitempty"` Gcs *ResourceJobJobClusterNewClusterInitScriptsGcs `json:"gcs,omitempty"` S3 *ResourceJobJobClusterNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *ResourceJobJobClusterNewClusterInitScriptsVolumes `json:"volumes,omitempty"` Workspace *ResourceJobJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } @@ -337,6 +342,10 @@ type ResourceJobNewClusterInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type ResourceJobNewClusterInitScriptsVolumes struct { + Destination string 
`json:"destination,omitempty"` +} + type ResourceJobNewClusterInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -347,6 +356,7 @@ type ResourceJobNewClusterInitScripts struct { File *ResourceJobNewClusterInitScriptsFile `json:"file,omitempty"` Gcs *ResourceJobNewClusterInitScriptsGcs `json:"gcs,omitempty"` S3 *ResourceJobNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *ResourceJobNewClusterInitScriptsVolumes `json:"volumes,omitempty"` Workspace *ResourceJobNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } @@ -421,6 +431,7 @@ type ResourceJobPythonWheelTask struct { } type ResourceJobQueue struct { + Enabled bool `json:"enabled"` } type ResourceJobRunAs struct { @@ -429,7 +440,7 @@ type ResourceJobRunAs struct { } type ResourceJobRunJobTask struct { - JobId string `json:"job_id"` + JobId int `json:"job_id"` JobParameters map[string]string `json:"job_parameters,omitempty"` } @@ -616,6 +627,10 @@ type ResourceJobTaskNewClusterInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type ResourceJobTaskNewClusterInitScriptsVolumes struct { + Destination string `json:"destination,omitempty"` +} + type ResourceJobTaskNewClusterInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -626,6 +641,7 @@ type ResourceJobTaskNewClusterInitScripts struct { File *ResourceJobTaskNewClusterInitScriptsFile `json:"file,omitempty"` Gcs *ResourceJobTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"` S3 *ResourceJobTaskNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *ResourceJobTaskNewClusterInitScriptsVolumes `json:"volumes,omitempty"` Workspace *ResourceJobTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } @@ -696,7 +712,7 @@ type ResourceJobTaskPythonWheelTask struct { } type ResourceJobTaskRunJobTask struct { - JobId string `json:"job_id"` + JobId int `json:"job_id"` JobParameters map[string]string `json:"job_parameters,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_metastore.go b/bundle/internal/tf/schema/resource_metastore.go index 24e50265..3561d2bf 100644 --- a/bundle/internal/tf/schema/resource_metastore.go +++ b/bundle/internal/tf/schema/resource_metastore.go @@ -13,10 +13,12 @@ type ResourceMetastore struct { ForceDestroy bool `json:"force_destroy,omitempty"` GlobalMetastoreId string `json:"global_metastore_id,omitempty"` Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name"` Owner string `json:"owner,omitempty"` Region string `json:"region,omitempty"` StorageRoot string `json:"storage_root"` + StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"` UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_metastore_data_access.go b/bundle/internal/tf/schema/resource_metastore_data_access.go index 6d26776d..86df5e4b 100644 --- a/bundle/internal/tf/schema/resource_metastore_data_access.go +++ b/bundle/internal/tf/schema/resource_metastore_data_access.go @@ -8,6 +8,8 @@ type ResourceMetastoreDataAccessAwsIamRole struct { type ResourceMetastoreDataAccessAzureManagedIdentity struct { AccessConnectorId string `json:"access_connector_id"` + CredentialId string `json:"credential_id,omitempty"` + ManagedIdentityId string `json:"managed_identity_id,omitempty"` } type ResourceMetastoreDataAccessAzureServicePrincipal struct { @@ -17,7 +19,8 @@ type ResourceMetastoreDataAccessAzureServicePrincipal struct { } type 
ResourceMetastoreDataAccessDatabricksGcpServiceAccount struct { - Email string `json:"email,omitempty"` + CredentialId string `json:"credential_id,omitempty"` + Email string `json:"email,omitempty"` } type ResourceMetastoreDataAccessGcpServiceAccountKey struct { @@ -27,11 +30,14 @@ type ResourceMetastoreDataAccessGcpServiceAccountKey struct { } type ResourceMetastoreDataAccess struct { - ConfigurationType string `json:"configuration_type,omitempty"` + Comment string `json:"comment,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` Id string `json:"id,omitempty"` IsDefault bool `json:"is_default,omitempty"` MetastoreId string `json:"metastore_id"` Name string `json:"name"` + Owner string `json:"owner,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` AwsIamRole *ResourceMetastoreDataAccessAwsIamRole `json:"aws_iam_role,omitempty"` AzureManagedIdentity *ResourceMetastoreDataAccessAzureManagedIdentity `json:"azure_managed_identity,omitempty"` AzureServicePrincipal *ResourceMetastoreDataAccessAzureServicePrincipal `json:"azure_service_principal,omitempty"` diff --git a/bundle/internal/tf/schema/resource_mlflow_model.go b/bundle/internal/tf/schema/resource_mlflow_model.go index 90c1f61d..406c124f 100644 --- a/bundle/internal/tf/schema/resource_mlflow_model.go +++ b/bundle/internal/tf/schema/resource_mlflow_model.go @@ -3,8 +3,8 @@ package schema type ResourceMlflowModelTags struct { - Key string `json:"key"` - Value string `json:"value"` + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` } type ResourceMlflowModel struct { diff --git a/bundle/internal/tf/schema/resource_model_serving.go b/bundle/internal/tf/schema/resource_model_serving.go index cc5c3257..b0cabbe5 100644 --- a/bundle/internal/tf/schema/resource_model_serving.go +++ b/bundle/internal/tf/schema/resource_model_serving.go @@ -10,6 +10,7 @@ type ResourceModelServingConfigServedModels struct { Name string `json:"name,omitempty"` ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"` WorkloadSize string `json:"workload_size"` + WorkloadType string `json:"workload_type,omitempty"` } type ResourceModelServingConfigTrafficConfigRoutes struct { @@ -26,9 +27,15 @@ type ResourceModelServingConfig struct { TrafficConfig *ResourceModelServingConfigTrafficConfig `json:"traffic_config,omitempty"` } +type ResourceModelServingTags struct { + Key string `json:"key"` + Value string `json:"value,omitempty"` +} + type ResourceModelServing struct { Id string `json:"id,omitempty"` Name string `json:"name"` ServingEndpointId string `json:"serving_endpoint_id,omitempty"` Config *ResourceModelServingConfig `json:"config,omitempty"` + Tags []ResourceModelServingTags `json:"tags,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 5c5de9a7..72354f62 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -77,6 +77,10 @@ type ResourcePipelineClusterInitScriptsS3 struct { Region string `json:"region,omitempty"` } +type ResourcePipelineClusterInitScriptsVolumes struct { + Destination string `json:"destination,omitempty"` +} + type ResourcePipelineClusterInitScriptsWorkspace struct { Destination string `json:"destination,omitempty"` } @@ -87,6 +91,7 @@ type ResourcePipelineClusterInitScripts struct { File *ResourcePipelineClusterInitScriptsFile `json:"file,omitempty"` Gcs *ResourcePipelineClusterInitScriptsGcs `json:"gcs,omitempty"` S3 *ResourcePipelineClusterInitScriptsS3 
`json:"s3,omitempty"` + Volumes *ResourcePipelineClusterInitScriptsVolumes `json:"volumes,omitempty"` Workspace *ResourcePipelineClusterInitScriptsWorkspace `json:"workspace,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_registered_model.go b/bundle/internal/tf/schema/resource_registered_model.go new file mode 100644 index 00000000..e4f1c088 --- /dev/null +++ b/bundle/internal/tf/schema/resource_registered_model.go @@ -0,0 +1,12 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceRegisteredModel struct { + CatalogName string `json:"catalog_name"` + Comment string `json:"comment,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + SchemaName string `json:"schema_name"` + StorageLocation string `json:"storage_location,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_share.go b/bundle/internal/tf/schema/resource_share.go index 72d7f810..e531e777 100644 --- a/bundle/internal/tf/schema/resource_share.go +++ b/bundle/internal/tf/schema/resource_share.go @@ -32,5 +32,6 @@ type ResourceShare struct { CreatedBy string `json:"created_by,omitempty"` Id string `json:"id,omitempty"` Name string `json:"name"` + Owner string `json:"owner,omitempty"` Object []ResourceShareObject `json:"object,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_sql_alert.go b/bundle/internal/tf/schema/resource_sql_alert.go index 22a369d4..a9768fd8 100644 --- a/bundle/internal/tf/schema/resource_sql_alert.go +++ b/bundle/internal/tf/schema/resource_sql_alert.go @@ -3,19 +3,22 @@ package schema type ResourceSqlAlertOptions struct { - Column string `json:"column"` - CustomBody string `json:"custom_body,omitempty"` - CustomSubject string `json:"custom_subject,omitempty"` - Muted bool `json:"muted,omitempty"` - Op string `json:"op"` - Value string `json:"value"` + Column string `json:"column"` + CustomBody string `json:"custom_body,omitempty"` + CustomSubject string `json:"custom_subject,omitempty"` + EmptyResultState string `json:"empty_result_state,omitempty"` + Muted bool `json:"muted,omitempty"` + Op string `json:"op"` + Value string `json:"value"` } type ResourceSqlAlert struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - Parent string `json:"parent,omitempty"` - QueryId string `json:"query_id"` - Rearm int `json:"rearm,omitempty"` - Options *ResourceSqlAlertOptions `json:"options,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + Parent string `json:"parent,omitempty"` + QueryId string `json:"query_id"` + Rearm int `json:"rearm,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` + Options *ResourceSqlAlertOptions `json:"options,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_sql_dashboard.go b/bundle/internal/tf/schema/resource_sql_dashboard.go index 5c3bd896..fc97bbde 100644 --- a/bundle/internal/tf/schema/resource_sql_dashboard.go +++ b/bundle/internal/tf/schema/resource_sql_dashboard.go @@ -3,8 +3,11 @@ package schema type ResourceSqlDashboard struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - Parent string `json:"parent,omitempty"` - Tags []string `json:"tags,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + DashboardFiltersEnabled bool `json:"dashboard_filters_enabled,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + Parent string `json:"parent,omitempty"` + Tags []string `json:"tags,omitempty"` + UpdatedAt string 
`json:"updated_at,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_sql_query.go b/bundle/internal/tf/schema/resource_sql_query.go index 5016d8d7..27c653fc 100644 --- a/bundle/internal/tf/schema/resource_sql_query.go +++ b/bundle/internal/tf/schema/resource_sql_query.go @@ -118,6 +118,7 @@ type ResourceSqlQuerySchedule struct { } type ResourceSqlQuery struct { + CreatedAt string `json:"created_at,omitempty"` DataSourceId string `json:"data_source_id"` Description string `json:"description,omitempty"` Id string `json:"id,omitempty"` @@ -126,6 +127,7 @@ type ResourceSqlQuery struct { Query string `json:"query"` RunAsRole string `json:"run_as_role,omitempty"` Tags []string `json:"tags,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` Parameter []ResourceSqlQueryParameter `json:"parameter,omitempty"` Schedule *ResourceSqlQuerySchedule `json:"schedule,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_sql_table.go b/bundle/internal/tf/schema/resource_sql_table.go index 2891975c..077645c2 100644 --- a/bundle/internal/tf/schema/resource_sql_table.go +++ b/bundle/internal/tf/schema/resource_sql_table.go @@ -6,7 +6,7 @@ type ResourceSqlTableColumn struct { Comment string `json:"comment,omitempty"` Name string `json:"name"` Nullable bool `json:"nullable,omitempty"` - Type string `json:"type"` + Type string `json:"type,omitempty"` } type ResourceSqlTable struct { diff --git a/bundle/internal/tf/schema/resource_storage_credential.go b/bundle/internal/tf/schema/resource_storage_credential.go index eab5810b..1687c79a 100644 --- a/bundle/internal/tf/schema/resource_storage_credential.go +++ b/bundle/internal/tf/schema/resource_storage_credential.go @@ -8,6 +8,8 @@ type ResourceStorageCredentialAwsIamRole struct { type ResourceStorageCredentialAzureManagedIdentity struct { AccessConnectorId string `json:"access_connector_id"` + CredentialId string `json:"credential_id,omitempty"` + ManagedIdentityId string `json:"managed_identity_id,omitempty"` } type ResourceStorageCredentialAzureServicePrincipal struct { @@ -17,7 +19,8 @@ type ResourceStorageCredentialAzureServicePrincipal struct { } type ResourceStorageCredentialDatabricksGcpServiceAccount struct { - Email string `json:"email,omitempty"` + CredentialId string `json:"credential_id,omitempty"` + Email string `json:"email,omitempty"` } type ResourceStorageCredentialGcpServiceAccountKey struct { @@ -28,6 +31,7 @@ type ResourceStorageCredentialGcpServiceAccountKey struct { type ResourceStorageCredential struct { Comment string `json:"comment,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` Id string `json:"id,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name"` diff --git a/bundle/internal/tf/schema/resource_system_schema.go b/bundle/internal/tf/schema/resource_system_schema.go new file mode 100644 index 00000000..09a86103 --- /dev/null +++ b/bundle/internal/tf/schema/resource_system_schema.go @@ -0,0 +1,10 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceSystemSchema struct { + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Schema string `json:"schema,omitempty"` + State string `json:"state,omitempty"` +} diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index c2361254..cf98f9a9 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -12,6 +12,7 @@ type Resources struct { CatalogWorkspaceBinding map[string]*ResourceCatalogWorkspaceBinding `json:"databricks_catalog_workspace_binding,omitempty"` Cluster map[string]*ResourceCluster `json:"databricks_cluster,omitempty"` ClusterPolicy map[string]*ResourceClusterPolicy `json:"databricks_cluster_policy,omitempty"` + Connection map[string]*ResourceConnection `json:"databricks_connection,omitempty"` DbfsFile map[string]*ResourceDbfsFile `json:"databricks_dbfs_file,omitempty"` Directory map[string]*ResourceDirectory `json:"databricks_directory,omitempty"` Entitlements map[string]*ResourceEntitlements `json:"databricks_entitlements,omitempty"` @@ -52,6 +53,7 @@ type Resources struct { Pipeline map[string]*ResourcePipeline `json:"databricks_pipeline,omitempty"` Provider map[string]*ResourceProvider `json:"databricks_provider,omitempty"` Recipient map[string]*ResourceRecipient `json:"databricks_recipient,omitempty"` + RegisteredModel map[string]*ResourceRegisteredModel `json:"databricks_registered_model,omitempty"` Repo map[string]*ResourceRepo `json:"databricks_repo,omitempty"` Schema map[string]*ResourceSchema `json:"databricks_schema,omitempty"` Secret map[string]*ResourceSecret `json:"databricks_secret,omitempty"` @@ -71,6 +73,7 @@ type Resources struct { SqlVisualization map[string]*ResourceSqlVisualization `json:"databricks_sql_visualization,omitempty"` SqlWidget map[string]*ResourceSqlWidget `json:"databricks_sql_widget,omitempty"` StorageCredential map[string]*ResourceStorageCredential `json:"databricks_storage_credential,omitempty"` + SystemSchema map[string]*ResourceSystemSchema `json:"databricks_system_schema,omitempty"` Table map[string]*ResourceTable `json:"databricks_table,omitempty"` Token map[string]*ResourceToken `json:"databricks_token,omitempty"` User map[string]*ResourceUser `json:"databricks_user,omitempty"` @@ -92,6 +95,7 @@ func NewResources() *Resources { CatalogWorkspaceBinding: make(map[string]*ResourceCatalogWorkspaceBinding), Cluster: make(map[string]*ResourceCluster), ClusterPolicy: make(map[string]*ResourceClusterPolicy), + Connection: make(map[string]*ResourceConnection), DbfsFile: make(map[string]*ResourceDbfsFile), Directory: make(map[string]*ResourceDirectory), Entitlements: make(map[string]*ResourceEntitlements), @@ -132,6 +136,7 @@ func NewResources() *Resources { Pipeline: make(map[string]*ResourcePipeline), Provider: make(map[string]*ResourceProvider), Recipient: make(map[string]*ResourceRecipient), + RegisteredModel: make(map[string]*ResourceRegisteredModel), Repo: make(map[string]*ResourceRepo), Schema: make(map[string]*ResourceSchema), Secret: make(map[string]*ResourceSecret), @@ -151,6 +156,7 @@ func NewResources() *Resources { SqlVisualization: make(map[string]*ResourceSqlVisualization), SqlWidget: make(map[string]*ResourceSqlWidget), StorageCredential: make(map[string]*ResourceStorageCredential), + SystemSchema: make(map[string]*ResourceSystemSchema), Table: make(map[string]*ResourceTable), Token: make(map[string]*ResourceToken), User: make(map[string]*ResourceUser), From 
e0e9046cf33a26a283e29b818873c0c648cd7ce5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 15:58:53 +0200 Subject: [PATCH 157/310] Bump github.com/hashicorp/hc-install from 0.6.0 to 0.6.1 (#870) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/hc-install](https://github.com/hashicorp/hc-install) from 0.6.0 to 0.6.1.
Release notes

Sourced from github.com/hashicorp/hc-install's releases.

v0.6.1: bug fixes, internal changes, and new contributors.

Full Changelog: https://github.com/hashicorp/hc-install/compare/v0.6.0...v0.6.1

Commits
  • 76f347d Prepare for 0.6.1 release
  • eab6333 LatestVersion - use version Constraints passed in (#159)
  • a7e09b6 build(deps): bump golang.org/x/mod from 0.12.0 to 0.13.0 (#158)
  • 77b2d8a build(deps): bump actions/checkout from 4.0.0 to 4.1.0 (#157)
  • 6efd715 Ensure go-version is 1.20.8 (#156)
  • 456c6f9 build(deps): bump github.com/go-git/go-git/v5 from 5.8.1 to 5.9.0 (#155)
  • 8c1e239 build(deps): bump actions/upload-artifact from 3.1.2 to 3.1.3 (#154)
  • c8937cf build(deps): bump actions/checkout from 3.6.0 to 4.0.0 (#153)
  • b3ee57a Add back dev suffix to VERSION

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 18 ++++++++++-------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index ffa44f74..a732b9c1 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.3.1 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 - github.com/hashicorp/hc-install v0.6.0 // MPL 2.0 + github.com/hashicorp/hc-install v0.6.1 // MPL 2.0 github.com/hashicorp/terraform-exec v0.19.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.17.1 // MPL 2.0 github.com/imdario/mergo v0.3.15 // BSD-3-Clause @@ -34,7 +34,7 @@ require ( require ( cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect github.com/cloudflare/circl v1.3.3 // indirect diff --git a/go.sum b/go.sum index c9e4cd1f..3e88ad17 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs= -github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= @@ -29,6 +29,8 @@ github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEM github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/databricks/databricks-sdk-go v0.23.0 h1:rdLMA7cDUPJiCSMyuUSufzDDmugqyp79SNiY/vc7kMI= github.com/databricks/databricks-sdk-go v0.23.0/go.mod h1:a6rErRNh5bz+IJbO07nwW70iGyvtWidy1p/S5thepXI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -47,10 +49,10 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 
h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= -github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= -github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A= -github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= +github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -94,8 +96,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.0 h1:fDHnU7JNFNSQebVKYhHZ0va1bC6SrPQ8fpebsvNr2w4= -github.com/hashicorp/hc-install v0.6.0/go.mod h1:10I912u3nntx9Umo1VAeYPUUuehk0aRQJYpMwbX5wQA= +github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAtGG2mY= +github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE= github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= From 61cf4fbe8d8cdff1040f467abcd675af1eabfb7a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 16 Oct 2023 17:27:46 +0200 Subject: [PATCH 158/310] Propagate Terraform provider version into generated config (#874) ## Changes The preparations for this change were in place (see #713) but it wasn't actually used. 
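To make the mechanism concrete, here is a minimal, self-contained sketch of rendering the generated root config from a provider version value with Go's `text/template`. Only the `{{ .ProviderVersion }}` variable and the `1.28.0` value are taken from the diff below; the rest of the snippet is illustrative and not the CLI's actual code.

```go
package main

import (
	"os"
	"text/template"
)

// Sketch: the required_providers block references a template variable
// instead of hard-coding the provider version.
const rootTmpl = `"required_providers": {
  "databricks": {
    "source": "databricks/databricks",
    "version": "{{ .ProviderVersion }}"
  }
}
`

func main() {
	// The version would come from wherever the code generator pins the
	// Terraform provider; "1.28.0" matches the generated schema below.
	data := struct{ ProviderVersion string }{ProviderVersion: "1.28.0"}
	tmpl := template.Must(template.New("root").Parse(rootTmpl))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```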
## Tests n/a --- bundle/internal/tf/codegen/templates/root.go.tmpl | 2 +- bundle/internal/tf/schema/root.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bundle/internal/tf/codegen/templates/root.go.tmpl b/bundle/internal/tf/codegen/templates/root.go.tmpl index 3beb3007..5530427c 100644 --- a/bundle/internal/tf/codegen/templates/root.go.tmpl +++ b/bundle/internal/tf/codegen/templates/root.go.tmpl @@ -24,7 +24,7 @@ func NewRoot() *Root { "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ "source": "databricks/databricks", - "version": "1.23.0", + "version": "{{ .ProviderVersion }}", }, }, }, diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 3beb3007..74f4db1a 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -24,7 +24,7 @@ func NewRoot() *Root { "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ "source": "databricks/databricks", - "version": "1.23.0", + "version": "1.28.0", }, }, }, From 24cc67563e2d9935d2b5c2892a0f82eeef7964c2 Mon Sep 17 00:00:00 2001 From: Arpit Jasapara <87999496+arpitjasa-db@users.noreply.github.com> Date: Mon, 16 Oct 2023 08:32:49 -0700 Subject: [PATCH 159/310] Support Unity Catalog Registered Models in bundles (#846) ## Changes Add UC Registered Models support to Databricks Asset Bundles as new resource `registered_model`. Also added UC Permission support via new resource `grant`. ## Tests Tested via unit tests and manual testing with [example PR](https://github.com/databricks/bundle-examples-internal/pull/80) and [custom Terraform provider](https://github.com/databricks/terraform-provider-databricks/pull/2771). --------- Signed-off-by: Arpit Jasapara Co-authored-by: Andrew Nester Co-authored-by: Pieter Noordhuis --- Makefile | 3 +- bundle/config/mutator/process_target_mode.go | 6 ++ .../mutator/process_target_mode_test.go | 11 +++- bundle/config/resources.go | 17 ++++++ bundle/config/resources/grant.go | 9 +++ .../resources/model_serving_endpoint.go | 4 +- bundle/config/resources/registered_model.go | 34 +++++++++++ bundle/config/resources_test.go | 30 ++++++++++ bundle/deploy/terraform/convert.go | 36 ++++++++++++ bundle/deploy/terraform/convert_test.go | 56 +++++++++++++++++++ bundle/deploy/terraform/interpolate.go | 3 + bundle/schema/openapi.go | 18 ++++++ bundle/tests/registered_model/databricks.yml | 32 +++++++++++ bundle/tests/registered_model_test.go | 47 ++++++++++++++++ 14 files changed, 302 insertions(+), 4 deletions(-) create mode 100644 bundle/config/resources/grant.go create mode 100644 bundle/config/resources/registered_model.go create mode 100644 bundle/tests/registered_model/databricks.yml create mode 100644 bundle/tests/registered_model_test.go diff --git a/Makefile b/Makefile index 6067d45b..3c55b8cf 100644 --- a/Makefile +++ b/Makefile @@ -30,4 +30,5 @@ vendor: @echo "✓ Filling vendor folder with library code ..."
@go mod vendor -.PHONY: build vendor coverage test lint fmt \ No newline at end of file +.PHONY: build vendor coverage test lint fmt + diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 2f80fe3b..c11bd1c5 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -87,6 +87,12 @@ func transformDevelopmentMode(b *bundle.Bundle) error { // (model serving doesn't yet support tags) } + for i := range r.RegisteredModels { + prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" + r.RegisteredModels[i].Name = prefix + r.RegisteredModels[i].Name + // (registered models in Unity Catalog don't yet support tags) + } + return nil } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index a0b2bac8..a9da0b0f 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/tags" sdkconfig "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" @@ -59,6 +60,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle { ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ "servingendpoint1": {CreateServingEndpoint: &serving.CreateServingEndpoint{Name: "servingendpoint1"}}, }, + RegisteredModels: map[string]*resources.RegisteredModel{ + "registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}}, + }, }, }, // Use AWS implementation for testing. 
@@ -86,6 +90,7 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Experiment 1 assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev lennart] experiment1", bundle.Config.Resources.Experiments["experiment1"].Name) assert.Contains(t, bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"}) + assert.Equal(t, "dev", bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key) // Experiment 2 assert.Equal(t, "[dev lennart] experiment2", bundle.Config.Resources.Experiments["experiment2"].Name) @@ -96,7 +101,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Model serving endpoint 1 assert.Equal(t, "dev_lennart_servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) - assert.Equal(t, "dev", bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key) + + // Registered model 1 + assert.Equal(t, "dev_lennart_registeredmodel1", bundle.Config.Resources.RegisteredModels["registeredmodel1"].Name) } func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { @@ -151,6 +158,7 @@ func TestProcessTargetModeDefault(t *testing.T) { assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name) assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) assert.Equal(t, "servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) + assert.Equal(t, "registeredmodel1", bundle.Config.Resources.RegisteredModels["registeredmodel1"].Name) } func TestProcessTargetModeProduction(t *testing.T) { @@ -187,6 +195,7 @@ func TestProcessTargetModeProduction(t *testing.T) { assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name) assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) assert.Equal(t, "servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) + assert.Equal(t, "registeredmodel1", bundle.Config.Resources.RegisteredModels["registeredmodel1"].Name) } func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { diff --git a/bundle/config/resources.go b/bundle/config/resources.go index ad1d6e9a..2b453c66 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -14,6 +14,7 @@ type Resources struct { Models map[string]*resources.MlflowModel `json:"models,omitempty"` Experiments map[string]*resources.MlflowExperiment `json:"experiments,omitempty"` ModelServingEndpoints map[string]*resources.ModelServingEndpoint `json:"model_serving_endpoints,omitempty"` + RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"` } type UniqueResourceIdTracker struct { @@ -107,6 +108,19 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker, tracker.Type[k] = "model_serving_endpoint" tracker.ConfigPath[k] = r.ModelServingEndpoints[k].ConfigFilePath } + for k := range r.RegisteredModels { + if _, ok := tracker.Type[k]; ok { + return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)", + k, + tracker.Type[k], + tracker.ConfigPath[k], + "registered_model", + r.RegisteredModels[k].ConfigFilePath, + ) + } + tracker.Type[k] = "registered_model" + tracker.ConfigPath[k] = r.RegisteredModels[k].ConfigFilePath + } return tracker, nil } @@ -129,6 +143,9 @@ func (r *Resources) SetConfigFilePath(path string) { for _, e := range r.ModelServingEndpoints { e.ConfigFilePath = path } + for _, e := range r.RegisteredModels { 
+ e.ConfigFilePath = path + } } // Merge iterates over all resources and merges chunks of the diff --git a/bundle/config/resources/grant.go b/bundle/config/resources/grant.go new file mode 100644 index 00000000..f0ecd876 --- /dev/null +++ b/bundle/config/resources/grant.go @@ -0,0 +1,9 @@ +package resources + +// Grant holds the grant level settings for a single principal in Unity Catalog. +// Multiple of these can be defined on any Unity Catalog resource. +type Grant struct { + Privileges []string `json:"privileges"` + + Principal string `json:"principal"` +} diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index 3847e6a6..88a55ac8 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -15,8 +15,8 @@ type ModelServingEndpoint struct { // as a reference in other resources. This value is returned by terraform. ID string - // Local path where the bundle is defined. All bundle resources include - // this for interpolation purposes. + // Path to config file where the resource is defined. All bundle resources + // include this for interpolation purposes. paths.Paths // This is a resource agnostic implementation of permissions for ACLs. diff --git a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go new file mode 100644 index 00000000..32a451a2 --- /dev/null +++ b/bundle/config/resources/registered_model.go @@ -0,0 +1,34 @@ +package resources + +import ( + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/databricks-sdk-go/marshal" + "github.com/databricks/databricks-sdk-go/service/catalog" +) + +type RegisteredModel struct { + // This is a resource agnostic implementation of grants. + // Implementation could be different based on the resource type. + Grants []Grant `json:"grants,omitempty"` + + // This represents the id which is the full name of the model + // (catalog_name.schema_name.model_name) that can be used + // as a reference in other resources. This value is returned by terraform. + ID string + + // Path to config file where the resource is defined. All bundle resources + // include this for interpolation purposes. 
+ paths.Paths + + // This represents the input args for terraform, and will get converted + // to a HCL representation for CRUD + *catalog.CreateRegisteredModelRequest +} + +func (s *RegisteredModel) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModel) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/bundle/config/resources_test.go b/bundle/config/resources_test.go index 82cb9f45..9c4104e4 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -95,3 +95,33 @@ func TestVerifySafeMergeForSameResourceType(t *testing.T) { err := r.VerifySafeMerge(&other) assert.ErrorContains(t, err, "multiple resources named foo (job at foo.yml, job at foo2.yml)") } + +func TestVerifySafeMergeForRegisteredModels(t *testing.T) { + r := Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + Paths: paths.Paths{ + ConfigFilePath: "foo.yml", + }, + }, + }, + RegisteredModels: map[string]*resources.RegisteredModel{ + "bar": { + Paths: paths.Paths{ + ConfigFilePath: "bar.yml", + }, + }, + }, + } + other := Resources{ + RegisteredModels: map[string]*resources.RegisteredModel{ + "bar": { + Paths: paths.Paths{ + ConfigFilePath: "bar2.yml", + }, + }, + }, + } + err := r.VerifySafeMerge(&other) + assert.ErrorContains(t, err, "multiple resources named bar (registered_model at bar.yml, registered_model at bar2.yml)") +} diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 7d95e719..3bfc8b83 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -44,6 +44,22 @@ func convPermission(ac resources.Permission) schema.ResourcePermissionsAccessCon return dst } +func convGrants(acl []resources.Grant) *schema.ResourceGrants { + if len(acl) == 0 { + return nil + } + + resource := schema.ResourceGrants{} + for _, ac := range acl { + resource.Grant = append(resource.Grant, schema.ResourceGrantsGrant{ + Privileges: ac.Privileges, + Principal: ac.Principal, + }) + } + + return &resource +} + // BundleToTerraform converts resources in a bundle configuration // to the equivalent Terraform JSON representation. // @@ -174,6 +190,19 @@ func BundleToTerraform(config *config.Root) *schema.Root { } } + for k, src := range config.Resources.RegisteredModels { + noResources = false + var dst schema.ResourceRegisteredModel + conv(src, &dst) + tfroot.Resource.RegisteredModel[k] = &dst + + // Configure permissions for this resource. + if rp := convGrants(src.Grants); rp != nil { + rp.Function = fmt.Sprintf("${databricks_registered_model.%s.id}", k) + tfroot.Resource.Grants["registered_model_"+k] = rp + } + } + // We explicitly set "resource" to nil to omit it from a JSON encoding. // This is required because the terraform CLI requires >= 1 resources defined // if the "resource" property is used in a .tf.json file. @@ -221,7 +250,14 @@ func TerraformToBundle(state *tfjson.State, config *config.Root) error { cur := config.Resources.ModelServingEndpoints[resource.Name] conv(tmp, &cur) config.Resources.ModelServingEndpoints[resource.Name] = cur + case "databricks_registered_model": + var tmp schema.ResourceRegisteredModel + conv(resource.AttributeValues, &tmp) + cur := config.Resources.RegisteredModels[resource.Name] + conv(tmp, &cur) + config.Resources.RegisteredModels[resource.Name] = cur case "databricks_permissions": + case "databricks_grants": // Ignore; no need to pull these back into the configuration. 
default: return fmt.Errorf("missing mapping for %s", resource.Type) diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index b6b29f35..bb5a63ec 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" @@ -366,3 +367,58 @@ func TestConvertModelServingPermissions(t *testing.T) { assert.Equal(t, "CAN_VIEW", p.PermissionLevel) } + +func TestConvertRegisteredModel(t *testing.T) { + var src = resources.RegisteredModel{ + CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ + Name: "name", + CatalogName: "catalog", + SchemaName: "schema", + Comment: "comment", + }, + } + + var config = config.Root{ + Resources: config.Resources{ + RegisteredModels: map[string]*resources.RegisteredModel{ + "my_registered_model": &src, + }, + }, + } + + out := BundleToTerraform(&config) + resource := out.Resource.RegisteredModel["my_registered_model"] + assert.Equal(t, "name", resource.Name) + assert.Equal(t, "catalog", resource.CatalogName) + assert.Equal(t, "schema", resource.SchemaName) + assert.Equal(t, "comment", resource.Comment) + assert.Nil(t, out.Data) +} + +func TestConvertRegisteredModelGrants(t *testing.T) { + var src = resources.RegisteredModel{ + Grants: []resources.Grant{ + { + Privileges: []string{"EXECUTE"}, + Principal: "jane@doe.com", + }, + }, + } + + var config = config.Root{ + Resources: config.Resources{ + RegisteredModels: map[string]*resources.RegisteredModel{ + "my_registered_model": &src, + }, + }, + } + + out := BundleToTerraform(&config) + assert.NotEmpty(t, out.Resource.Grants["registered_model_my_registered_model"].Function) + assert.Len(t, out.Resource.Grants["registered_model_my_registered_model"].Grant, 1) + + p := out.Resource.Grants["registered_model_my_registered_model"].Grant[0] + assert.Equal(t, "jane@doe.com", p.Principal) + assert.Equal(t, "EXECUTE", p.Privileges[0]) + +} diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index ea3c99aa..4f00c27e 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -28,6 +28,9 @@ func interpolateTerraformResourceIdentifiers(path string, lookup map[string]stri case "model_serving_endpoints": path = strings.Join(append([]string{"databricks_model_serving"}, parts[2:]...), interpolation.Delimiter) return fmt.Sprintf("${%s}", path), nil + case "registered_models": + path = strings.Join(append([]string{"databricks_registered_model"}, parts[2:]...), interpolation.Delimiter) + return fmt.Sprintf("${%s}", path), nil default: panic("TODO: " + parts[1]) } diff --git a/bundle/schema/openapi.go b/bundle/schema/openapi.go index 1a8b76ed..0b64c43e 100644 --- a/bundle/schema/openapi.go +++ b/bundle/schema/openapi.go @@ -223,6 +223,19 @@ func (reader *OpenapiReader) modelServingEndpointsDocs() (*Docs, error) { return modelServingEndpointsAllDocs, nil } +func (reader *OpenapiReader) registeredModelDocs() (*Docs, error) { + registeredModelsSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "catalog.CreateRegisteredModelRequest") + if err != nil { + return nil, err + } + registeredModelsDocs := 
schemaToDocs(registeredModelsSpecSchema) + registeredModelsAllDocs := &Docs{ + Description: "List of Registered Models", + AdditionalProperties: registeredModelsDocs, + } + return registeredModelsAllDocs, nil +} + func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) { jobsDocs, err := reader.jobsDocs() if err != nil { @@ -244,6 +257,10 @@ func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) { if err != nil { return nil, err } + registeredModelsDocs, err := reader.registeredModelDocs() + if err != nil { + return nil, err + } return &Docs{ Description: "Collection of Databricks resources to deploy.", @@ -253,6 +270,7 @@ func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) { "experiments": experimentsDocs, "models": modelsDocs, "model_serving_endpoints": modelServingEndpointsDocs, + "registered_models": registeredModelsDocs, }, }, nil } diff --git a/bundle/tests/registered_model/databricks.yml b/bundle/tests/registered_model/databricks.yml new file mode 100644 index 00000000..b7b8ea5d --- /dev/null +++ b/bundle/tests/registered_model/databricks.yml @@ -0,0 +1,32 @@ +resources: + registered_models: + my_registered_model: + name: "my-model" + comment: "comment" + catalog_name: "main" + schema_name: "default" + grants: + - privileges: + - EXECUTE + principal: "account users" + +targets: + development: + mode: development + resources: + registered_models: + my_registered_model: + name: "my-dev-model" + + staging: + resources: + registered_models: + my_registered_model: + name: "my-staging-model" + + production: + mode: production + resources: + registered_models: + my_registered_model: + name: "my-prod-model" diff --git a/bundle/tests/registered_model_test.go b/bundle/tests/registered_model_test.go new file mode 100644 index 00000000..920a2ac7 --- /dev/null +++ b/bundle/tests/registered_model_test.go @@ -0,0 +1,47 @@ +package config_tests + +import ( + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/stretchr/testify/assert" +) + +func assertExpectedModel(t *testing.T, p *resources.RegisteredModel) { + assert.Equal(t, "registered_model/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) + assert.Equal(t, "main", p.CatalogName) + assert.Equal(t, "default", p.SchemaName) + assert.Equal(t, "comment", p.Comment) + assert.Equal(t, "account users", p.Grants[0].Principal) + assert.Equal(t, "EXECUTE", p.Grants[0].Privileges[0]) +} + +func TestRegisteredModelDevelopment(t *testing.T) { + b := loadTarget(t, "./registered_model", "development") + assert.Len(t, b.Config.Resources.RegisteredModels, 1) + assert.Equal(t, b.Config.Bundle.Mode, config.Development) + + p := b.Config.Resources.RegisteredModels["my_registered_model"] + assert.Equal(t, "my-dev-model", p.Name) + assertExpectedModel(t, p) +} + +func TestRegisteredModelStaging(t *testing.T) { + b := loadTarget(t, "./registered_model", "staging") + assert.Len(t, b.Config.Resources.RegisteredModels, 1) + + p := b.Config.Resources.RegisteredModels["my_registered_model"] + assert.Equal(t, "my-staging-model", p.Name) + assertExpectedModel(t, p) +} + +func TestRegisteredModelProduction(t *testing.T) { + b := loadTarget(t, "./registered_model", "production") + assert.Len(t, b.Config.Resources.RegisteredModels, 1) + + p := b.Config.Resources.RegisteredModels["my_registered_model"] + assert.Equal(t, "my-prod-model", p.Name) + assertExpectedModel(t, p) +} From da4397276b27d4e1d85f5ee7a3d7f5dc4cfb0c3f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis 
Date: Mon, 16 Oct 2023 18:54:55 +0200 Subject: [PATCH 160/310] Release v0.208.0 (#875) Note: this release includes a fix for the issue where zero values (for example `num_workers: 0`) were not included in the request body. CLI: * Use already instantiated WorkspaceClient in sync command ([#867](https://github.com/databricks/cli/pull/867)). Bundles: * Support Unity Catalog Registered Models in bundles ([#846](https://github.com/databricks/cli/pull/846)). * Fixed merging task libraries from targets ([#868](https://github.com/databricks/cli/pull/868)). * Add alias for mlops-stack template URL ([#869](https://github.com/databricks/cli/pull/869)). API Changes: * Changed `databricks account billable-usage download` command to start returning output. * Changed `databricks account storage-credentials delete` command with new required argument order. * Changed `databricks account storage-credentials get` command with new required argument order. * Changed `databricks account storage-credentials update` command with new required argument order. * Added `databricks workspace-bindings get-bindings` command. * Added `databricks workspace-bindings update-bindings` command. * Removed `databricks account network-policy` command group. * Changed `databricks ip-access-lists list` command to return output. OpenAPI commit 493a76554afd3afdd15dc858773d01643f80352a (2023-10-12) Dependency updates: * Update Go SDK to 0.23.0 and use custom marshaller ([#772](https://github.com/databricks/cli/pull/772)). * Bump Terraform provider to v1.28.0 ([#871](https://github.com/databricks/cli/pull/871)). * Bump golang.org/x/net from 0.16.0 to 0.17.0 ([#863](https://github.com/databricks/cli/pull/863)). * Bump github.com/hashicorp/hc-install from 0.6.0 to 0.6.1 ([#870](https://github.com/databricks/cli/pull/870)). --- CHANGELOG.md | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f1227f6..a52607b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,36 @@ # Version changelog +## 0.208.0 + +Note: this release includes a fix for the issue where zero values (for example +`num_workers: 0`) were not included in the request body. + +CLI: + * Use already instantiated WorkspaceClient in sync command ([#867](https://github.com/databricks/cli/pull/867)). + +Bundles: + * Support Unity Catalog Registered Models in bundles ([#846](https://github.com/databricks/cli/pull/846)). + * Fixed merging task libraries from targets ([#868](https://github.com/databricks/cli/pull/868)). + * Add alias for mlops-stack template URL ([#869](https://github.com/databricks/cli/pull/869)). + +API Changes: + * Changed `databricks account billable-usage download` command to start returning output. + * Changed `databricks account storage-credentials delete` command with new required argument order. + * Changed `databricks account storage-credentials get` command with new required argument order. + * Changed `databricks account storage-credentials update` command with new required argument order. + * Added `databricks workspace-bindings get-bindings` command. + * Added `databricks workspace-bindings update-bindings` command. + * Removed `databricks account network-policy` command group. + * Changed `databricks ip-access-lists list` command to return output. + +OpenAPI commit 493a76554afd3afdd15dc858773d01643f80352a (2023-10-12) + +Dependency updates: + * Update Go SDK to 0.23.0 and use custom marshaller ([#772](https://github.com/databricks/cli/pull/772)). 
+ * Bump Terraform provider to v1.28.0 ([#871](https://github.com/databricks/cli/pull/871)). + * Bump golang.org/x/net from 0.16.0 to 0.17.0 ([#863](https://github.com/databricks/cli/pull/863)). + * Bump github.com/hashicorp/hc-install from 0.6.0 to 0.6.1 ([#870](https://github.com/databricks/cli/pull/870)). + ## 0.207.1 CLI: From 4bf32cb666c6cd4c362d905b4d27d0c00875d5be Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 17 Oct 2023 12:07:55 +0200 Subject: [PATCH 161/310] Fix rendering of streaming response (#876) ## Changes The update to the Go SDK v0.23.0 in #772 included a change to make the billable usage API return its streaming response. This still did not make the command print out the CSV returned by the API, however. To do so, we call `cmdio.RenderReader` in case the response is a byte stream. Note: there is an opportunity to parse the CSV and return JSON if requested, but that is out of scope for this PR (it is a rather big customization of the command). Fixes #574. ## Tests Manually confirmed that `databricks account billable-usage download` now returns CSV. --- .codegen/service.go.tmpl | 11 ++++++++--- cmd/account/billable-usage/billable-usage.go | 3 ++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 4ede142d..1ee19acb 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -268,9 +268,14 @@ func init() { if err != nil { return err } - {{if .Response -}} - return cmdio.Render(ctx, response) - {{- else -}} + {{ if .Response -}} + {{- if .IsResponseByteStream -}} + defer response.{{.ResponseBodyField.PascalName}}.Close() + return cmdio.RenderReader(ctx, response.{{.ResponseBodyField.PascalName}}) + {{- else -}} + return cmdio.Render(ctx, response) + {{- end -}} + {{ else -}} return nil {{- end -}} {{- end -}} diff --git a/cmd/account/billable-usage/billable-usage.go b/cmd/account/billable-usage/billable-usage.go index 1dde38f6..5e9b33f8 100755 --- a/cmd/account/billable-usage/billable-usage.go +++ b/cmd/account/billable-usage/billable-usage.go @@ -85,7 +85,8 @@ func newDownload() *cobra.Command { if err != nil { return err } - return cmdio.Render(ctx, response) + defer response.Contents.Close() + return cmdio.RenderReader(ctx, response.Contents) } // Disable completions since they are not applicable. From 1b992c0c1c0b60ce82b6e3ac129756bde9ec5045 Mon Sep 17 00:00:00 2001 From: Arpit Jasapara <87999496+arpitjasa-db@users.noreply.github.com> Date: Wed, 18 Oct 2023 01:53:01 -0700 Subject: [PATCH 162/310] Rename MLOps Stack to MLOps Stacks (#881) ## Changes Rename `mlops-stack` `bundle init` redirect to `mlops-stacks`. 
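As a sketch of the resulting behavior (the `aliasedTemplates` map is taken from the diff below; the `resolveTemplate` helper is hypothetical), both spellings now resolve to the renamed repository:

```go
package main

import "fmt"

// Both the old and the new spelling alias to the renamed repository,
// so existing `bundle init mlops-stack` invocations keep working.
var aliasedTemplates = map[string]string{
	"mlops-stack":  "https://github.com/databricks/mlops-stacks",
	"mlops-stacks": "https://github.com/databricks/mlops-stacks",
}

// resolveTemplate is a hypothetical helper: return the aliased URL if
// one exists, otherwise pass the input through as a path or URL.
func resolveTemplate(name string) string {
	if url, ok := aliasedTemplates[name]; ok {
		return url
	}
	return name
}

func main() {
	fmt.Println(resolveTemplate("mlops-stack")) // prints the mlops-stacks URL
}
```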
## Tests N/A --- cmd/bundle/init.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 3625b7a9..eec05687 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -19,7 +19,8 @@ var gitUrlPrefixes = []string{ } var aliasedTemplates = map[string]string{ - "mlops-stack": "https://github.com/databricks/mlops-stack", + "mlops-stack": "https://github.com/databricks/mlops-stacks", + "mlops-stacks": "https://github.com/databricks/mlops-stacks", } func isRepoUrl(url string) bool { From 5273d0c51a1cd473557ef40d42e39ad10136f235 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 18 Oct 2023 12:20:43 +0200 Subject: [PATCH 163/310] Support Python wheels larger than 10MB (#879) ## Changes Previously we only supported uploading Python wheels smaller than 10MB because we used the Workspace.Import API and its `content` field (https://docs.databricks.com/api/workspace/workspace/import). By switching to `WorkspaceFilesClient` we overcome that limit, because it sends the file contents in the POST body instead. ## Tests `TestAccUploadArtifactFileToCorrectRemotePath` integration test passes ``` === RUN TestAccUploadArtifactFileToCorrectRemotePath artifacts_test.go:28: gcp 2023/10/17 15:24:04 INFO Using Google Credentials sdk=true helpers.go:356: Creating /Users/.../integration-test-wsfs-ekggbkcfdkid artifacts.Upload(test.whl): Uploading... 2023/10/17 15:24:06 INFO Using Google Credentials mutator=artifacts.Upload(test) sdk=true artifacts.Upload(test.whl): Upload succeeded helpers.go:362: Removing /Users/.../integration-test-wsfs-ekggbkcfdkid --- PASS: TestAccUploadArtifactFileToCorrectRemotePath (5.66s) PASS coverage: 14.9% of statements in ./... ok github.com/databricks/cli/internal 6.109s coverage: 14.9% of statements in ./...
``` --- bundle/artifacts/artifacts.go | 47 +++++------ bundle/artifacts/artifacts_test.go | 123 ----------------------------- internal/bundle/artifacts_test.go | 69 ++++++++++++++++ 3 files changed, 93 insertions(+), 146 deletions(-) delete mode 100644 bundle/artifacts/artifacts_test.go create mode 100644 internal/bundle/artifacts_test.go diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index 0331adb7..e55ae4e8 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -1,9 +1,9 @@ package artifacts import ( + "bytes" "context" "crypto/sha256" - "encoding/base64" "errors" "fmt" "os" @@ -14,7 +14,7 @@ import ( "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/databricks/cli/libs/filer" ) type mutatorFactory = func(name string) bundle.Mutator @@ -83,7 +83,7 @@ func BasicUpload(name string) bundle.Mutator { } func (m *basicUpload) Name() string { - return fmt.Sprintf("artifacts.Build(%s)", m.name) + return fmt.Sprintf("artifacts.Upload(%s)", m.name) } func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error { @@ -96,7 +96,17 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error { return fmt.Errorf("artifact source is not configured: %s", m.name) } - err := uploadArtifact(ctx, artifact, b) + uploadPath, err := getUploadBasePath(b) + if err != nil { + return err + } + + client, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath) + if err != nil { + return err + } + + err = uploadArtifact(ctx, artifact, uploadPath, client) if err != nil { return fmt.Errorf("artifacts.Upload(%s): %w", m.name, err) } @@ -104,13 +114,14 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error { return nil } -func uploadArtifact(ctx context.Context, a *config.Artifact, b *bundle.Bundle) error { +func uploadArtifact(ctx context.Context, a *config.Artifact, uploadPath string, client filer.Filer) error { for i := range a.Files { f := &a.Files[i] if f.NeedsUpload() { filename := filepath.Base(f.Source) cmdio.LogString(ctx, fmt.Sprintf("artifacts.Upload(%s): Uploading...", filename)) - remotePath, err := uploadArtifactFile(ctx, f.Source, b) + + remotePath, err := uploadArtifactFile(ctx, f.Source, uploadPath, client) if err != nil { return err } @@ -125,32 +136,22 @@ func uploadArtifact(ctx context.Context, a *config.Artifact, b *bundle.Bundle) e } // Function to upload artifact file to Workspace -func uploadArtifactFile(ctx context.Context, file string, b *bundle.Bundle) (string, error) { +func uploadArtifactFile(ctx context.Context, file string, uploadPath string, client filer.Filer) (string, error) { raw, err := os.ReadFile(file) if err != nil { return "", fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err)) } - uploadPath, err := getUploadBasePath(b) - if err != nil { - return "", err - } - fileHash := sha256.Sum256(raw) - remotePath := path.Join(uploadPath, fmt.Sprintf("%x", fileHash), filepath.Base(file)) - // Make sure target directory exists. 
- err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, path.Dir(remotePath)) + relPath := path.Join(fmt.Sprintf("%x", fileHash), filepath.Base(file)) + remotePath := path.Join(uploadPath, relPath) + + err = client.Mkdir(ctx, path.Dir(relPath)) if err != nil { - return "", fmt.Errorf("unable to create directory for %s: %w", remotePath, err) + return "", fmt.Errorf("unable to import %s: %w", remotePath, err) } - // Import to workspace. - err = b.WorkspaceClient().Workspace.Import(ctx, workspace.Import{ - Path: remotePath, - Overwrite: true, - Format: workspace.ImportFormatAuto, - Content: base64.StdEncoding.EncodeToString(raw), - }) + err = client.Write(ctx, relPath, bytes.NewReader(raw), filer.OverwriteIfExists, filer.CreateParentDirectories) if err != nil { return "", fmt.Errorf("unable to import %s: %w", remotePath, err) } diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go deleted file mode 100644 index bbae44ef..00000000 --- a/bundle/artifacts/artifacts_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package artifacts - -import ( - "context" - "os" - "path/filepath" - "regexp" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/workspace" - "github.com/stretchr/testify/require" -) - -func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) - require.NoError(t, err) - f, err := os.Create(path) - require.NoError(t, err) - f.Close() -} - -type MockWorkspaceService struct { -} - -// Delete implements workspace.WorkspaceService. -func (MockWorkspaceService) Delete(ctx context.Context, request workspace.Delete) error { - panic("unimplemented") -} - -// Export implements workspace.WorkspaceService. -func (MockWorkspaceService) Export(ctx context.Context, request workspace.ExportRequest) (*workspace.ExportResponse, error) { - panic("unimplemented") -} - -// GetStatus implements workspace.WorkspaceService. -func (MockWorkspaceService) GetStatus(ctx context.Context, request workspace.GetStatusRequest) (*workspace.ObjectInfo, error) { - panic("unimplemented") -} - -// Import implements workspace.WorkspaceService. -func (MockWorkspaceService) Import(ctx context.Context, request workspace.Import) error { - return nil -} - -// List implements workspace.WorkspaceService. -func (MockWorkspaceService) List(ctx context.Context, request workspace.ListWorkspaceRequest) (*workspace.ListResponse, error) { - panic("unimplemented") -} - -// Mkdirs implements workspace.WorkspaceService. -func (MockWorkspaceService) Mkdirs(ctx context.Context, request workspace.Mkdirs) error { - return nil -} - -// GetPermissionLevels implements workspace.WorkspaceService. -func (MockWorkspaceService) GetPermissionLevels( - ctx context.Context, - request workspace.GetWorkspaceObjectPermissionLevelsRequest, -) (*workspace.GetWorkspaceObjectPermissionLevelsResponse, error) { - panic("unimplemented") -} - -// GetPermissions implements workspace.WorkspaceService. -func (MockWorkspaceService) GetPermissions( - ctx context.Context, - request workspace.GetWorkspaceObjectPermissionsRequest, -) (*workspace.WorkspaceObjectPermissions, error) { - panic("unimplemented") -} - -// SetPermissions implements workspace.WorkspaceService. 
-func (MockWorkspaceService) SetPermissions( - ctx context.Context, - request workspace.WorkspaceObjectPermissionsRequest, -) (*workspace.WorkspaceObjectPermissions, error) { - panic("unimplemented") -} - -// UpdatePermissions implements workspace.WorkspaceService. -func (MockWorkspaceService) UpdatePermissions( - ctx context.Context, - request workspace.WorkspaceObjectPermissionsRequest, -) (*workspace.WorkspaceObjectPermissions, error) { - panic("unimplemented") -} - -func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) { - dir := t.TempDir() - whlPath := filepath.Join(dir, "dist", "test.whl") - touchEmptyFile(t, whlPath) - b := &bundle.Bundle{ - Config: config.Root{ - Path: dir, - Bundle: config.Bundle{ - Target: "whatever", - }, - Workspace: config.Workspace{ - ArtifactsPath: "/Users/test@databricks.com/whatever", - }, - }, - } - - b.WorkspaceClient().Workspace.WithImpl(MockWorkspaceService{}) - artifact := &config.Artifact{ - Type: "whl", - Files: []config.ArtifactFile{ - { - Source: whlPath, - Libraries: []*compute.Library{ - {Whl: "dist\\test.whl"}, - }, - }, - }, - } - - err := uploadArtifact(context.Background(), artifact, b) - require.NoError(t, err) - require.Regexp(t, regexp.MustCompile("/Users/test@databricks.com/whatever/.internal/[a-z0-9]+/test.whl"), artifact.Files[0].RemotePath) - require.Regexp(t, regexp.MustCompile("/Workspace/Users/test@databricks.com/whatever/.internal/[a-z0-9]+/test.whl"), artifact.Files[0].Libraries[0].Whl) -} diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go new file mode 100644 index 00000000..1e77cae9 --- /dev/null +++ b/internal/bundle/artifacts_test.go @@ -0,0 +1,69 @@ +package bundle + +import ( + "context" + "os" + "path" + "path/filepath" + "regexp" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/artifacts" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/require" +) + +func touchEmptyFile(t *testing.T, path string) { + err := os.MkdirAll(filepath.Dir(path), 0700) + require.NoError(t, err) + f, err := os.Create(path) + require.NoError(t, err) + f.Close() +} + +func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { + t.Log(internal.GetEnvOrSkipTest(t, "CLOUD_ENV")) + + dir := t.TempDir() + whlPath := filepath.Join(dir, "dist", "test.whl") + touchEmptyFile(t, whlPath) + + artifact := &config.Artifact{ + Type: "whl", + Files: []config.ArtifactFile{ + { + Source: whlPath, + Libraries: []*compute.Library{ + {Whl: "dist\\test.whl"}, + }, + }, + }, + } + + w := databricks.Must(databricks.NewWorkspaceClient()) + wsDir := internal.TemporaryWorkspaceDir(t, w) + + b := &bundle.Bundle{ + Config: config.Root{ + Path: dir, + Bundle: config.Bundle{ + Target: "whatever", + }, + Workspace: config.Workspace{ + ArtifactsPath: wsDir, + }, + Artifacts: config.Artifacts{ + "test": artifact, + }, + }, + } + + err := bundle.Apply(context.Background(), b, artifacts.BasicUpload("test")) + require.NoError(t, err) + require.Regexp(t, regexp.MustCompile(path.Join(wsDir, ".internal/[a-z0-9]+/test.whl")), artifact.Files[0].RemotePath) + require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", wsDir, ".internal/[a-z0-9]+/test.whl")), artifact.Files[0].Libraries[0].Whl) +} From a2ee8bb45bf672b2b5f52005fc7f093a0ac929a6 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Thu, 19 Oct 2023 09:08:36 +0200 
Subject: [PATCH 164/310] Improve the output of the `databricks bundle init` command (#795) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Improve the output of help, prompts, and so on for `databricks bundle init` and the default template. Among other things, this PR adds support for a new `success_message` property that lets a template print a custom message on success: ``` $ databricks bundle init Template to use [default-python]: Unique name for this project [my_project]: lennart_project Include a stub (sample) notebook in 'lennart_project/src': yes Include a stub (sample) Delta Live Tables pipeline in 'lennart_project/src': yes Include a stub (sample) Python package in 'lennart_project/src': yes ✨ Your new project has been created in the 'lennart_project' directory! Please refer to the README.md of your project for further instructions on getting started. Or read the documentation on Databricks Asset Bundles at https://docs.databricks.com/dev-tools/bundles/index.html. ``` --------- Co-authored-by: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> --- cmd/bundle/init.go | 16 +++++- libs/jsonschema/extension.go | 3 ++ libs/jsonschema/instance.go | 7 ++- libs/template/config.go | 52 +++++++++++++------ libs/template/config_test.go | 24 ++++++++- libs/template/helpers.go | 8 ++- libs/template/materialize.go | 26 +++++++--- libs/template/renderer.go | 2 +- libs/template/renderer_test.go | 2 +- .../databricks_template_schema.json | 13 ++--- .../config-test-schema/test-schema.json | 2 +- .../templated-defaults/library/my_funcs.tmpl | 7 +++ .../{{template `file_name`}} | 0 13 files changed, 121 insertions(+), 41 deletions(-) create mode 100644 libs/template/testdata/templated-defaults/library/my_funcs.tmpl create mode 100644 libs/template/testdata/templated-defaults/template/{{template `dir_name`}}/{{template `file_name`}} diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index eec05687..603878be 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -45,20 +45,29 @@ func repoName(url string) string { func newInitCommand() *cobra.Command { cmd := &cobra.Command{ Use: "init [TEMPLATE_PATH]", - Short: "Initialize Template", + Short: "Initialize using a bundle template", Args: cobra.MaximumNArgs(1), + Long: `Initialize using a bundle template. + +TEMPLATE_PATH optionally specifies which template to use. It can be one of the following: +- 'default-python' for the default Python template +- a local file system path with a template directory +- a Git repository URL, e.g.
https://github.com/my/repository + +See https://docs.databricks.com//dev-tools/bundles/templates.html for more information on templates.`, } var configFile string var outputDir string var templateDir string cmd.Flags().StringVar(&configFile, "config-file", "", "File containing input parameters for template initialization.") - cmd.Flags().StringVar(&templateDir, "template-dir", "", "Directory within repository that holds the template specification.") + cmd.Flags().StringVar(&templateDir, "template-dir", "", "Directory path within a Git repository containing the template.") cmd.Flags().StringVar(&outputDir, "output-dir", "", "Directory to write the initialized template to.") cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() + var templatePath string if len(args) > 0 { templatePath = args[0] @@ -79,6 +88,9 @@ func newInitCommand() *cobra.Command { } if !isRepoUrl(templatePath) { + if templateDir != "" { + return errors.New("--template-dir can only be used with a Git repository URL") + } // skip downloading the repo because input arg is not a URL. We assume // it's a path on the local file system in that case return template.Materialize(ctx, configFile, templatePath, outputDir) diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go index 57f3e873..572c248a 100644 --- a/libs/jsonschema/extension.go +++ b/libs/jsonschema/extension.go @@ -12,6 +12,9 @@ type Extension struct { // that do have an order defined. Order *int `json:"order,omitempty"` + // The message to print after the template is successfully initialized + SuccessMessage string `json:"success_message,omitempty"` + // PatternMatchFailureMessage is a user defined message that is displayed to the // user if a JSON schema pattern match fails.
PatternMatchFailureMessage string `json:"pattern_match_failure_message,omitempty"` diff --git a/libs/jsonschema/instance.go b/libs/jsonschema/instance.go index 6b3e3af4..d08ed519 100644 --- a/libs/jsonschema/instance.go +++ b/libs/jsonschema/instance.go @@ -39,14 +39,17 @@ func (s *Schema) LoadInstance(path string) (map[string]any, error) { return instance, s.ValidateInstance(instance) } +// Validate an instance against the schema func (s *Schema) ValidateInstance(instance map[string]any) error { - for _, fn := range []func(map[string]any) error{ + validations := []func(map[string]any) error{ s.validateAdditionalProperties, s.validateEnum, s.validateRequired, s.validateTypes, s.validatePattern, - } { + } + + for _, fn := range validations { err := fn(instance) if err != nil { return err diff --git a/libs/template/config.go b/libs/template/config.go index 2062f320..8ace307b 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -65,7 +65,7 @@ func (c *config) assignValuesFromFile(path string) error { } // Assigns default values from schema to input config map -func (c *config) assignDefaultValues() error { +func (c *config) assignDefaultValues(r *renderer) error { for name, property := range c.schema.Properties { // Config already has a value assigned if _, ok := c.values[name]; ok { @@ -75,13 +75,25 @@ func (c *config) assignDefaultValues() error { if property.Default == nil { continue } - c.values[name] = property.Default + defaultVal, err := jsonschema.ToString(property.Default, property.Type) + if err != nil { + return err + } + defaultVal, err = r.executeTemplate(defaultVal) + if err != nil { + return err + } + defaultValTyped, err := jsonschema.FromString(defaultVal, property.Type) + if err != nil { + return err + } + c.values[name] = defaultValTyped } return nil } // Prompts user for values for properties that do not have a value set yet -func (c *config) promptForValues() error { +func (c *config) promptForValues(r *renderer) error { for _, p := range c.schema.OrderedProperties() { name := p.Name property := p.Schema @@ -95,10 +107,19 @@ func (c *config) promptForValues() error { var defaultVal string var err error if property.Default != nil { - defaultVal, err = jsonschema.ToString(property.Default, property.Type) + defaultValRaw, err := jsonschema.ToString(property.Default, property.Type) if err != nil { return err } + defaultVal, err = r.executeTemplate(defaultValRaw) + if err != nil { + return err + } + } + + description, err := r.executeTemplate(property.Description) + if err != nil { + return err } // Get user input by running the prompt @@ -109,21 +130,15 @@ func (c *config) promptForValues() error { if err != nil { return err } - userInput, err = cmdio.AskSelect(c.ctx, property.Description, enums) + userInput, err = cmdio.AskSelect(c.ctx, description, enums) if err != nil { return err } } else { - userInput, err = cmdio.Ask(c.ctx, property.Description, defaultVal) + userInput, err = cmdio.Ask(c.ctx, description, defaultVal) if err != nil { return err } - - } - - // Validate the property matches any specified regex pattern. - if err := jsonschema.ValidatePatternMatch(name, userInput, property); err != nil { - return err } // Convert user input string back to a value @@ -131,23 +146,28 @@ func (c *config) promptForValues() error { if err != nil { return err } + + // Validate the partial config based on this update + if err := c.schema.ValidateInstance(c.values); err != nil { + return err + } } return nil } // Prompt user for any missing config values. 
Assign default values if // terminal is not TTY -func (c *config) promptOrAssignDefaultValues() error { +func (c *config) promptOrAssignDefaultValues(r *renderer) error { if cmdio.IsOutTTY(c.ctx) && cmdio.IsInTTY(c.ctx) { - return c.promptForValues() + return c.promptForValues(r) } - return c.assignDefaultValues() + return c.assignDefaultValues(r) } // Validates the configuration. If passes, the configuration is ready to be used // to initialize the template. func (c *config) validate() error { - // All properties in the JSON schema should have a value defined. + // For final validation, all properties in the JSON schema should have a value defined. c.schema.Required = maps.Keys(c.schema.Properties) if err := c.schema.ValidateInstance(c.values); err != nil { return fmt.Errorf("validation for template input parameters failed. %w", err) diff --git a/libs/template/config_test.go b/libs/template/config_test.go index 1b1fc338..9a0a9931 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/jsonschema" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -52,11 +53,17 @@ func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *te func TestTemplateConfigAssignDefaultValues(t *testing.T) { c := testConfig(t) - err := c.assignDefaultValues() + ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/template-in-path/template", "./testdata/template-in-path/library", t.TempDir()) + require.NoError(t, err) + + err = c.assignDefaultValues(r) assert.NoError(t, err) assert.Len(t, c.values, 2) - assert.Equal(t, "abc", c.values["string_val"]) + assert.Equal(t, "my_file", c.values["string_val"]) assert.Equal(t, int64(123), c.values["int_val"]) } @@ -169,3 +176,16 @@ func TestTemplateEnumValidation(t *testing.T) { } assert.NoError(t, c.validate()) } + +func TestAssignDefaultValuesWithTemplatedDefaults(t *testing.T) { + c := testConfig(t) + ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/templated-defaults/template", "./testdata/templated-defaults/library", t.TempDir()) + require.NoError(t, err) + + err = c.assignDefaultValues(r) + assert.NoError(t, err) + assert.Equal(t, "my_file", c.values["string_val"]) +} diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 31752270..7f306a3a 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/url" + "os" "regexp" "text/template" @@ -65,7 +66,7 @@ func loadHelpers(ctx context.Context) template.FuncMap { // Get smallest node type (follows Terraform's GetSmallestNodeType) "smallest_node_type": func() (string, error) { if w.Config.Host == "" { - return "", errors.New("cannot determine target workspace, please first setup a configuration profile using 'databricks auth login'") + return "", errors.New("cannot determine target workspace, please first setup a configuration profile using 'databricks configure'") } if w.Config.IsAzure() { return "Standard_D3_v2", nil @@ -74,9 +75,12 @@ func loadHelpers(ctx context.Context) template.FuncMap { } return "i3.xlarge", nil }, + "path_separator": func() string { + return string(os.PathSeparator) + }, "workspace_host": func() (string, error) { if w.Config.Host == "" { - return 
"", errors.New("cannot determine target workspace, please first setup a configuration profile using 'databricks auth login'") + return "", errors.New("cannot determine target workspace, please first setup a configuration profile using 'databricks configure'") } return w.Config.Host, nil }, diff --git a/libs/template/materialize.go b/libs/template/materialize.go index 8517858f..ec62e41f 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -56,23 +56,23 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st } } - // Prompt user for any missing config values. Assign default values if - // terminal is not TTY - err = config.promptOrAssignDefaultValues() + r, err := newRenderer(ctx, config.values, helpers, templatePath, libraryPath, outputDir) if err != nil { return err } + // Prompt user for any missing config values. Assign default values if + // terminal is not TTY + err = config.promptOrAssignDefaultValues(r) + if err != nil { + return err + } err = config.validate() if err != nil { return err } // Walk and render the template, since input configuration is complete - r, err := newRenderer(ctx, config.values, helpers, templatePath, libraryPath, outputDir) - if err != nil { - return err - } err = r.walk() if err != nil { return err @@ -82,7 +82,17 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st if err != nil { return err } - cmdio.LogString(ctx, "✨ Successfully initialized template") + + success := config.schema.SuccessMessage + if success == "" { + cmdio.LogString(ctx, "✨ Successfully initialized template") + } else { + success, err = r.executeTemplate(success) + if err != nil { + return err + } + cmdio.LogString(ctx, success) + } return nil } diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 09ccc3f5..6415cd84 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -311,7 +311,7 @@ func (r *renderer) persistToDisk() error { path := file.DstPath().absPath() _, err := os.Stat(path) if err == nil { - return fmt.Errorf("failed to persist to disk, conflict with existing file: %s", path) + return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path) } if err != nil && !os.IsNotExist(err) { return fmt.Errorf("error while verifying file %s does not already exist: %w", path, err) diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 254b06cf..d513eac8 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -516,7 +516,7 @@ func TestRendererErrorOnConflictingFile(t *testing.T) { }, } err = r.persistToDisk() - assert.EqualError(t, err, fmt.Sprintf("failed to persist to disk, conflict with existing file: %s", filepath.Join(tmpDir, "a"))) + assert.EqualError(t, err, fmt.Sprintf("failed to initialize template, one or more files already exist: %s", filepath.Join(tmpDir, "a"))) } func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { diff --git a/libs/template/templates/default-python/databricks_template_schema.json b/libs/template/templates/default-python/databricks_template_schema.json index 59697a61..8b26ee70 100644 --- a/libs/template/templates/default-python/databricks_template_schema.json +++ b/libs/template/templates/default-python/databricks_template_schema.json @@ -5,29 +5,30 @@ "default": "my_project", "description": "Unique name for this project", "order": 1, - "pattern": "^[A-Za-z0-9_]*$", - "pattern_match_failure_message": "Must consist of letter and underscores 
only." + "pattern": "^[A-Za-z0-9_]+$", + "pattern_match_failure_message": "Name must consist of letters, numbers, and underscores." }, "include_notebook": { "type": "string", "default": "yes", "enum": ["yes", "no"], - "description": "Include a stub (sample) notebook in 'my_project/src'", + "description": "Include a stub (sample) notebook in '{{.project_name}}{{path_separator}}src'", "order": 2 }, "include_dlt": { "type": "string", "default": "yes", "enum": ["yes", "no"], - "description": "Include a stub (sample) DLT pipeline in 'my_project/src'", + "description": "Include a stub (sample) Delta Live Tables pipeline in '{{.project_name}}{{path_separator}}src'", "order": 3 }, "include_python": { "type": "string", "default": "yes", "enum": ["yes", "no"], - "description": "Include a stub (sample) Python package in '{{.project_name}}/src'", + "description": "Include a stub (sample) Python package in '{{.project_name}}{{path_separator}}src'", "order": 4 } - } + }, + "success_message": "\n✨ Your new project has been created in the '{{.project_name}}' directory!\n\nPlease refer to the README.md of your project for further instructions on getting started.\nOr read the documentation on Databricks Asset Bundles at https://docs.databricks.com/dev-tools/bundles/index.html." } diff --git a/libs/template/testdata/config-test-schema/test-schema.json b/libs/template/testdata/config-test-schema/test-schema.json index 41eb8251..6daf4959 100644 --- a/libs/template/testdata/config-test-schema/test-schema.json +++ b/libs/template/testdata/config-test-schema/test-schema.json @@ -12,7 +12,7 @@ }, "string_val": { "type": "string", - "default": "abc" + "default": "{{template \"file_name\"}}" } } } diff --git a/libs/template/testdata/templated-defaults/library/my_funcs.tmpl b/libs/template/testdata/templated-defaults/library/my_funcs.tmpl new file mode 100644 index 00000000..3415ad77 --- /dev/null +++ b/libs/template/testdata/templated-defaults/library/my_funcs.tmpl @@ -0,0 +1,7 @@ +{{define "dir_name" -}} +my_directory +{{- end}} + +{{define "file_name" -}} +my_file +{{- end}} diff --git a/libs/template/testdata/templated-defaults/template/{{template `dir_name`}}/{{template `file_name`}} b/libs/template/testdata/templated-defaults/template/{{template `dir_name`}}/{{template `file_name`}} new file mode 100644 index 00000000..e69de29b From ec9425445abf5ed229f1645354c3e98201f7f693 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 19 Oct 2023 13:48:09 +0200 Subject: [PATCH 165/310] Release v0.208.1 (#887) CLI: * Fix rendering of streaming response ([#876](https://github.com/databricks/cli/pull/876)). Bundles: * Rename MLOps Stack to MLOps Stacks ([#881](https://github.com/databricks/cli/pull/881)). * Support Python wheels larger than 10MB ([#879](https://github.com/databricks/cli/pull/879)). * Improve the output of the `databricks bundle init` command ([#795](https://github.com/databricks/cli/pull/795)). --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a52607b0..917301d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Version changelog +## 0.208.1 + +CLI: + * Fix rendering of streaming response ([#876](https://github.com/databricks/cli/pull/876)). + +Bundles: + * Rename MLOps Stack to MLOps Stacks ([#881](https://github.com/databricks/cli/pull/881)). + * Support Python wheels larger than 10MB ([#879](https://github.com/databricks/cli/pull/879)). 
+ * Improve the output of the `databricks bundle init` command ([#795](https://github.com/databricks/cli/pull/795)). + + + ## 0.208.0 Note: this release includes a fix for the issue where zero values (for example From 996d6273c76853ab72a2e054672be8885649edaa Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 19 Oct 2023 14:06:46 +0200 Subject: [PATCH 166/310] Escape workspace path string in regexp in artifacts integration test (#886) ## Changes Escape workspace path string in regexp in artifacts integration test ## Tests ``` Environment: aws-prod === RUN TestAccUploadArtifactFileToCorrectRemotePath artifacts_test.go:29: aws helpers.go:356: Creating /Users/serge.smertin+deco@databricks.com/integration-test-wsfs-leakafecllkc artifacts.Upload(test.whl): Uploading... artifacts.Upload(test.whl): Upload succeeded helpers.go:362: Removing /Users/serge.smertin+deco@databricks.com/integration-test-wsfs-leakafecllkc --- PASS: TestAccUploadArtifactFileToCorrectRemotePath (2.12s) PASS coverage: 0.0% of statements in ./... ok github.com/databricks/cli/internal/bundle 2.788s coverage: 0.0% of statements in ./... ``` --- internal/bundle/artifacts_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 1e77cae9..9d1a171a 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -64,6 +64,6 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { err := bundle.Apply(context.Background(), b, artifacts.BasicUpload("test")) require.NoError(t, err) - require.Regexp(t, regexp.MustCompile(path.Join(wsDir, ".internal/[a-z0-9]+/test.whl")), artifact.Files[0].RemotePath) - require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", wsDir, ".internal/[a-z0-9]+/test.whl")), artifact.Files[0].Libraries[0].Whl) + require.Regexp(t, regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/[a-z0-9]+/test\.whl`)), artifact.Files[0].RemotePath) + require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/[a-z0-9]+/test\.whl`)), artifact.Files[0].Libraries[0].Whl) } From 5a53b118a7d2c509e7470f0fdcb96666f740b845 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 19 Oct 2023 14:34:20 +0200 Subject: [PATCH 167/310] Skip prompt on completion hook (#888) ## Changes The first stab at this was added in #837 but only included the `NoPrompt` check in `MustAccountClient`. I renamed it to `SkipPrompt` (in preparation for another option that skips bundle load) and made it work for `MustWorkspaceClient` as well. ## Tests Manually confirmed that the completion hook no longer prompts for a profile (when called directly with `databricks __complete`). 
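As a reference for other commands, here is a minimal sketch of the resulting pattern (mirroring the `cmd/sync` change below; the `attachCompletion` helper and the completion body are illustrative, not part of this PR):

```go
package example

import (
	"github.com/databricks/cli/cmd/root"
	"github.com/spf13/cobra"
)

// attachCompletion shows how a completion hook marks the context with
// SkipPrompt before resolving a workspace client, so `databricks __complete`
// never blocks on an interactive profile prompt.
func attachCompletion(cmd *cobra.Command) {
	cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		// Mark the context so MustWorkspaceClient skips the profile prompt.
		cmd.SetContext(root.SkipPrompt(cmd.Context()))
		if err := root.MustWorkspaceClient(cmd, args); err != nil {
			return nil, cobra.ShellCompDirectiveError
		}
		// Completion candidates would be computed here with the authenticated client.
		return nil, cobra.ShellCompDirectiveNoFileComp
	}
}
```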
--- cmd/root/auth.go | 20 ++------------------ cmd/root/auth_options.go | 26 ++++++++++++++++++++++++++ cmd/root/auth_options_test.go | 16 ++++++++++++++++ cmd/sync/sync.go | 3 +-- 4 files changed, 45 insertions(+), 20 deletions(-) create mode 100644 cmd/root/auth_options.go create mode 100644 cmd/root/auth_options_test.go diff --git a/cmd/root/auth.go b/cmd/root/auth.go index ed91090e..1e051aef 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -92,8 +92,7 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { } } - noPrompt, ok := cmd.Context().Value(noPromptKey).(bool) - allowPrompt := !hasProfileFlag && (!ok || !noPrompt) + allowPrompt := !hasProfileFlag && !shouldSkipPrompt(cmd.Context()) a, err := accountClientOrPrompt(cmd.Context(), cfg, allowPrompt) if err != nil { return err @@ -103,21 +102,6 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { return nil } -type noPrompt int - -var noPromptKey noPrompt - -// NoPrompt allows to skip prompt for profile configuration in MustWorkspaceClient. -// -// When calling MustWorkspaceClient we want to be able to customise if to show prompt or not. -// Since we can't change function interface, in the code we only have an access to `cmd“ object. -// Command struct does not have any state flag which indicates that it's being called in completion mode and -// thus the Context object seems to be the only viable option for us to configure prompt behaviour based on -// the context it's executed from. -func NoPrompt(ctx context.Context) context.Context { - return context.WithValue(ctx, noPromptKey, true) -} - // Helper function to create a workspace client or prompt once if the given configuration is not valid. func workspaceClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt bool) (*databricks.WorkspaceClient, error) { w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg)) @@ -174,7 +158,7 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { cfg = currentBundle.WorkspaceClient().Config } - allowPrompt := !hasProfileFlag + allowPrompt := !hasProfileFlag && !shouldSkipPrompt(cmd.Context()) w, err := workspaceClientOrPrompt(cmd.Context(), cfg, allowPrompt) if err != nil { return err diff --git a/cmd/root/auth_options.go b/cmd/root/auth_options.go new file mode 100644 index 00000000..3961f1aa --- /dev/null +++ b/cmd/root/auth_options.go @@ -0,0 +1,26 @@ +package root + +import ( + "context" +) + +type skipPrompt int + +var skipPromptKey skipPrompt + +// SkipPrompt allows to skip prompt for profile configuration in MustWorkspaceClient. +// +// When calling MustWorkspaceClient we want to be able to customise if to show prompt or not. +// Since we can't change function interface, in the code we only have an access to `cmd` object. +// Command struct does not have any state flag which indicates that it's being called in completion mode and +// thus the Context object seems to be the only viable option for us to configure prompt behaviour based on +// the context it's executed from. +func SkipPrompt(ctx context.Context) context.Context { + return context.WithValue(ctx, skipPromptKey, true) +} + +// shouldSkipPrompt returns whether or not [SkipPrompt] has been set on the specified context. 
+func shouldSkipPrompt(ctx context.Context) bool { + skipPrompt, ok := ctx.Value(skipPromptKey).(bool) + return ok && skipPrompt +} diff --git a/cmd/root/auth_options_test.go b/cmd/root/auth_options_test.go new file mode 100644 index 00000000..61c68acc --- /dev/null +++ b/cmd/root/auth_options_test.go @@ -0,0 +1,16 @@ +package root + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSkipPrompt(t *testing.T) { + ctx := context.Background() + assert.False(t, shouldSkipPrompt(ctx)) + + ctx = SkipPrompt(ctx) + assert.True(t, shouldSkipPrompt(ctx)) +} diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 5416b573..dffa5345 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -149,8 +149,7 @@ func New() *cobra.Command { } cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - ctx := cmd.Context() - cmd.SetContext(root.NoPrompt(ctx)) + cmd.SetContext(root.SkipPrompt(cmd.Context())) err := root.MustWorkspaceClient(cmd, args) if err != nil { From 7139487c2f500c2a1c1ba1c8b9d4195c6633eb9d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 19 Oct 2023 14:50:46 +0200 Subject: [PATCH 168/310] Never load authentication configuration from bundle for sync command (#889) ## Changes This is used for the sync command, where we need to ensure that a bundle configuration never taints the authentication setup as prepared in the environment (by our VS Code extension). Once the VS Code extension fully builds on bundles, we can remove this check again. ## Tests Manually confirmed that calling `databricks sync` from a bundle directory no longer picks up its authentication configuration. --- cmd/root/auth.go | 19 +++++++++---------- cmd/root/auth_options.go | 19 +++++++++++++++++++ cmd/root/auth_options_test.go | 8 ++++++++ cmd/sync/sync.go | 10 ++++++++-- 4 files changed, 44 insertions(+), 12 deletions(-) diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 1e051aef..81c71479 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -146,16 +146,15 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { cfg.Profile = profile } - // try configuring a bundle - err := TryConfigureBundle(cmd, args) - if err != nil { - return err - } - - // and load the config from there - currentBundle := bundle.GetOrNil(cmd.Context()) - if currentBundle != nil { - cfg = currentBundle.WorkspaceClient().Config + // Try to load a bundle configuration if we're allowed to by the caller (see `./auth_options.go`). + if !shouldSkipLoadBundle(cmd.Context()) { + err := TryConfigureBundle(cmd, args) + if err != nil { + return err + } + if b := bundle.GetOrNil(cmd.Context()); b != nil { + cfg = b.WorkspaceClient().Config + } } allowPrompt := !hasProfileFlag && !shouldSkipPrompt(cmd.Context()) diff --git a/cmd/root/auth_options.go b/cmd/root/auth_options.go index 3961f1aa..701900d4 100644 --- a/cmd/root/auth_options.go +++ b/cmd/root/auth_options.go @@ -24,3 +24,22 @@ func shouldSkipPrompt(ctx context.Context) bool { skipPrompt, ok := ctx.Value(skipPromptKey).(bool) return ok && skipPrompt } + +type skipLoadBundle int + +var skipLoadBundleKey skipLoadBundle + +// SkipLoadBundle instructs [MustWorkspaceClient] to never try and load a bundle for configuration options. +// +// This is used for the sync command, where we need to ensure that a bundle configuration never taints +// the authentication setup as prepared in the environment (by our VS Code extension). 
+// Once the VS Code extension fully builds on bundles, we can remove this check again. +func SkipLoadBundle(ctx context.Context) context.Context { + return context.WithValue(ctx, skipLoadBundleKey, true) +} + +// shouldSkipLoadBundle returns whether or not [SkipLoadBundle] has been set on the specified context. +func shouldSkipLoadBundle(ctx context.Context) bool { + skipLoadBundle, ok := ctx.Value(skipLoadBundleKey).(bool) + return ok && skipLoadBundle +} diff --git a/cmd/root/auth_options_test.go b/cmd/root/auth_options_test.go index 61c68acc..477c2029 100644 --- a/cmd/root/auth_options_test.go +++ b/cmd/root/auth_options_test.go @@ -14,3 +14,11 @@ func TestSkipPrompt(t *testing.T) { ctx = SkipPrompt(ctx) assert.True(t, shouldSkipPrompt(ctx)) } + +func TestSkipLoadBundle(t *testing.T) { + ctx := context.Background() + assert.False(t, shouldSkipLoadBundle(ctx)) + + ctx = SkipLoadBundle(ctx) + assert.True(t, shouldSkipLoadBundle(ctx)) +} diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index dffa5345..2870e1e0 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -89,7 +89,13 @@ func New() *cobra.Command { cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes") cmd.Flags().Var(&f.output, "output", "type of output format") - cmd.PreRunE = root.MustWorkspaceClient + // Wrapper for [root.MustWorkspaceClient] that disables loading authentication configuration from a bundle. + mustWorkspaceClient := func(cmd *cobra.Command, args []string) error { + cmd.SetContext(root.SkipLoadBundle(cmd.Context())) + return root.MustWorkspaceClient(cmd, args) + } + + cmd.PreRunE = mustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) error { var opts *sync.SyncOptions var err error @@ -151,7 +157,7 @@ func New() *cobra.Command { cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { cmd.SetContext(root.SkipPrompt(cmd.Context())) - err := root.MustWorkspaceClient(cmd, args) + err := mustWorkspaceClient(cmd, args) if err != nil { return nil, cobra.ShellCompDirectiveError } From 3700785dfa6f43d05c73511de639ea78135a8330 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 19 Oct 2023 16:01:48 +0200 Subject: [PATCH 169/310] Add support for validating CLI version when loading a jsonschema object (#883) ## Changes Updates to bundle templates can require updated versions of the CLI. This PR extends the JSON schema representation to allow template authors to set a min CLI version they require for their templates. This is required to make improvements/additions to the mlops-stacks repo ## Tests Tested using unit tests and manually. For manualy testing, I created a custom build of the CLI using go releaser and then tested it against a local instance of mlops-stack When mlops-stack schema has: ``` "min_databricks_cli_version": "v5000.1.1", ``` output (error as expected) ``` shreyas.goenka@THW32HFW6T bricks % ./dist/cli_darwin_arm64/databricks bundle init ~/mlops-stack Error: minimum CLI version "v5000.1.1" is greater than current CLI version "v0.207.2-dev+1b992c0". Please upgrade your current Databricks CLI ``` When the mlops-stack schema has: ``` "min_databricks_cli_version": "v0.1.1", ``` output (validation passes) ``` shreyas.goenka@THW32HFW6T bricks % ./dist/cli_darwin_arm64/databricks bundle init ~/mlops-stack Welcome to MLOps Stack. 
For detailed information on project generation, see the README at https://github.com/databricks/mlops-stack/blob/main/README.md. Project Name [my-mlops-project]: ^C ``` --- internal/build/info.go | 4 +- libs/jsonschema/extension.go | 5 +++ libs/jsonschema/schema.go | 68 +++++++++++++++++++++++++++++++--- libs/jsonschema/schema_test.go | 39 +++++++++++++++++++ 4 files changed, 109 insertions(+), 7 deletions(-) diff --git a/internal/build/info.go b/internal/build/info.go index 41b2600e..ca24ea7f 100644 --- a/internal/build/info.go +++ b/internal/build/info.go @@ -33,6 +33,8 @@ var info Info var once sync.Once +var DefaultSemver = "0.0.0-dev" + // getDefaultBuildVersion uses build information stored by Go itself // to synthesize a build version if one wasn't set. // This is necessary if the binary was not built through goreleaser. @@ -47,7 +49,7 @@ func getDefaultBuildVersion() string { m[s.Key] = s.Value } - out := "0.0.0-dev" + out := DefaultSemver // Append revision as build metadata. if v, ok := m["vcs.revision"]; ok { diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go index 572c248a..8503ab2d 100644 --- a/libs/jsonschema/extension.go +++ b/libs/jsonschema/extension.go @@ -18,4 +18,9 @@ type Extension struct { // PatternMatchFailureMessage is a user defined message that is displayed to the // user if a JSON schema pattern match fails. PatternMatchFailureMessage string `json:"pattern_match_failure_message,omitempty"` + + // Set the minimum semver version of this CLI to validate when loading this schema. + // If the CLI version is less than this value, then validation for this + // schema will fail. + MinDatabricksCliVersion string `json:"min_databricks_cli_version,omitempty"` } diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index dc319bfe..f1a89e7b 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -6,6 +6,9 @@ import ( "os" "regexp" "slices" + + "github.com/databricks/cli/internal/build" + "golang.org/x/mod/semver" ) // defines schema for a json object @@ -67,8 +70,8 @@ const ( IntegerType Type = "integer" ) -func (schema *Schema) validate() error { - // Validate property types are all valid JSON schema types. +// Validate property types are all valid JSON schema types. +func (schema *Schema) validateSchemaPropertyTypes() error { for _, v := range schema.Properties { switch v.Type { case NumberType, BooleanType, StringType, IntegerType: @@ -83,8 +86,11 @@ func (schema *Schema) validate() error { return fmt.Errorf("type %s is not a recognized json schema type", v.Type) } } + return nil +} - // Validate default property values are consistent with types. +// Validate default property values are consistent with types. +func (schema *Schema) validateSchemaDefaultValueTypes() error { for name, property := range schema.Properties { if property.Default == nil { continue @@ -93,8 +99,11 @@ func (schema *Schema) validate() error { return fmt.Errorf("type validation for default value of property %s failed: %w", name, err) } } + return nil +} - // Validate enum field values for properties are consistent with types. +// Validate enum field values for properties are consistent with types. +func (schema *Schema) validateSchemaEnumValueTypes() error { for name, property := range schema.Properties { if property.Enum == nil { continue @@ -106,8 +115,11 @@ func (schema *Schema) validate() error { } } } + return nil +} - // Validate default value is contained in the list of enums if both are defined. 
+// Validate default value is contained in the list of enums if both are defined. +func (schema *Schema) validateSchemaDefaultValueIsInEnums() error { for name, property := range schema.Properties { if property.Default == nil || property.Enum == nil { continue @@ -118,8 +130,11 @@ func (schema *Schema) validate() error { return fmt.Errorf("list of enum values for property %s does not contain default value %v: %v", name, property.Default, property.Enum) } } + return nil +} - // Validate usage of "pattern" is consistent. +// Validate usage of "pattern" is consistent. +func (schema *Schema) validateSchemaPattern() error { for name, property := range schema.Properties { pattern := property.Pattern if pattern == "" { @@ -153,6 +168,47 @@ func (schema *Schema) validate() error { return nil } +func (schema *Schema) validateSchemaMinimumCliVersion(currentVersion string) func() error { + return func() error { + if schema.MinDatabricksCliVersion == "" { + return nil + } + + // Ignore this validation rule for local builds. + if semver.Compare("v"+build.DefaultSemver, currentVersion) == 0 { + return nil + } + + // Confirm that MinDatabricksCliVersion is a valid semver. + if !semver.IsValid(schema.MinDatabricksCliVersion) { + return fmt.Errorf("invalid minimum CLI version %q specified. Please specify the version in the format v0.0.0", schema.MinDatabricksCliVersion) + } + + // Confirm that MinDatabricksCliVersion is less than or equal to the current version. + if semver.Compare(schema.MinDatabricksCliVersion, currentVersion) > 0 { + return fmt.Errorf("minimum CLI version %q is greater than current CLI version %q. Please upgrade your current Databricks CLI", schema.MinDatabricksCliVersion, currentVersion) + } + return nil + } +} + +func (schema *Schema) validate() error { + for _, fn := range []func() error{ + schema.validateSchemaPropertyTypes, + schema.validateSchemaDefaultValueTypes, + schema.validateSchemaEnumValueTypes, + schema.validateSchemaDefaultValueIsInEnums, + schema.validateSchemaPattern, + schema.validateSchemaMinimumCliVersion("v" + build.GetInfo().Version), + } { + err := fn() + if err != nil { + return err + } + } + return nil +} + func Load(path string) (*Schema, error) { b, err := os.ReadFile(path) if err != nil { diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go index aff2d962..8826a32b 100644 --- a/libs/jsonschema/schema_test.go +++ b/libs/jsonschema/schema_test.go @@ -222,3 +222,42 @@ func TestSchemaValidatePatternEnum(t *testing.T) { } assert.NoError(t, s.validate()) } + +func TestValidateSchemaMinimumCliVersionWithInvalidSemver(t *testing.T) { + s := &Schema{ + Extension: Extension{ + MinDatabricksCliVersion: "1.0.5", + }, + } + err := s.validateSchemaMinimumCliVersion("v2.0.1")() + assert.ErrorContains(t, err, "invalid minimum CLI version \"1.0.5\" specified. 
Please specify the version in the format v0.0.0") + + s.MinDatabricksCliVersion = "v1.0.5" + err = s.validateSchemaMinimumCliVersion("v2.0.1")() + assert.NoError(t, err) +} + +func TestValidateSchemaMinimumCliVersion(t *testing.T) { + s := &Schema{ + Extension: Extension{ + MinDatabricksCliVersion: "v1.0.5", + }, + } + err := s.validateSchemaMinimumCliVersion("v2.0.1")() + assert.NoError(t, err) + + err = s.validateSchemaMinimumCliVersion("v1.0.5")() + assert.NoError(t, err) + + err = s.validateSchemaMinimumCliVersion("v1.0.6")() + assert.NoError(t, err) + + err = s.validateSchemaMinimumCliVersion("v1.0.4")() + assert.ErrorContains(t, err, `minimum CLI version "v1.0.5" is greater than current CLI version "v1.0.4". Please upgrade your current Databricks CLI`) + + err = s.validateSchemaMinimumCliVersion("v0.0.1")() + assert.ErrorContains(t, err, "minimum CLI version \"v1.0.5\" is greater than current CLI version \"v0.0.1\". Please upgrade your current Databricks CLI") + + err = s.validateSchemaMinimumCliVersion("v0.0.0-dev")() + assert.NoError(t, err) +} From 4ad68eb314fe9bd27dabbfc850fea4af1a6ef0de Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 19 Oct 2023 16:19:17 +0200 Subject: [PATCH 170/310] Fixed requiring positional arguments for API URL parameters (#878) ## Changes Some commands such as update commands have an argument in their url, for example in pipeline we have `PUT pipelines/` to update the pipeline. Such parameters must be required and respected even if `--json` flag with the payload passed. Note: this depends on these PRs in Go SDK: https://github.com/databricks/databricks-sdk-go/pull/660 https://github.com/databricks/databricks-sdk-go/pull/661 ## Tests Manually running `databricks pipelines update` --- .codegen/service.go.tmpl | 9 ++--- cmd/account/groups/groups.go | 37 +++++++++---------- .../service-principals/service-principals.go | 37 +++++++++---------- cmd/account/users/users.go | 37 +++++++++---------- cmd/workspace/catalogs/catalogs.go | 6 +-- .../external-locations/external-locations.go | 6 +-- cmd/workspace/groups/groups.go | 37 +++++++++---------- cmd/workspace/pipelines/pipelines.go | 37 +++++++++---------- cmd/workspace/providers/providers.go | 37 +++++++------------ cmd/workspace/recipients/recipients.go | 37 +++++++++---------- .../registered-models/registered-models.go | 22 +++-------- .../service-principals/service-principals.go | 37 +++++++++---------- cmd/workspace/shares/shares.go | 6 +-- .../storage-credentials.go | 37 +++++++++---------- cmd/workspace/users/users.go | 37 +++++++++---------- 15 files changed, 188 insertions(+), 231 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 1ee19acb..b4b6b4d4 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -78,8 +78,7 @@ var {{.CamelName}}Overrides []func( func new{{.PascalName}}() *cobra.Command { cmd := &cobra.Command{} - {{- $useJsonForAllFields := or .IsJsonOnly (and .Request (or (not .Request.IsAllRequiredFieldsPrimitive) .Request.HasRequiredNonBodyField)) -}} - {{- $needJsonFlag := or $useJsonForAllFields (and .Request (not .Request.IsOnlyPrimitiveFields)) -}} + {{- $needJsonFlag := or .CanSetRequiredFieldsFromJson (and .Request (not .Request.IsOnlyPrimitiveFields)) -}} {{- if .Request}} @@ -143,7 +142,7 @@ func new{{.PascalName}}() *cobra.Command { {{if $hasRequiredArgs }} cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs({{len .Request.RequiredFields}}) - {{- if $useJsonForAllFields }} + {{- if 
.CanSetRequiredFieldsFromJson }} if cmd.Flags().Changed("json") { check = cobra.ExactArgs(0) } @@ -162,7 +161,7 @@ func new{{.PascalName}}() *cobra.Command { if err != nil { return err } - }{{end}}{{if $useJsonForAllFields }} else { + }{{end}}{{if .CanSetRequiredFieldsFromJson }} else { {{- end }} {{- if $hasIdPrompt}} if len(args) == 0 { @@ -196,7 +195,7 @@ func new{{.PascalName}}() *cobra.Command { {{- else -}} return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") {{- end -}} - {{if $useJsonForAllFields }} + {{if .CanSetRequiredFieldsFromJson }} } {{end }} {{end}} diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 6e3b98c0..2ecaa3a7 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -478,26 +478,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Account Groups drop-down." - names, err := a.Groups.GroupDisplayNameToIdMap(ctx, iam.ListAccountGroupsRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Account Groups drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks group ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks group id") - } - updateReq.Id = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Account Groups drop-down." + names, err := a.Groups.GroupDisplayNameToIdMap(ctx, iam.ListAccountGroupsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Account Groups drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Databricks group ID") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have databricks group id") + } + updateReq.Id = args[0] err = a.Groups.Update(ctx, updateReq) if err != nil { diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index f5823c69..be210b35 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -480,26 +480,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Account Service Principals drop-down." - names, err := a.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListAccountServicePrincipalsRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Account Service Principals drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks service principal ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks service principal id") - } - updateReq.Id = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Account Service Principals drop-down." 
+ names, err := a.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListAccountServicePrincipalsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Account Service Principals drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Databricks service principal ID") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have databricks service principal id") + } + updateReq.Id = args[0] err = a.ServicePrincipals.Update(ctx, updateReq) if err != nil { diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 375dd5b5..c826ab0d 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -488,26 +488,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Account Users drop-down." - names, err := a.Users.UserUserNameToIdMap(ctx, iam.ListAccountUsersRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Account Users drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks user ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks user id") - } - updateReq.Id = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Account Users drop-down." + names, err := a.Users.UserUserNameToIdMap(ctx, iam.ListAccountUsersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Account Users drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Databricks user ID") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have databricks user id") + } + updateReq.Id = args[0] err = a.Users.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 5896c5cb..462b6450 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -337,9 +337,6 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(1) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -353,9 +350,8 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - updateReq.Name = args[0] } + updateReq.Name = args[0] response, err := w.Catalogs.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 7f67b26b..fd1b44e4 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -346,9 +346,6 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(1) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -362,9 +359,8 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - updateReq.Name = args[0] } + updateReq.Name = args[0] response, err := w.ExternalLocations.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index 3f46abbc..55d231fc 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -478,26 +478,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Groups drop-down." - names, err := w.Groups.GroupDisplayNameToIdMap(ctx, iam.ListGroupsRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Groups drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks group ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks group id") - } - updateReq.Id = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Groups drop-down." + names, err := w.Groups.GroupDisplayNameToIdMap(ctx, iam.ListGroupsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Groups drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Databricks group ID") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have databricks group id") + } + updateReq.Id = args[0] err = w.Groups.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index d24606cd..8c1cf4f4 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -1130,26 +1130,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." - names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Unique identifier for this pipeline") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have unique identifier for this pipeline") - } - updateReq.PipelineId = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." + names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Unique identifier for this pipeline") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have unique identifier for this pipeline") + } + updateReq.PipelineId = args[0] err = w.Pipelines.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index db2a9844..d57451cb 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -432,10 +432,8 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq sharing.UpdateProvider - var updateJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Description about the provider.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the Provider.`) @@ -458,31 +456,24 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = updateJson.Unmarshal(&updateReq) + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No NAME argument specified. Loading names for Providers drop-down." + names, err := w.Providers.ProviderInfoNameToMetastoreIdMap(ctx, sharing.ListProvidersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Providers drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The name of the Provider") if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NAME argument specified. Loading names for Providers drop-down." - names, err := w.Providers.ProviderInfoNameToMetastoreIdMap(ctx, sharing.ListProvidersRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Providers drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "The name of the Provider") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have the name of the provider") - } - updateReq.Name = args[0] + args = append(args, id) } + if len(args) != 1 { + return fmt.Errorf("expected to have the name of the provider") + } + updateReq.Name = args[0] response, err := w.Providers.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index ceed5784..736daab0 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -547,26 +547,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down." - names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Name of Recipient") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have name of recipient") - } - updateReq.Name = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No NAME argument specified. Loading names for Recipients drop-down." + names, err := w.Recipients.RecipientInfoNameToMetastoreIdMap(ctx, sharing.ListRecipientsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Name of Recipient") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have name of recipient") + } + updateReq.Name = args[0] err = w.Recipients.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index cc782253..d914e1b6 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -477,10 +477,8 @@ func newSetAlias() *cobra.Command { cmd := &cobra.Command{} var setAliasReq catalog.SetRegisteredModelAliasRequest - var setAliasJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&setAliasJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Use = "set-alias FULL_NAME ALIAS VERSION_NUM" cmd.Short = `Set a Registered Model Alias.` @@ -497,9 +495,6 @@ func newSetAlias() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(3) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -508,18 +503,11 @@ func newSetAlias() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = setAliasJson.Unmarshal(&setAliasReq) - if err != nil { - return err - } - } else { - setAliasReq.FullName = args[0] - setAliasReq.Alias = args[1] - _, err = fmt.Sscan(args[2], &setAliasReq.VersionNum) - if err != nil { - return fmt.Errorf("invalid VERSION_NUM: %s", args[2]) - } + setAliasReq.FullName = args[0] + setAliasReq.Alias = args[1] + _, err = fmt.Sscan(args[2], &setAliasReq.VersionNum) + if err != nil { + return fmt.Errorf("invalid VERSION_NUM: %s", args[2]) } response, err := w.RegisteredModels.SetAlias(ctx, setAliasReq) diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index 1958dd21..baeec349 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -480,26 +480,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Service Principals drop-down." - names, err := w.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListServicePrincipalsRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Service Principals drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks service principal ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks service principal id") - } - updateReq.Id = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Service Principals drop-down." + names, err := w.ServicePrincipals.ServicePrincipalDisplayNameToIdMap(ctx, iam.ListServicePrincipalsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Service Principals drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Databricks service principal ID") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have databricks service principal id") + } + updateReq.Id = args[0] err = w.ServicePrincipals.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index cf96b8b3..b542196d 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -398,9 +398,6 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(1) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -414,9 +411,8 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - updateReq.Name = args[0] } + updateReq.Name = args[0] response, err := w.Shares.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 9754d0ff..fb0ae475 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -378,26 +378,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down." - names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "The credential name") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have the credential name") - } - updateReq.Name = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down." + names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The credential name") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the credential name") + } + updateReq.Name = args[0] response, err := w.StorageCredentials.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index b1a8b057..6fe4b4f6 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -660,26 +660,25 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Users drop-down." - names, err := w.Users.UserUserNameToIdMap(ctx, iam.ListUsersRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Users drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks user ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks user id") - } - updateReq.Id = args[0] } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Users drop-down." + names, err := w.Users.UserUserNameToIdMap(ctx, iam.ListUsersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Users drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Databricks user ID") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have databricks user id") + } + updateReq.Id = args[0] err = w.Users.Update(ctx, updateReq) if err != nil { From 5712845329b9b1603e471008cf839753dc96f1e5 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 19 Oct 2023 20:56:54 +0200 Subject: [PATCH 171/310] Make default dev semver a const (#891) ## Changes ## Tests --- internal/build/info.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/build/info.go b/internal/build/info.go index ca24ea7f..8ddf4d4f 100644 --- a/internal/build/info.go +++ b/internal/build/info.go @@ -33,7 +33,7 @@ var info Info var once sync.Once -var DefaultSemver = "0.0.0-dev" +const DefaultSemver = "0.0.0-dev" // getDefaultBuildVersion uses build information stored by Go itself // to synthesize a build version if one wasn't set. From 7b1d972b3338f5792362235ae46d77acfd5b8184 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 20 Oct 2023 14:32:04 +0200 Subject: [PATCH 172/310] Do not emit wheel wrapper error when python_wheel_wrapper setting is true (#894) ## Changes Do not emit wheel wrapper error when python_wheel_wrapper setting is true Fixes #892 ## Tests Added an regression test --- bundle/python/warning.go | 8 ++++++ bundle/python/warning_test.go | 46 +++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/bundle/python/warning.go b/bundle/python/warning.go index 443b8fd2..01b639ef 100644 --- a/bundle/python/warning.go +++ b/bundle/python/warning.go @@ -20,12 +20,20 @@ func WrapperWarning() bundle.Mutator { } func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) error { + if isPythonWheelWrapperOn(b) { + return nil + } + if hasIncompatibleWheelTasks(ctx, b) { return fmt.Errorf("python wheel tasks with local libraries require compute with DBR 13.1+. 
Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") } return nil } +func isPythonWheelWrapperOn(b *bundle.Bundle) bool { + return b.Config.Experimental != nil && b.Config.Experimental.PythonWheelWrapper +} + func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool { tasks := libraries.FindAllWheelTasksWithLocalLibraries(b) for _, task := range tasks { diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index 83bc142f..f822f113 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -209,6 +209,9 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) { } require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) + + err := bundle.Apply(context.Background(), b, WrapperWarning()) + require.ErrorContains(t, err, "python wheel tasks with local libraries require compute with DBR 13.1+.") } func TestIncompatibleWheelTasksWithExistingClusterId(t *testing.T) { @@ -337,6 +340,49 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { require.False(t, hasIncompatibleWheelTasks(context.Background(), b)) } +func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Experimental: &config.Experimental{ + PythonWheelWrapper: true, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "12.2.x-scala2.12", + }, + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + { + TaskKey: "key2", + PythonWheelTask: &jobs.PythonWheelTask{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.1.x-scala2.12", + }, + Libraries: []compute.Library{ + {Whl: "./dist/test.whl"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, WrapperWarning()) + require.NoError(t, err) +} + func TestSparkVersionLowerThanExpected(t *testing.T) { testCases := map[string]bool{ "13.1.x-scala2.12": false, From ab05f8e6e7ec2c7302dfc57744abea8db07b5f2f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 20 Oct 2023 14:56:59 +0200 Subject: [PATCH 173/310] New YAML loader to support configuration location (#828) ## Changes In order to support variable interpolation on fields that aren't a string in the resource types, we need a separate representation of the bundle configuration tree with the type equivalent of Go's `any`. But instead of using `any` directly, we can do better and use a custom type equivalent to `any` that captures additional metadata. In this PR, the additional metadata is limited to the origin of the configuration value (file, line number, and column). The YAML in this commit uses the upstream YAML parser's `yaml.Node` type to get access to location information. It reimplements the loader that takes the `yaml.Node` structure and turns it into the configuration tree we need. 
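To make the location metadata concrete, here is a minimal sketch using only the `libs/config` API introduced below (the YAML file name and positions are illustrative):

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/config"
)

func main() {
	// The YAML loader produces a tree like this, recording the file, line,
	// and column of every node it decodes.
	v := config.NewValue(
		map[string]config.Value{
			"host": config.NewValue(
				"https://example.com",
				config.Location{File: "databricks.yml", Line: 2, Column: 9},
			),
		},
		config.Location{File: "databricks.yml", Line: 1, Column: 1},
	)

	// Lookups preserve the origin, so later passes (type checking, merging,
	// variable interpolation) can report precise error locations.
	fmt.Println(v.Get("host").Location()) // databricks.yml:2:9
}
```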
Next steps after this PR: * Implement configuration tree type checking (against a Go type) * Implement configuration tree merging (to replace the current merge functionality) * Implement conversion to and from the bundle configuration struct * Perform variable interpolation against this configuration tree (to support variable interpolation for ints) * (later) Implement a `jsonloader` that produces the same tree and includes location information ## Tests The tests in `yamlloader` perform an equality check on the untyped output of loading a YAML file between the upstream YAML loader and this loader. The YAML examples were generated by prompting ChatGPT for examples that showcase anchors, primitive values, edge cases, etc. --- go.mod | 3 +- libs/config/location.go | 13 + libs/config/location_test.go | 13 + libs/config/value.go | 110 +++++++++ libs/config/value_test.go | 37 +++ libs/config/yamlloader/loader.go | 227 ++++++++++++++++++ libs/config/yamlloader/testdata/anchor_01.yml | 12 + libs/config/yamlloader/testdata/anchor_02.yml | 13 + libs/config/yamlloader/testdata/anchor_03.yml | 10 + libs/config/yamlloader/testdata/anchor_04.yml | 16 ++ libs/config/yamlloader/testdata/anchor_05.yml | 15 ++ libs/config/yamlloader/testdata/anchor_06.yml | 5 + libs/config/yamlloader/testdata/anchor_07.yml | 10 + libs/config/yamlloader/testdata/anchor_08.yml | 10 + libs/config/yamlloader/testdata/error_01.yml | 6 + libs/config/yamlloader/testdata/error_02.yml | 6 + libs/config/yamlloader/testdata/error_03.yml | 4 + libs/config/yamlloader/testdata/mix_01.yml | 90 +++++++ libs/config/yamlloader/testdata/mix_02.yml | 49 ++++ libs/config/yamlloader/yaml.go | 19 ++ libs/config/yamlloader/yaml_anchor_test.go | 117 +++++++++ libs/config/yamlloader/yaml_error_test.go | 36 +++ libs/config/yamlloader/yaml_mix_test.go | 26 ++ libs/config/yamlloader/yaml_test.go | 30 +++ 24 files changed, 876 insertions(+), 1 deletion(-) create mode 100644 libs/config/location.go create mode 100644 libs/config/location_test.go create mode 100644 libs/config/value.go create mode 100644 libs/config/value_test.go create mode 100644 libs/config/yamlloader/loader.go create mode 100644 libs/config/yamlloader/testdata/anchor_01.yml create mode 100644 libs/config/yamlloader/testdata/anchor_02.yml create mode 100644 libs/config/yamlloader/testdata/anchor_03.yml create mode 100644 libs/config/yamlloader/testdata/anchor_04.yml create mode 100644 libs/config/yamlloader/testdata/anchor_05.yml create mode 100644 libs/config/yamlloader/testdata/anchor_06.yml create mode 100644 libs/config/yamlloader/testdata/anchor_07.yml create mode 100644 libs/config/yamlloader/testdata/anchor_08.yml create mode 100644 libs/config/yamlloader/testdata/error_01.yml create mode 100644 libs/config/yamlloader/testdata/error_02.yml create mode 100644 libs/config/yamlloader/testdata/error_03.yml create mode 100644 libs/config/yamlloader/testdata/mix_01.yml create mode 100644 libs/config/yamlloader/testdata/mix_02.yml create mode 100644 libs/config/yamlloader/yaml.go create mode 100644 libs/config/yamlloader/yaml_anchor_test.go create mode 100644 libs/config/yamlloader/yaml_error_test.go create mode 100644 libs/config/yamlloader/yaml_mix_test.go create mode 100644 libs/config/yamlloader/yaml_test.go diff --git a/go.mod b/go.mod index a732b9c1..c94adbb9 100644 --- a/go.mod +++ b/go.mod @@ -31,6 +31,8 @@ require ( gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) +require gopkg.in/yaml.v3 v3.0.1 + require ( cloud.google.com/go/compute v1.23.0 // indirect 
cloud.google.com/go/compute/metadata v0.2.3 // indirect @@ -60,5 +62,4 @@ require ( google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/libs/config/location.go b/libs/config/location.go new file mode 100644 index 00000000..534b21c2 --- /dev/null +++ b/libs/config/location.go @@ -0,0 +1,13 @@ +package config + +import "fmt" + +type Location struct { + File string + Line int + Column int +} + +func (l Location) String() string { + return fmt.Sprintf("%s:%d:%d", l.File, l.Line, l.Column) +} diff --git a/libs/config/location_test.go b/libs/config/location_test.go new file mode 100644 index 00000000..31013193 --- /dev/null +++ b/libs/config/location_test.go @@ -0,0 +1,13 @@ +package config_test + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" +) + +func TestLocation(t *testing.T) { + loc := config.Location{File: "file", Line: 1, Column: 2} + assert.Equal(t, "file:1:2", loc.String()) +} diff --git a/libs/config/value.go b/libs/config/value.go new file mode 100644 index 00000000..994aec38 --- /dev/null +++ b/libs/config/value.go @@ -0,0 +1,110 @@ +package config + +import "time" + +type Value struct { + v any + l Location + + // Whether or not this value is an anchor. + // If this node doesn't map to a type, we don't need to warn about it. + anchor bool +} + +// NilValue is equal to the zero-value of Value. +var NilValue = Value{} + +// NewValue constructs a new Value with the given value and location. +func NewValue(v any, loc Location) Value { + return Value{ + v: v, + l: loc, + } +} + +func (v Value) AsMap() (map[string]Value, bool) { + m, ok := v.v.(map[string]Value) + return m, ok +} + +func (v Value) Location() Location { + return v.l +} + +func (v Value) AsAny() any { + switch vv := v.v.(type) { + case map[string]Value: + m := make(map[string]any) + for k, v := range vv { + m[k] = v.AsAny() + } + return m + case []Value: + a := make([]any, len(vv)) + for i, v := range vv { + a[i] = v.AsAny() + } + return a + case string: + return vv + case bool: + return vv + case int: + return vv + case int32: + return vv + case int64: + return vv + case float32: + return vv + case float64: + return vv + case time.Time: + return vv + case nil: + return nil + default: + // Panic because we only want to deal with known types. 
+ panic("not handled") + } +} + +func (v Value) Get(key string) Value { + m, ok := v.AsMap() + if !ok { + return NilValue + } + + vv, ok := m[key] + if !ok { + return NilValue + } + + return vv +} + +func (v Value) Index(i int) Value { + s, ok := v.v.([]Value) + if !ok { + return NilValue + } + + if i < 0 || i >= len(s) { + return NilValue + } + + return s[i] +} + +func (v Value) MarkAnchor() Value { + return Value{ + v: v.v, + l: v.l, + + anchor: true, + } +} + +func (v Value) IsAnchor() bool { + return v.anchor +} diff --git a/libs/config/value_test.go b/libs/config/value_test.go new file mode 100644 index 00000000..cb8ef16a --- /dev/null +++ b/libs/config/value_test.go @@ -0,0 +1,37 @@ +package config_test + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" +) + +func TestValueIsAnchor(t *testing.T) { + var zero config.Value + assert.False(t, zero.IsAnchor()) + mark := zero.MarkAnchor() + assert.True(t, mark.IsAnchor()) +} + +func TestValueAsMap(t *testing.T) { + var zeroValue config.Value + m, ok := zeroValue.AsMap() + assert.False(t, ok) + assert.Nil(t, m) + + var intValue = config.NewValue(1, config.Location{}) + m, ok = intValue.AsMap() + assert.False(t, ok) + assert.Nil(t, m) + + var mapValue = config.NewValue( + map[string]config.Value{ + "key": config.NewValue("value", config.Location{File: "file", Line: 1, Column: 2}), + }, + config.Location{File: "file", Line: 1, Column: 2}, + ) + m, ok = mapValue.AsMap() + assert.True(t, ok) + assert.Len(t, m, 1) +} diff --git a/libs/config/yamlloader/loader.go b/libs/config/yamlloader/loader.go new file mode 100644 index 00000000..6472c137 --- /dev/null +++ b/libs/config/yamlloader/loader.go @@ -0,0 +1,227 @@ +package yamlloader + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + "github.com/databricks/cli/libs/config" + "gopkg.in/yaml.v3" +) + +type loader struct { + path string +} + +func errorf(loc config.Location, format string, args ...interface{}) error { + return fmt.Errorf("yaml (%s): %s", loc, fmt.Sprintf(format, args...)) +} + +func newLoader(path string) *loader { + return &loader{ + path: path, + } +} + +func (d *loader) location(node *yaml.Node) config.Location { + return config.Location{ + File: d.path, + Line: node.Line, + Column: node.Column, + } +} + +func (d *loader) load(node *yaml.Node) (config.Value, error) { + loc := config.Location{ + File: d.path, + Line: node.Line, + Column: node.Column, + } + + var value config.Value + var err error + + switch node.Kind { + case yaml.DocumentNode: + value, err = d.loadDocument(node, loc) + case yaml.SequenceNode: + value, err = d.loadSequence(node, loc) + case yaml.MappingNode: + value, err = d.loadMapping(node, loc) + case yaml.ScalarNode: + value, err = d.loadScalar(node, loc) + case yaml.AliasNode: + value, err = d.loadAlias(node, loc) + default: + return config.NilValue, errorf(loc, "unknown node kind: %v", node.Kind) + } + + if err != nil { + return value, err + } + + // Mark value as anchor if needed. + // If this node doesn't map to a type, we don't need to warn about it. 
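+	// The flag is metadata only: the anchored node itself is loaded the
+	// same way as any other node.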
+ if node.Anchor != "" { + value = value.MarkAnchor() + } + + return value, nil +} + +func (d *loader) loadDocument(node *yaml.Node, loc config.Location) (config.Value, error) { + return d.load(node.Content[0]) +} + +func (d *loader) loadSequence(node *yaml.Node, loc config.Location) (config.Value, error) { + acc := make([]config.Value, len(node.Content)) + for i, n := range node.Content { + v, err := d.load(n) + if err != nil { + return config.NilValue, err + } + + acc[i] = v + } + + return config.NewValue(acc, loc), nil +} + +func (d *loader) loadMapping(node *yaml.Node, loc config.Location) (config.Value, error) { + var merge *yaml.Node + + acc := make(map[string]config.Value) + for i := 0; i < len(node.Content); i += 2 { + key := node.Content[i] + val := node.Content[i+1] + + // Assert that keys are strings + if key.Kind != yaml.ScalarNode { + return config.NilValue, errorf(loc, "key is not a scalar") + } + + st := key.ShortTag() + switch st { + case "!!str": + // OK + case "!!merge": + if merge != nil { + panic("merge node already set") + } + merge = val + continue + default: + return config.NilValue, errorf(loc, "invalid key tag: %v", st) + } + + v, err := d.load(val) + if err != nil { + return config.NilValue, err + } + + acc[key.Value] = v + } + + if merge == nil { + return config.NewValue(acc, loc), nil + } + + // Build location for the merge node. + var mloc = d.location(merge) + var merr = errorf(mloc, "map merge requires map or sequence of maps as the value") + + // Flatten the merge node into a slice of nodes. + // It can be either a single node or a sequence of nodes. + var mnodes []*yaml.Node + switch merge.Kind { + case yaml.SequenceNode: + mnodes = merge.Content + case yaml.AliasNode: + mnodes = []*yaml.Node{merge} + default: + return config.NilValue, merr + } + + // Build a sequence of values to merge. + // The entries that we already accumulated have precedence. + var seq []map[string]config.Value + for _, n := range mnodes { + v, err := d.load(n) + if err != nil { + return config.NilValue, err + } + m, ok := v.AsMap() + if !ok { + return config.NilValue, merr + } + seq = append(seq, m) + } + + // Append the accumulated entries to the sequence. + seq = append(seq, acc) + out := make(map[string]config.Value) + for _, m := range seq { + for k, v := range m { + out[k] = v + } + } + + return config.NewValue(out, loc), nil +} + +func (d *loader) loadScalar(node *yaml.Node, loc config.Location) (config.Value, error) { + st := node.ShortTag() + switch st { + case "!!str": + return config.NewValue(node.Value, loc), nil + case "!!bool": + switch strings.ToLower(node.Value) { + case "true": + return config.NewValue(true, loc), nil + case "false": + return config.NewValue(false, loc), nil + default: + return config.NilValue, errorf(loc, "invalid bool value: %v", node.Value) + } + case "!!int": + i64, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return config.NilValue, errorf(loc, "invalid int value: %v", node.Value) + } + // Use regular int type instead of int64 if possible. 
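+		// The int32 bounds make the choice identical on 32-bit and
+		// 64-bit platforms.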
+ if i64 >= math.MinInt32 && i64 <= math.MaxInt32 { + return config.NewValue(int(i64), loc), nil + } + return config.NewValue(i64, loc), nil + case "!!float": + f64, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return config.NilValue, errorf(loc, "invalid float value: %v", node.Value) + } + return config.NewValue(f64, loc), nil + case "!!null": + return config.NewValue(nil, loc), nil + case "!!timestamp": + // Try a couple of layouts + for _, layout := range []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + } { + t, terr := time.Parse(layout, node.Value) + if terr == nil { + return config.NewValue(t, loc), nil + } + } + return config.NilValue, errorf(loc, "invalid timestamp value: %v", node.Value) + default: + return config.NilValue, errorf(loc, "unknown tag: %v", st) + } +} + +func (d *loader) loadAlias(node *yaml.Node, loc config.Location) (config.Value, error) { + return d.load(node.Alias) +} diff --git a/libs/config/yamlloader/testdata/anchor_01.yml b/libs/config/yamlloader/testdata/anchor_01.yml new file mode 100644 index 00000000..b677d60b --- /dev/null +++ b/libs/config/yamlloader/testdata/anchor_01.yml @@ -0,0 +1,12 @@ +# 1. Basic Anchor and Alias +defaults: &DEFAULTS + color: red + size: large + +shirt1: + <<: *DEFAULTS + pattern: striped + +shirt2: + <<: *DEFAULTS + pattern: plain diff --git a/libs/config/yamlloader/testdata/anchor_02.yml b/libs/config/yamlloader/testdata/anchor_02.yml new file mode 100644 index 00000000..86650987 --- /dev/null +++ b/libs/config/yamlloader/testdata/anchor_02.yml @@ -0,0 +1,13 @@ +# 2. Merging Anchors +# Here, multiple anchors can be merged into a single item. +defaults: &DEFAULTS + color: red + size: large + +materials: &MATERIALS + primary: cotton + secondary: polyester + +shirt: + <<: [*DEFAULTS, *MATERIALS] + pattern: striped diff --git a/libs/config/yamlloader/testdata/anchor_03.yml b/libs/config/yamlloader/testdata/anchor_03.yml new file mode 100644 index 00000000..f06b7999 --- /dev/null +++ b/libs/config/yamlloader/testdata/anchor_03.yml @@ -0,0 +1,10 @@ +# 3. Overriding Merged Anchor Values +# You can override values when merging. +defaults: &DEFAULTS + color: red + size: large + pattern: plain + +shirt: + <<: *DEFAULTS + color: blue diff --git a/libs/config/yamlloader/testdata/anchor_04.yml b/libs/config/yamlloader/testdata/anchor_04.yml new file mode 100644 index 00000000..bbc4bc02 --- /dev/null +++ b/libs/config/yamlloader/testdata/anchor_04.yml @@ -0,0 +1,16 @@ +# 4. Nested Anchors +# This demonstrates the reuse of nested content. +address: &ADDRESS + city: San Francisco + state: CA + country: USA + +person1: + name: Alice + address: *ADDRESS + +person2: + name: Bob + address: + <<: *ADDRESS + city: Los Angeles diff --git a/libs/config/yamlloader/testdata/anchor_05.yml b/libs/config/yamlloader/testdata/anchor_05.yml new file mode 100644 index 00000000..9a4c961b --- /dev/null +++ b/libs/config/yamlloader/testdata/anchor_05.yml @@ -0,0 +1,15 @@ +# 5. Using Anchors for List Items +# You can use anchors for list items too. 
+features: &FEATURES + - wifi + - bluetooth + +phone1: + name: PhoneA + features: *FEATURES + +phone2: + name: PhoneB + features: + - camera + - *FEATURES diff --git a/libs/config/yamlloader/testdata/anchor_06.yml b/libs/config/yamlloader/testdata/anchor_06.yml new file mode 100644 index 00000000..f27650fa --- /dev/null +++ b/libs/config/yamlloader/testdata/anchor_06.yml @@ -0,0 +1,5 @@ +# 6. String Anchors +commonMessage: &msg "Hello, World!" + +greeting1: *msg +greeting2: *msg diff --git a/libs/config/yamlloader/testdata/anchor_07.yml b/libs/config/yamlloader/testdata/anchor_07.yml new file mode 100644 index 00000000..b8f32214 --- /dev/null +++ b/libs/config/yamlloader/testdata/anchor_07.yml @@ -0,0 +1,10 @@ +# 7. Number Anchors +defaultAge: &age 25 + +person1: + name: Alice + age: *age + +person2: + name: Bob + age: *age diff --git a/libs/config/yamlloader/testdata/anchor_08.yml b/libs/config/yamlloader/testdata/anchor_08.yml new file mode 100644 index 00000000..8b01e21f --- /dev/null +++ b/libs/config/yamlloader/testdata/anchor_08.yml @@ -0,0 +1,10 @@ +# 8. Boolean Anchors +isActive: &active true + +user1: + username: user1 + active: *active + +user2: + username: user2 + active: *active diff --git a/libs/config/yamlloader/testdata/error_01.yml b/libs/config/yamlloader/testdata/error_01.yml new file mode 100644 index 00000000..11b96486 --- /dev/null +++ b/libs/config/yamlloader/testdata/error_01.yml @@ -0,0 +1,6 @@ +# Use string anchor to extend a mapping. +str: &str "Hello world!" + +map: + <<: *str + key: value diff --git a/libs/config/yamlloader/testdata/error_02.yml b/libs/config/yamlloader/testdata/error_02.yml new file mode 100644 index 00000000..5e9c6276 --- /dev/null +++ b/libs/config/yamlloader/testdata/error_02.yml @@ -0,0 +1,6 @@ +# Use string anchor inside sequence to extend a mapping. +str: &str "Hello world!" + +map: + <<: [*str] + key: value diff --git a/libs/config/yamlloader/testdata/error_03.yml b/libs/config/yamlloader/testdata/error_03.yml new file mode 100644 index 00000000..c4d3c397 --- /dev/null +++ b/libs/config/yamlloader/testdata/error_03.yml @@ -0,0 +1,4 @@ +# Extend a mapping with a literal string. +map: + <<: value + key: value diff --git a/libs/config/yamlloader/testdata/mix_01.yml b/libs/config/yamlloader/testdata/mix_01.yml new file mode 100644 index 00000000..98706bf6 --- /dev/null +++ b/libs/config/yamlloader/testdata/mix_01.yml @@ -0,0 +1,90 @@ +# Scalars + +# Strings can be unquoted +name: John Doe + +# Strings can be single quoted +single_quoted_string: 'This is a single quoted string' + +# Strings can be double quoted (allows for escape sequences) +double_quoted_string: "This is a double quoted string with an escaped newline \n and tab \t." + +# Multiline string with folded style (newlines become spaces) +folded_style: > + This is a very + long string that + spans several lines. 
+ +# Multiline string with literal block style (newlines are preserved) +literal_style: | + Line 1 + Line 2 + Line 3 + +# Integers +age: 30 + +# Floating point +price: 19.99 + +# Boolean values +is_student: true +is_employed: false + +# Null value +middle_name: ~ # null can also be represented with 'null' + +# Timestamp +timestamp: 2023-01-01T12:00:00Z + +# Sequences (lists) + +# Inline list +fruits: [apple, banana, cherry] + +# Block style list +books: + - 'Moby Dick' + - '1984' + - 'Pride and Prejudice' + +# Mappings (dictionaries) + +# Inline dictionary +address: {street: '123 Main St', city: 'Anytown', zip: '12345'} + +# Block style dictionary +employee: + first_name: Jane + last_name: Smith + age: 40 + +# Edge cases and advanced features + +# Strings that look like other types must be quoted +looks_like_number: '12345' +looks_like_boolean: 'yes' +looks_like_null: 'null' +looks_like_timestamp: '2023-01-01T12:00:00Z' + +# Using anchors and aliases to reuse properties +base_address: &base + street: '456 Elm St' + city: 'Sometown' + +office_address: + <<: *base # Merge the base address into this mapping + suite: 500 + +# Nested structures +users: + - name: Alice + age: 28 + interests: + - reading + - cycling + - name: Bob + age: 35 + interests: + - cooking + - hiking diff --git a/libs/config/yamlloader/testdata/mix_02.yml b/libs/config/yamlloader/testdata/mix_02.yml new file mode 100644 index 00000000..757d626d --- /dev/null +++ b/libs/config/yamlloader/testdata/mix_02.yml @@ -0,0 +1,49 @@ +# Scalars with special characters + +# Commas and square brackets in strings should be enclosed in quotes +special_chars: "[This, string] has, special chars." + +# Strings starting with reserved indicators must be quoted +reserved_string: "@not_a_directive" +colon_string: "this: looks like a mapping, but it's not" + +# Explicit data type declaration +explicit_string: !!str 12345 + +# Sequences with nested mappings and lists + +teams: + - + name: Team A + members: + - Alice + - Bob + tasks: + - task1: + status: incomplete + due_date: 2023-01-15 + - task2: + status: complete + +# Complex mapping keys +? 
| + Multi-line key + which is unusual but valid +: multi-line key's value + +"complex key: with colon": complex key's value + +# Set (a mapping with null values) +set_example: + item1: ~ + item2: ~ + +# Merge multiple mappings (with override) +base_colors: &colors + red: "#FF0000" + blue: "#0000FF" + green: "#00FF00" + +theme: + <<: *colors + blue: "#001122" # Overriding the blue color from base_colors diff --git a/libs/config/yamlloader/yaml.go b/libs/config/yamlloader/yaml.go new file mode 100644 index 00000000..0f0f4e60 --- /dev/null +++ b/libs/config/yamlloader/yaml.go @@ -0,0 +1,19 @@ +package yamlloader + +import ( + "io" + + "github.com/databricks/cli/libs/config" + "gopkg.in/yaml.v3" +) + +func LoadYAML(path string, r io.Reader) (config.Value, error) { + var node yaml.Node + dec := yaml.NewDecoder(r) + err := dec.Decode(&node) + if err != nil { + return config.NilValue, err + } + + return newLoader(path).load(&node) +} diff --git a/libs/config/yamlloader/yaml_anchor_test.go b/libs/config/yamlloader/yaml_anchor_test.go new file mode 100644 index 00000000..a8b66686 --- /dev/null +++ b/libs/config/yamlloader/yaml_anchor_test.go @@ -0,0 +1,117 @@ +package yamlloader_test + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" +) + +func TestYAMLAnchor01(t *testing.T) { + file := "testdata/anchor_01.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + assert.True(t, self.Get("defaults").IsAnchor()) + assert.False(t, self.Get("shirt1").IsAnchor()) + assert.False(t, self.Get("shirt2").IsAnchor()) + + pattern := self.Get("shirt1").Get("pattern") + assert.Equal(t, "striped", pattern.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 8, Column: 12}, pattern.Location()) +} + +func TestYAMLAnchor02(t *testing.T) { + file := "testdata/anchor_02.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + color := self.Get("shirt").Get("color") + assert.Equal(t, "red", color.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 4, Column: 10}, color.Location()) + + primary := self.Get("shirt").Get("primary") + assert.Equal(t, "cotton", primary.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 8, Column: 12}, primary.Location()) + + pattern := self.Get("shirt").Get("pattern") + assert.Equal(t, "striped", pattern.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 13, Column: 12}, pattern.Location()) +} + +func TestYAMLAnchor03(t *testing.T) { + file := "testdata/anchor_03.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + // Assert the override took place. 
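+	// The location must point at the overriding "color: blue" entry
+	// under "shirt", not at the anchored default.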
+ blue := self.Get("shirt").Get("color") + assert.Equal(t, "blue", blue.AsAny()) + assert.Equal(t, file, blue.Location().File) + assert.Equal(t, 10, blue.Location().Line) + assert.Equal(t, 10, blue.Location().Column) +} + +func TestYAMLAnchor04(t *testing.T) { + file := "testdata/anchor_04.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + p1 := self.Get("person1").Get("address").Get("city") + assert.Equal(t, "San Francisco", p1.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 4, Column: 9}, p1.Location()) + + p2 := self.Get("person2").Get("address").Get("city") + assert.Equal(t, "Los Angeles", p2.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 16, Column: 11}, p2.Location()) +} + +func TestYAMLAnchor05(t *testing.T) { + file := "testdata/anchor_05.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + features := self.Get("phone1").Get("features") + assert.Equal(t, "wifi", features.Index(0).AsAny()) + assert.Equal(t, config.Location{File: file, Line: 4, Column: 5}, features.Index(0).Location()) + assert.Equal(t, "bluetooth", features.Index(1).AsAny()) + assert.Equal(t, config.Location{File: file, Line: 5, Column: 5}, features.Index(1).Location()) +} + +func TestYAMLAnchor06(t *testing.T) { + file := "testdata/anchor_06.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + greeting := self.Get("greeting1") + assert.Equal(t, "Hello, World!", greeting.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 2, Column: 16}, greeting.Location()) +} + +func TestYAMLAnchor07(t *testing.T) { + file := "testdata/anchor_07.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + name := self.Get("person1").Get("name") + assert.Equal(t, "Alice", name.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 5, Column: 9}, name.Location()) + + age := self.Get("person1").Get("age") + assert.Equal(t, 25, age.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 2, Column: 13}, age.Location()) +} + +func TestYAMLAnchor08(t *testing.T) { + file := "testdata/anchor_08.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + username := self.Get("user1").Get("username") + assert.Equal(t, "user1", username.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 5, Column: 13}, username.Location()) + + active := self.Get("user1").Get("active") + assert.Equal(t, true, active.AsAny()) + assert.Equal(t, config.Location{File: file, Line: 2, Column: 11}, active.Location()) +} diff --git a/libs/config/yamlloader/yaml_error_test.go b/libs/config/yamlloader/yaml_error_test.go new file mode 100644 index 00000000..2685042f --- /dev/null +++ b/libs/config/yamlloader/yaml_error_test.go @@ -0,0 +1,36 @@ +package yamlloader_test + +import ( + "bytes" + "os" + "testing" + + "github.com/databricks/cli/libs/config/yamlloader" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" +) + +func TestYAMLErrorMapMerge(t *testing.T) { + for _, file := range []string{ + "testdata/error_01.yml", + "testdata/error_02.yml", + "testdata/error_03.yml", + } { + input, err := os.ReadFile(file) + require.NoError(t, err) + + t.Run(file, func(t *testing.T) { + t.Run("reference", func(t *testing.T) { + var ref any + err = yaml.Unmarshal(input, &ref) + assert.ErrorContains(t, err, "map merge requires map or sequence of maps as the value") + }) + + t.Run("self", func(t *testing.T) { + _, err := yamlloader.LoadYAML(file, 
bytes.NewBuffer(input)) + assert.ErrorContains(t, err, "map merge requires map or sequence of maps as the value") + }) + }) + } +} diff --git a/libs/config/yamlloader/yaml_mix_test.go b/libs/config/yamlloader/yaml_mix_test.go new file mode 100644 index 00000000..9cd0753d --- /dev/null +++ b/libs/config/yamlloader/yaml_mix_test.go @@ -0,0 +1,26 @@ +package yamlloader_test + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" +) + +func TestYAMLMix01(t *testing.T) { + file := "testdata/mix_01.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + assert.True(t, self.Get("base_address").IsAnchor()) + assert.False(t, self.Get("office_address").IsAnchor()) +} + +func TestYAMLMix02(t *testing.T) { + file := "testdata/mix_02.yml" + self := loadYAML(t, file) + assert.NotEqual(t, config.NilValue, self) + + assert.True(t, self.Get("base_colors").IsAnchor()) + assert.False(t, self.Get("theme").IsAnchor()) +} diff --git a/libs/config/yamlloader/yaml_test.go b/libs/config/yamlloader/yaml_test.go new file mode 100644 index 00000000..017caccd --- /dev/null +++ b/libs/config/yamlloader/yaml_test.go @@ -0,0 +1,30 @@ +package yamlloader_test + +import ( + "bytes" + "os" + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/config/yamlloader" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" +) + +func loadYAML(t *testing.T, path string) config.Value { + input, err := os.ReadFile(path) + require.NoError(t, err) + + var ref any + err = yaml.Unmarshal(input, &ref) + require.NoError(t, err) + + self, err := yamlloader.LoadYAML(path, bytes.NewBuffer(input)) + require.NoError(t, err) + assert.NotNil(t, self) + + // Deep-equal the two values to ensure that the loader is producing + assert.EqualValues(t, ref, self.AsAny()) + return self +} From d4be40520c5ae3698e3f9d342e8a986981a7529f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 20 Oct 2023 15:10:31 +0200 Subject: [PATCH 174/310] Resolve configuration before performing verification (#890) ## Changes If a bundle configuration specifies a workspace host, and the user specifies a profile to use, we perform a check to confirm that the workspace host in the bundle configuration and the workspace host from the profile are identical. If they are not, we return an error. The check was introduced in #571. Previously, the code included an assumption that the client configuration was already loaded from the environment prior to performing the check. This was not the case, and as such if the user intended to use a non-default path to `.databrickscfg`, this path was not used when performing the check. The fix does the following: * Resolve the configuration prior to performing the check. * Don't treat the configuration file not existing as an error. * Add unit tests. Fixes #884. ## Tests Unit tests and manual confirmation. 
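For illustration, here is a condensed sketch of the resulting flow in `Workspace.Client` (simplified from the diff below; error wrapping and the remaining configuration attributes are omitted):

```go
// Resolve the configuration first. This applies environment variables,
// including a non-default DATABRICKS_CONFIG_FILE, and the selected
// profile to cfg before any verification runs.
if err := cfg.EnsureResolved(); err != nil {
	return nil, err
}

// Only after resolution can the profile's host be compared reliably
// against the host from the bundle configuration.
if w.Host != "" && w.Profile != "" {
	if err := databrickscfg.ValidateConfigAndProfileHost(&cfg, w.Profile); err != nil {
		return nil, err
	}
}
```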
--- bundle/config/workspace.go | 15 +++- bundle/config/workspace_test.go | 144 ++++++++++++++++++++++++++++++ cmd/root/bundle_test.go | 2 +- libs/databrickscfg/loader.go | 1 + libs/databrickscfg/loader_test.go | 18 +--- libs/databrickscfg/ops.go | 5 +- 6 files changed, 163 insertions(+), 22 deletions(-) create mode 100644 bundle/config/workspace_test.go diff --git a/bundle/config/workspace.go b/bundle/config/workspace.go index f29d7c56..16a70afb 100644 --- a/bundle/config/workspace.go +++ b/bundle/config/workspace.go @@ -79,7 +79,7 @@ func (s User) MarshalJSON() ([]byte, error) { } func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { - cfg := databricks.Config{ + cfg := config.Config{ // Generic Host: w.Host, Profile: w.Profile, @@ -114,14 +114,23 @@ func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { } } - if w.Profile != "" && w.Host != "" { + // Resolve the configuration. This is done by [databricks.NewWorkspaceClient] as well, but here + // we need to verify that a profile, if loaded, matches the host configured in the bundle. + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + // Now that the configuration is resolved, we can verify that the host in the bundle configuration + // is identical to the host associated with the selected profile. + if w.Host != "" && w.Profile != "" { err := databrickscfg.ValidateConfigAndProfileHost(&cfg, w.Profile) if err != nil { return nil, err } } - return databricks.NewWorkspaceClient(&cfg) + return databricks.NewWorkspaceClient((*databricks.Config)(&cfg)) } func init() { diff --git a/bundle/config/workspace_test.go b/bundle/config/workspace_test.go new file mode 100644 index 00000000..3ef96325 --- /dev/null +++ b/bundle/config/workspace_test.go @@ -0,0 +1,144 @@ +package config + +import ( + "context" + "io/fs" + "path/filepath" + "runtime" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/databricks-sdk-go/config" + "github.com/stretchr/testify/assert" +) + +func setupWorkspaceTest(t *testing.T) string { + testutil.CleanupEnvironment(t) + + home := t.TempDir() + t.Setenv("HOME", home) + if runtime.GOOS == "windows" { + t.Setenv("USERPROFILE", home) + } + + return home +} + +func TestWorkspaceResolveProfileFromHost(t *testing.T) { + // If only a workspace host is specified, try to find a profile that uses + // the same workspace host (unambiguously). + w := Workspace{ + Host: "https://abc.cloud.databricks.com", + } + + t.Run("no config file", func(t *testing.T) { + setupWorkspaceTest(t) + _, err := w.Client() + assert.NoError(t, err) + }) + + t.Run("default config file", func(t *testing.T) { + setupWorkspaceTest(t) + + // This works if there is a config file with a matching profile. + databrickscfg.SaveToProfile(context.Background(), &config.Config{ + Profile: "default", + Host: "https://abc.cloud.databricks.com", + Token: "123", + }) + + client, err := w.Client() + assert.NoError(t, err) + assert.Equal(t, "default", client.Config.Profile) + }) + + t.Run("custom config file", func(t *testing.T) { + home := setupWorkspaceTest(t) + + // This works if there is a config file with a matching profile. 
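+		// The profile is written to a custom path; it is only discovered
+		// because DATABRICKS_CONFIG_FILE is set to that path below.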
+ databrickscfg.SaveToProfile(context.Background(), &config.Config{ + ConfigFile: filepath.Join(home, "customcfg"), + Profile: "custom", + Host: "https://abc.cloud.databricks.com", + Token: "123", + }) + + t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) + client, err := w.Client() + assert.NoError(t, err) + assert.Equal(t, "custom", client.Config.Profile) + }) +} + +func TestWorkspaceVerifyProfileForHost(t *testing.T) { + // If both a workspace host and a profile are specified, + // verify that the host configured in the profile matches + // the host configured in the bundle configuration. + w := Workspace{ + Host: "https://abc.cloud.databricks.com", + Profile: "abc", + } + + t.Run("no config file", func(t *testing.T) { + setupWorkspaceTest(t) + _, err := w.Client() + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + + t.Run("default config file with match", func(t *testing.T) { + setupWorkspaceTest(t) + + // This works if there is a config file with a matching profile. + databrickscfg.SaveToProfile(context.Background(), &config.Config{ + Profile: "abc", + Host: "https://abc.cloud.databricks.com", + }) + + _, err := w.Client() + assert.NoError(t, err) + }) + + t.Run("default config file with mismatch", func(t *testing.T) { + setupWorkspaceTest(t) + + // This works if there is a config file with a matching profile. + databrickscfg.SaveToProfile(context.Background(), &config.Config{ + Profile: "abc", + Host: "https://def.cloud.databricks.com", + }) + + _, err := w.Client() + assert.ErrorContains(t, err, "config host mismatch") + }) + + t.Run("custom config file with match", func(t *testing.T) { + home := setupWorkspaceTest(t) + + // This works if there is a config file with a matching profile. + databrickscfg.SaveToProfile(context.Background(), &config.Config{ + ConfigFile: filepath.Join(home, "customcfg"), + Profile: "abc", + Host: "https://abc.cloud.databricks.com", + }) + + t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) + _, err := w.Client() + assert.NoError(t, err) + }) + + t.Run("custom config file with mismatch", func(t *testing.T) { + home := setupWorkspaceTest(t) + + // This works if there is a config file with a matching profile. 
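+		// Here the host in the profile deliberately differs from the host
+		// in the bundle configuration, so the client must fail to construct.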
+ databrickscfg.SaveToProfile(context.Background(), &config.Config{ + ConfigFile: filepath.Join(home, "customcfg"), + Profile: "abc", + Host: "https://def.cloud.databricks.com", + }) + + t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) + _, err := w.Client() + assert.ErrorContains(t, err, "config host mismatch") + }) +} diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 3f9641b7..d7bae2d1 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -83,7 +83,7 @@ func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { cmd.Flag("profile").Value.Set("NOEXIST") b := setup(t, cmd, "https://x.com") - assert.PanicsWithError(t, "no matching config profiles found", func() { + assert.Panics(t, func() { b.WorkspaceClient() }) } diff --git a/libs/databrickscfg/loader.go b/libs/databrickscfg/loader.go index 05698eb4..a7985390 100644 --- a/libs/databrickscfg/loader.go +++ b/libs/databrickscfg/loader.go @@ -103,6 +103,7 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { return fmt.Errorf("%s %s profile: %w", configFile.Path(), match.Name(), err) } + cfg.Profile = match.Name() return nil } diff --git a/libs/databrickscfg/loader_test.go b/libs/databrickscfg/loader_test.go index 5fa7f7dd..0677687f 100644 --- a/libs/databrickscfg/loader_test.go +++ b/libs/databrickscfg/loader_test.go @@ -59,7 +59,7 @@ func TestLoaderErrorsOnInvalidFile(t *testing.T) { assert.ErrorContains(t, err, "unclosed section: ") } -func TestLoaderSkipssNoMatchingHost(t *testing.T) { +func TestLoaderSkipsNoMatchingHost(t *testing.T) { cfg := config.Config{ Loaders: []config.Loader{ ResolveProfileFromHost, @@ -73,20 +73,6 @@ func TestLoaderSkipssNoMatchingHost(t *testing.T) { assert.Empty(t, cfg.Token) } -func TestLoaderConfiguresMatchingHost(t *testing.T) { - cfg := config.Config{ - Loaders: []config.Loader{ - ResolveProfileFromHost, - }, - ConfigFile: "testdata/databrickscfg", - Host: "https://default/?foo=bar", - } - - err := cfg.EnsureResolved() - assert.NoError(t, err) - assert.Equal(t, "default", cfg.Token) -} - func TestLoaderMatchingHost(t *testing.T) { cfg := config.Config{ Loaders: []config.Loader{ @@ -99,6 +85,7 @@ func TestLoaderMatchingHost(t *testing.T) { err := cfg.EnsureResolved() assert.NoError(t, err) assert.Equal(t, "default", cfg.Token) + assert.Equal(t, "DEFAULT", cfg.Profile) } func TestLoaderMatchingHostWithQuery(t *testing.T) { @@ -113,6 +100,7 @@ func TestLoaderMatchingHostWithQuery(t *testing.T) { err := cfg.EnsureResolved() assert.NoError(t, err) assert.Equal(t, "query", cfg.Token) + assert.Equal(t, "query", cfg.Profile) } func TestLoaderErrorsOnMultipleMatches(t *testing.T) { diff --git a/libs/databrickscfg/ops.go b/libs/databrickscfg/ops.go index c2d6e9fa..90795afd 100644 --- a/libs/databrickscfg/ops.go +++ b/libs/databrickscfg/ops.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/databricks/cli/libs/log" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "gopkg.in/ini.v1" ) @@ -130,17 +129,17 @@ func SaveToProfile(ctx context.Context, cfg *config.Config) error { return configFile.SaveTo(configFile.Path()) } -func ValidateConfigAndProfileHost(cfg *databricks.Config, profile string) error { +func ValidateConfigAndProfileHost(cfg *config.Config, profile string) error { configFile, err := config.LoadFile(cfg.ConfigFile) if err != nil { return fmt.Errorf("cannot parse config file: %w", err) } + // Normalized version of the configured host. 
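+	// Normalization strips path and query components so that otherwise
+	// equivalent host URLs compare equal.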
host := normalizeHost(cfg.Host) match, err := findMatchingProfile(configFile, func(s *ini.Section) bool { return profile == s.Name() }) - if err != nil { return err } From 4ce279e3863e3eaa0fbc9e5d0737da647ec01f8a Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 20 Oct 2023 17:03:29 +0200 Subject: [PATCH 175/310] Added test for tasks with python wheel wrapper on (#897) ## Changes Added test for tasks with python wheel wrapper on ## Tests ``` 2023/10/20 16:42:07 [INFO] Listing secrets from ... === RUN TestAccPythonWheelTaskDeployAndRunWithWrapper python_wheel_test.go:13: aws helpers.go:43: Configuration for template: {"node_type_id":"i3.xlarge","python_wheel_wrapper":true,"spark_version":"12.2.x-scala2.12","unique_id":"224a58a5-7ecb-4e7a-9c89-c7f5ea57924e"} ... Resource deployment completed! Run URL: ... 2023-10-20 16:42:33 "[default] Test Wheel Job 224a58a5-7ecb-4e7a-9c89-c7f5ea57924e" RUNNING 2023-10-20 16:47:27 "[default] Test Wheel Job 224a58a5-7ecb-4e7a-9c89-c7f5ea57924e" TERMINATED SUCCESS helpers.go:169: [databricks stdout]: Hello from my func helpers.go:169: [databricks stdout]: Got arguments: helpers.go:169: [databricks stdout]: ['my_test_code', 'one', 'two'] ... --- PASS: TestAccPythonWheelTaskDeployAndRunWithWrapper (321.61s) PASS coverage: 93.5% of statements in ./... ok github.com/databricks/cli/internal/bundle 322.307s coverage: 93.5% of statements in ./... ``` --- internal/bundle/python_wheel_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index fd5c9acc..bfc2d8b2 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func runPythonWheelTest(t *testing.T, pythonWheelWrapper bool) { +func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bool) { env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) @@ -24,7 +24,7 @@ func runPythonWheelTest(t *testing.T, pythonWheelWrapper bool) { bundleRoot, err := initTestTemplate(t, "python_wheel_task", map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), - "spark_version": "13.2.x-snapshot-scala2.12", + "spark_version": sparkVersion, "python_wheel_wrapper": pythonWheelWrapper, }) require.NoError(t, err) @@ -44,9 +44,9 @@ func runPythonWheelTest(t *testing.T, pythonWheelWrapper bool) { } func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { - runPythonWheelTest(t, false) + runPythonWheelTest(t, "13.2.x-snapshot-scala2.12", false) } func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { - runPythonWheelTest(t, true) + runPythonWheelTest(t, "12.2.x-scala2.12", true) } From fc98c455f59b4f12b55bf50001711bb64dd2472c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 09:08:17 +0200 Subject: [PATCH 176/310] Bump github.com/mattn/go-isatty from 0.0.19 to 0.0.20 (#896) Bumps [github.com/mattn/go-isatty](https://github.com/mattn/go-isatty) from 0.0.19 to 0.0.20.
Commits:
* a7c0235 Merge pull request #74 from dkegel-fastly/dkegel-bug73-tinygo
* 13f3590 Adjust build tags to allow building on tinygo; for #73.
* See the full diff in the compare view on GitHub.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c94adbb9..75479149 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/hashicorp/terraform-json v0.17.1 // MPL 2.0 github.com/imdario/mergo v0.3.15 // BSD-3-Clause github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause - github.com/mattn/go-isatty v0.0.19 // MIT + github.com/mattn/go-isatty v0.0.20 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // BSD-2-Clause github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // MIT diff --git a/go.sum b/go.sum index 3e88ad17..06f6b0f7 100644 --- a/go.sum +++ b/go.sum @@ -118,8 +118,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/nwidger/jsoncolor v0.3.2 h1:rVJJlwAWDJShnbTYOQ5RM7yTA20INyKXlJ/fg4JMhHQ= github.com/nwidger/jsoncolor v0.3.2/go.mod h1:Cs34umxLbJvgBMnVNVqhji9BhoT/N/KinHqZptQ7cf4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= From 9049f11479e9f1ef045d3e6a250e7871ee17f8f1 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 23 Oct 2023 10:19:26 +0200 Subject: [PATCH 177/310] Fix wheel task not working with with 13.x clusters (#898) ## Changes This lets us recognize 13.x as "13.1 or higher," making it possible to use wheel tasks on 13.x-snapshot clusters. --- bundle/python/warning.go | 3 +++ bundle/python/warning_test.go | 2 ++ 2 files changed, 5 insertions(+) diff --git a/bundle/python/warning.go b/bundle/python/warning.go index 01b639ef..9b9fd8e5 100644 --- a/bundle/python/warning.go +++ b/bundle/python/warning.go @@ -79,6 +79,9 @@ func lowerThanExpectedVersion(ctx context.Context, sparkVersion string) bool { return false } + if parts[1][0] == 'x' { // treat versions like 13.x as the very latest minor (13.99) + parts[1] = "99" + } v := "v" + parts[0] + "." + parts[1] return semver.Compare(v, "v13.1") < 0 } diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index f822f113..b780160e 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -390,6 +390,8 @@ func TestSparkVersionLowerThanExpected(t *testing.T) { "13.3.x-scala2.12": false, "14.0.x-scala2.12": false, "14.1.x-scala2.12": false, + "13.x-snapshot-scala-2.12": false, + "13.x-rc-scala-2.12": false, "10.4.x-aarch64-photon-scala2.12": true, "10.4.x-scala2.12": true, "13.0.x-scala2.12": true, From 3a055f4774742ade22c680e871859e5fa5d6a91c Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 23 Oct 2023 11:42:38 +0200 Subject: [PATCH 178/310] Release v0.208.2 (#902) CLI: * Never load authentication configuration from bundle for sync command ([#889](https://github.com/databricks/cli/pull/889)). 
* Fixed requiring positional arguments for API URL parameters ([#878](https://github.com/databricks/cli/pull/878)). Bundles: * Add support for validating CLI version when loading a jsonschema object ([#883](https://github.com/databricks/cli/pull/883)). * Do not emit wheel wrapper error when python_wheel_wrapper setting is true ([#894](https://github.com/databricks/cli/pull/894)). * Resolve configuration before performing verification ([#890](https://github.com/databricks/cli/pull/890)). * Fix wheel task not working with with 13.x clusters ([#898](https://github.com/databricks/cli/pull/898)). Internal: * Skip prompt on completion hook ([#888](https://github.com/databricks/cli/pull/888)). * New YAML loader to support configuration location ([#828](https://github.com/databricks/cli/pull/828)). Dependency updates: * Bump github.com/mattn/go-isatty from 0.0.19 to 0.0.20 ([#896](https://github.com/databricks/cli/pull/896)). --- CHANGELOG.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 917301d3..8d17743c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Version changelog +## 0.208.2 + +CLI: + * Never load authentication configuration from bundle for sync command ([#889](https://github.com/databricks/cli/pull/889)). + * Fixed requiring positional arguments for API URL parameters ([#878](https://github.com/databricks/cli/pull/878)). + +Bundles: + * Add support for validating CLI version when loading a jsonschema object ([#883](https://github.com/databricks/cli/pull/883)). + * Do not emit wheel wrapper error when python_wheel_wrapper setting is true ([#894](https://github.com/databricks/cli/pull/894)). + * Resolve configuration before performing verification ([#890](https://github.com/databricks/cli/pull/890)). + * Fix wheel task not working with with 13.x clusters ([#898](https://github.com/databricks/cli/pull/898)). + +Internal: + * Skip prompt on completion hook ([#888](https://github.com/databricks/cli/pull/888)). + * New YAML loader to support configuration location ([#828](https://github.com/databricks/cli/pull/828)). + +Dependency updates: + * Bump github.com/mattn/go-isatty from 0.0.19 to 0.0.20 ([#896](https://github.com/databricks/cli/pull/896)). + ## 0.208.1 CLI: From 9f2d2b964fe990ce8a321559cae4a27eed4e8b04 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 23 Oct 2023 14:31:31 +0200 Subject: [PATCH 179/310] Fix URL for bundle template documentation (#903) ## Changes The doc URL link went stale. This PR updates the URL to the correct one. Fixes https://github.com/databricks/cli/issues/899 --- cmd/bundle/init.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 603878be..cd2af420 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -54,7 +54,7 @@ TEMPLATE_PATH optionally specifies which template to use. It can be one of the f - a local file system path with a template directory - a Git repository URL, e.g. 
https://github.com/my/repository -See https://docs.databricks.com//dev-tools/bundles/templates.html for more information on templates.`, +See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more information on templates.`, } var configFile string From 5018059444f907221ecffb20f7f1ecb2d6eddf16 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 24 Oct 2023 13:12:36 +0200 Subject: [PATCH 180/310] Library to convert config.Value to Go struct (#904) ## Changes Now that we have a new YAML loader (see #828), we need code to turn this into our Go structs. ## Tests New unit tests pass. Confirmed that we can replace our existing loader/converter with this one and that existing unit tests for bundle loading still pass. --- libs/config/convert/error.go | 16 + libs/config/convert/struct_info.go | 87 +++++ libs/config/convert/struct_info_test.go | 89 +++++ libs/config/convert/to_typed.go | 224 ++++++++++++ libs/config/convert/to_typed_test.go | 430 ++++++++++++++++++++++++ libs/config/kind.go | 64 ++++ libs/config/value.go | 113 +++++-- 7 files changed, 998 insertions(+), 25 deletions(-) create mode 100644 libs/config/convert/error.go create mode 100644 libs/config/convert/struct_info.go create mode 100644 libs/config/convert/struct_info_test.go create mode 100644 libs/config/convert/to_typed.go create mode 100644 libs/config/convert/to_typed_test.go create mode 100644 libs/config/kind.go diff --git a/libs/config/convert/error.go b/libs/config/convert/error.go new file mode 100644 index 00000000..b55668d6 --- /dev/null +++ b/libs/config/convert/error.go @@ -0,0 +1,16 @@ +package convert + +import ( + "fmt" + + "github.com/databricks/cli/libs/config" +) + +type TypeError struct { + value config.Value + msg string +} + +func (e TypeError) Error() string { + return fmt.Sprintf("%s: %s", e.value.Location(), e.msg) +} diff --git a/libs/config/convert/struct_info.go b/libs/config/convert/struct_info.go new file mode 100644 index 00000000..367b9ecd --- /dev/null +++ b/libs/config/convert/struct_info.go @@ -0,0 +1,87 @@ +package convert + +import ( + "reflect" + "strings" + "sync" +) + +// structInfo holds the type information we need to efficiently +// convert data from a [config.Value] to a Go struct. +type structInfo struct { + // Fields maps the JSON-name of the field to the field's index for use with [FieldByIndex]. + Fields map[string][]int +} + +// structInfoCache caches type information. +var structInfoCache = make(map[reflect.Type]structInfo) + +// structInfoCacheLock guards concurrent access to structInfoCache. +var structInfoCacheLock sync.Mutex + +// getStructInfo returns the [structInfo] for the given type. +// It lazily populates a cache, so the first call for a given +// type is slower than subsequent calls for that same type. +func getStructInfo(typ reflect.Type) structInfo { + structInfoCacheLock.Lock() + defer structInfoCacheLock.Unlock() + + si, ok := structInfoCache[typ] + if !ok { + si = buildStructInfo(typ) + structInfoCache[typ] = si + } + + return si +} + +// buildStructInfo populates a new [structInfo] for the given type. +func buildStructInfo(typ reflect.Type) structInfo { + var out = structInfo{ + Fields: make(map[string][]int), + } + + // Queue holds the indexes of the structs to visit. + // It is initialized with a single empty slice to visit the top level struct. + var queue [][]int = [][]int{{}} + for i := 0; i < len(queue); i++ { + prefix := queue[i] + + // Traverse embedded anonymous types (if prefix is non-empty). 
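+		// A non-empty prefix addresses an embedded struct queued below; the
+		// first iteration (empty prefix) starts at the top-level type itself.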
+ styp := typ + if len(prefix) > 0 { + styp = styp.FieldByIndex(prefix).Type + } + + // Dereference pointer type. + if styp.Kind() == reflect.Pointer { + styp = styp.Elem() + } + + nf := styp.NumField() + for j := 0; j < nf; j++ { + sf := styp.Field(j) + + // Recurse into anonymous fields. + if sf.Anonymous { + queue = append(queue, append(prefix, sf.Index...)) + continue + } + + name, _, _ := strings.Cut(sf.Tag.Get("json"), ",") + if name == "" || name == "-" { + continue + } + + // Top level fields always take precedence. + // Therefore, if it is already set, we ignore it. + if _, ok := out.Fields[name]; ok { + continue + } + + out.Fields[name] = append(prefix, sf.Index...) + } + } + + return out +} diff --git a/libs/config/convert/struct_info_test.go b/libs/config/convert/struct_info_test.go new file mode 100644 index 00000000..3079958b --- /dev/null +++ b/libs/config/convert/struct_info_test.go @@ -0,0 +1,89 @@ +package convert + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStructInfoPlain(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar,omitempty"` + + // Baz must be skipped. + Baz string `json:""` + + // Qux must be skipped. + Qux string `json:"-"` + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + assert.Len(t, si.Fields, 2) + assert.Equal(t, []int{0}, si.Fields["foo"]) + assert.Equal(t, []int{1}, si.Fields["bar"]) +} + +func TestStructInfoAnonymousByValue(t *testing.T) { + type Bar struct { + Bar string `json:"bar"` + } + + type Foo struct { + Foo string `json:"foo"` + Bar + } + + type Tmp struct { + Foo + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + assert.Len(t, si.Fields, 2) + assert.Equal(t, []int{0, 0}, si.Fields["foo"]) + assert.Equal(t, []int{0, 1, 0}, si.Fields["bar"]) +} + +func TestStructInfoAnonymousByValuePrecedence(t *testing.T) { + type Bar struct { + Bar string `json:"bar"` + } + + type Foo struct { + Foo string `json:"foo"` + Bar + } + + type Tmp struct { + // "foo" comes from [Foo]. + Foo + // "bar" comes from [Bar] directly, not through [Foo]. + Bar + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + assert.Len(t, si.Fields, 2) + assert.Equal(t, []int{0, 0}, si.Fields["foo"]) + assert.Equal(t, []int{1, 0}, si.Fields["bar"]) +} + +func TestStructInfoAnonymousByPointer(t *testing.T) { + type Bar struct { + Bar string `json:"bar"` + } + + type Foo struct { + Foo string `json:"foo"` + *Bar + } + + type Tmp struct { + *Foo + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + assert.Len(t, si.Fields, 2) + assert.Equal(t, []int{0, 0}, si.Fields["foo"]) + assert.Equal(t, []int{0, 1, 0}, si.Fields["bar"]) +} diff --git a/libs/config/convert/to_typed.go b/libs/config/convert/to_typed.go new file mode 100644 index 00000000..9915d30a --- /dev/null +++ b/libs/config/convert/to_typed.go @@ -0,0 +1,224 @@ +package convert + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/databricks/cli/libs/config" +) + +func ToTyped(dst any, src config.Value) error { + dstv := reflect.ValueOf(dst) + + // Dereference pointer if necessary + for dstv.Kind() == reflect.Pointer { + if dstv.IsNil() { + dstv.Set(reflect.New(dstv.Type().Elem())) + } + dstv = dstv.Elem() + } + + // Verify that vv is settable. 
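+	// This catches calls that pass the destination by value instead of
+	// by pointer.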
+ if !dstv.CanSet() { + panic("cannot set destination value") + } + + switch dstv.Kind() { + case reflect.Struct: + return toTypedStruct(dstv, src) + case reflect.Map: + return toTypedMap(dstv, src) + case reflect.Slice: + return toTypedSlice(dstv, src) + case reflect.String: + return toTypedString(dstv, src) + case reflect.Bool: + return toTypedBool(dstv, src) + case reflect.Int, reflect.Int32, reflect.Int64: + return toTypedInt(dstv, src) + case reflect.Float32, reflect.Float64: + return toTypedFloat(dstv, src) + } + + return fmt.Errorf("unsupported type: %s", dstv.Kind()) +} + +func toTypedStruct(dst reflect.Value, src config.Value) error { + switch src.Kind() { + case config.KindMap: + info := getStructInfo(dst.Type()) + for k, v := range src.MustMap() { + index, ok := info.Fields[k] + if !ok { + // Ignore unknown fields. + // A warning will be printed later. See PR #904. + continue + } + + // Create intermediate structs embedded as pointer types. + // Code inspired by [reflect.FieldByIndex] implementation. + f := dst + for i, x := range index { + if i > 0 { + if f.Kind() == reflect.Pointer { + if f.IsNil() { + f.Set(reflect.New(f.Type().Elem())) + } + f = f.Elem() + } + } + f = f.Field(x) + } + + err := ToTyped(f.Addr().Interface(), v) + if err != nil { + return err + } + } + + return nil + case config.KindNil: + dst.SetZero() + return nil + } + + return TypeError{ + value: src, + msg: fmt.Sprintf("expected a map, found a %s", src.Kind()), + } +} + +func toTypedMap(dst reflect.Value, src config.Value) error { + switch src.Kind() { + case config.KindMap: + m := src.MustMap() + + // Always overwrite. + dst.Set(reflect.MakeMapWithSize(dst.Type(), len(m))) + for k, v := range m { + kv := reflect.ValueOf(k) + vv := reflect.New(dst.Type().Elem()) + err := ToTyped(vv.Interface(), v) + if err != nil { + return err + } + dst.SetMapIndex(kv, vv.Elem()) + } + return nil + case config.KindNil: + dst.SetZero() + return nil + } + + return TypeError{ + value: src, + msg: fmt.Sprintf("expected a map, found a %s", src.Kind()), + } +} + +func toTypedSlice(dst reflect.Value, src config.Value) error { + switch src.Kind() { + case config.KindSequence: + seq := src.MustSequence() + + // Always overwrite. + dst.Set(reflect.MakeSlice(dst.Type(), len(seq), len(seq))) + for i := range seq { + err := ToTyped(dst.Index(i).Addr().Interface(), seq[i]) + if err != nil { + return err + } + } + return nil + case config.KindNil: + dst.SetZero() + return nil + } + + return TypeError{ + value: src, + msg: fmt.Sprintf("expected a sequence, found a %s", src.Kind()), + } +} + +func toTypedString(dst reflect.Value, src config.Value) error { + switch src.Kind() { + case config.KindString: + dst.SetString(src.MustString()) + return nil + case config.KindBool: + dst.SetString(strconv.FormatBool(src.MustBool())) + return nil + case config.KindInt: + dst.SetString(strconv.FormatInt(src.MustInt(), 10)) + return nil + case config.KindFloat: + dst.SetString(strconv.FormatFloat(src.MustFloat(), 'f', -1, 64)) + return nil + } + + return TypeError{ + value: src, + msg: fmt.Sprintf("expected a string, found a %s", src.Kind()), + } +} + +func toTypedBool(dst reflect.Value, src config.Value) error { + switch src.Kind() { + case config.KindBool: + dst.SetBool(src.MustBool()) + return nil + case config.KindString: + // See https://github.com/go-yaml/yaml/blob/f6f7691b1fdeb513f56608cd2c32c51f8194bf51/decode.go#L684-L693. 
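+		// Plain true/false already arrive as KindBool and are handled above;
+		// the loader leaves these YAML 1.1 boolean literals as strings.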
+ switch src.MustString() { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + dst.SetBool(true) + return nil + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + dst.SetBool(false) + return nil + } + } + + return TypeError{ + value: src, + msg: fmt.Sprintf("expected a boolean, found a %s", src.Kind()), + } +} + +func toTypedInt(dst reflect.Value, src config.Value) error { + switch src.Kind() { + case config.KindInt: + dst.SetInt(src.MustInt()) + return nil + case config.KindString: + if i64, err := strconv.ParseInt(src.MustString(), 10, 64); err == nil { + dst.SetInt(i64) + return nil + } + } + + return TypeError{ + value: src, + msg: fmt.Sprintf("expected an int, found a %s", src.Kind()), + } +} + +func toTypedFloat(dst reflect.Value, src config.Value) error { + switch src.Kind() { + case config.KindFloat: + dst.SetFloat(src.MustFloat()) + return nil + case config.KindString: + if f64, err := strconv.ParseFloat(src.MustString(), 64); err == nil { + dst.SetFloat(f64) + return nil + } + } + + return TypeError{ + value: src, + msg: fmt.Sprintf("expected a float, found a %s", src.Kind()), + } +} diff --git a/libs/config/convert/to_typed_test.go b/libs/config/convert/to_typed_test.go new file mode 100644 index 00000000..26e17dcc --- /dev/null +++ b/libs/config/convert/to_typed_test.go @@ -0,0 +1,430 @@ +package convert + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestToTypedStruct(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar,omitempty"` + + // Baz must be skipped. + Baz string `json:""` + + // Qux must be skipped. + Qux string `json:"-"` + } + + var out Tmp + v := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, "bar", out.Foo) + assert.Equal(t, "baz", out.Bar) +} + +func TestToTypedStructOverwrite(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar,omitempty"` + + // Baz must be skipped. + Baz string `json:""` + + // Qux must be skipped. 
+ Qux string `json:"-"` + } + + var out = Tmp{ + Foo: "baz", + Bar: "qux", + } + v := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, "bar", out.Foo) + assert.Equal(t, "baz", out.Bar) +} + +func TestToTypedStructAnonymousByValue(t *testing.T) { + type Bar struct { + Bar string `json:"bar"` + } + + type Foo struct { + Foo string `json:"foo"` + Bar + } + + type Tmp struct { + Foo + } + + var out Tmp + v := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, "bar", out.Foo.Foo) + assert.Equal(t, "baz", out.Foo.Bar.Bar) +} + +func TestToTypedStructAnonymousByPointer(t *testing.T) { + type Bar struct { + Bar string `json:"bar"` + } + + type Foo struct { + Foo string `json:"foo"` + *Bar + } + + type Tmp struct { + *Foo + } + + var out Tmp + v := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, "bar", out.Foo.Foo) + assert.Equal(t, "baz", out.Foo.Bar.Bar) +} + +func TestToTypedStructNil(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var out = Tmp{} + err := ToTyped(&out, config.NilValue) + require.NoError(t, err) + assert.Equal(t, Tmp{}, out) +} + +func TestToTypedStructNilOverwrite(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var out = Tmp{"bar"} + err := ToTyped(&out, config.NilValue) + require.NoError(t, err) + assert.Equal(t, Tmp{}, out) +} + +func TestToTypedMap(t *testing.T) { + var out = map[string]string{} + + v := config.V(map[string]config.Value{ + "key": config.V("value"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Len(t, out, 1) + assert.Equal(t, "value", out["key"]) +} + +func TestToTypedMapOverwrite(t *testing.T) { + var out = map[string]string{ + "foo": "bar", + } + + v := config.V(map[string]config.Value{ + "bar": config.V("qux"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Len(t, out, 1) + assert.Equal(t, "qux", out["bar"]) +} + +func TestToTypedMapWithPointerElement(t *testing.T) { + var out map[string]*string + + v := config.V(map[string]config.Value{ + "key": config.V("value"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Len(t, out, 1) + assert.Equal(t, "value", *out["key"]) +} + +func TestToTypedMapNil(t *testing.T) { + var out = map[string]string{} + err := ToTyped(&out, config.NilValue) + require.NoError(t, err) + assert.Nil(t, out) +} + +func TestToTypedMapNilOverwrite(t *testing.T) { + var out = map[string]string{ + "foo": "bar", + } + err := ToTyped(&out, config.NilValue) + require.NoError(t, err) + assert.Nil(t, out) +} + +func TestToTypedSlice(t *testing.T) { + var out []string + + v := config.V([]config.Value{ + config.V("foo"), + config.V("bar"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Len(t, out, 2) + assert.Equal(t, "foo", out[0]) + assert.Equal(t, "bar", out[1]) +} + +func TestToTypedSliceOverwrite(t *testing.T) { + var out = []string{"qux"} + + v := config.V([]config.Value{ + config.V("foo"), + config.V("bar"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Len(t, out, 2) + assert.Equal(t, "foo", out[0]) + assert.Equal(t, "bar", out[1]) +} + +func TestToTypedSliceWithPointerElement(t *testing.T) { + var out []*string + + v := config.V([]config.Value{ + 
config.V("foo"), + config.V("bar"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Len(t, out, 2) + assert.Equal(t, "foo", *out[0]) + assert.Equal(t, "bar", *out[1]) +} + +func TestToTypedSliceNil(t *testing.T) { + var out []string + err := ToTyped(&out, config.NilValue) + require.NoError(t, err) + assert.Nil(t, out) +} + +func TestToTypedSliceNilOverwrite(t *testing.T) { + var out = []string{"foo"} + err := ToTyped(&out, config.NilValue) + require.NoError(t, err) + assert.Nil(t, out) +} + +func TestToTypedString(t *testing.T) { + var out string + err := ToTyped(&out, config.V("foo")) + require.NoError(t, err) + assert.Equal(t, "foo", out) +} + +func TestToTypedStringOverwrite(t *testing.T) { + var out string = "bar" + err := ToTyped(&out, config.V("foo")) + require.NoError(t, err) + assert.Equal(t, "foo", out) +} + +func TestToTypedStringFromBool(t *testing.T) { + var out string + err := ToTyped(&out, config.V(true)) + require.NoError(t, err) + assert.Equal(t, "true", out) +} + +func TestToTypedStringFromInt(t *testing.T) { + var out string + err := ToTyped(&out, config.V(123)) + require.NoError(t, err) + assert.Equal(t, "123", out) +} + +func TestToTypedStringFromFloat(t *testing.T) { + var out string + err := ToTyped(&out, config.V(1.2)) + require.NoError(t, err) + assert.Equal(t, "1.2", out) +} + +func TestToTypedBool(t *testing.T) { + var out bool + err := ToTyped(&out, config.V(true)) + require.NoError(t, err) + assert.Equal(t, true, out) +} + +func TestToTypedBoolOverwrite(t *testing.T) { + var out bool = true + err := ToTyped(&out, config.V(false)) + require.NoError(t, err) + assert.Equal(t, false, out) +} + +func TestToTypedBoolFromString(t *testing.T) { + var out bool + + // True-ish + for _, v := range []string{"y", "yes", "on"} { + err := ToTyped(&out, config.V(v)) + require.NoError(t, err) + assert.Equal(t, true, out) + } + + // False-ish + for _, v := range []string{"n", "no", "off"} { + err := ToTyped(&out, config.V(v)) + require.NoError(t, err) + assert.Equal(t, false, out) + } + + // Other + err := ToTyped(&out, config.V("${var.foo}")) + require.Error(t, err) +} + +func TestToTypedInt(t *testing.T) { + var out int + err := ToTyped(&out, config.V(1234)) + require.NoError(t, err) + assert.Equal(t, int(1234), out) +} + +func TestToTypedInt32(t *testing.T) { + var out32 int32 + err := ToTyped(&out32, config.V(1235)) + require.NoError(t, err) + assert.Equal(t, int32(1235), out32) +} + +func TestToTypedInt64(t *testing.T) { + var out64 int64 + err := ToTyped(&out64, config.V(1236)) + require.NoError(t, err) + assert.Equal(t, int64(1236), out64) +} + +func TestToTypedIntOverwrite(t *testing.T) { + var out int = 123 + err := ToTyped(&out, config.V(1234)) + require.NoError(t, err) + assert.Equal(t, int(1234), out) +} + +func TestToTypedInt32Overwrite(t *testing.T) { + var out32 int32 = 123 + err := ToTyped(&out32, config.V(1234)) + require.NoError(t, err) + assert.Equal(t, int32(1234), out32) +} + +func TestToTypedInt64Overwrite(t *testing.T) { + var out64 int64 = 123 + err := ToTyped(&out64, config.V(1234)) + require.NoError(t, err) + assert.Equal(t, int64(1234), out64) +} + +func TestToTypedIntFromStringError(t *testing.T) { + var out int + err := ToTyped(&out, config.V("abc")) + require.Error(t, err) +} + +func TestToTypedIntFromStringInt(t *testing.T) { + var out int + err := ToTyped(&out, config.V("123")) + require.NoError(t, err) + assert.Equal(t, int(123), out) +} + +func TestToTypedFloat32(t *testing.T) { + var out float32 + err := ToTyped(&out, 
config.V(float32(1.0))) + require.NoError(t, err) + assert.Equal(t, float32(1.0), out) +} + +func TestToTypedFloat64(t *testing.T) { + var out float64 + err := ToTyped(&out, config.V(float64(1.0))) + require.NoError(t, err) + assert.Equal(t, float64(1.0), out) +} + +func TestToTypedFloat32Overwrite(t *testing.T) { + var out float32 = 1.0 + err := ToTyped(&out, config.V(float32(2.0))) + require.NoError(t, err) + assert.Equal(t, float32(2.0), out) +} + +func TestToTypedFloat64Overwrite(t *testing.T) { + var out float64 = 1.0 + err := ToTyped(&out, config.V(float64(2.0))) + require.NoError(t, err) + assert.Equal(t, float64(2.0), out) +} + +func TestToTypedFloat32FromStringError(t *testing.T) { + var out float32 + err := ToTyped(&out, config.V("abc")) + require.Error(t, err) +} + +func TestToTypedFloat64FromStringError(t *testing.T) { + var out float64 + err := ToTyped(&out, config.V("abc")) + require.Error(t, err) +} + +func TestToTypedFloat32FromString(t *testing.T) { + var out float32 + err := ToTyped(&out, config.V("1.2")) + require.NoError(t, err) + assert.Equal(t, float32(1.2), out) +} + +func TestToTypedFloat64FromString(t *testing.T) { + var out float64 + err := ToTyped(&out, config.V("1.2")) + require.NoError(t, err) + assert.Equal(t, float64(1.2), out) +} diff --git a/libs/config/kind.go b/libs/config/kind.go new file mode 100644 index 00000000..5ed1a665 --- /dev/null +++ b/libs/config/kind.go @@ -0,0 +1,64 @@ +package config + +import "time" + +type Kind int + +const ( + // Invalid is the zero value of Kind. + KindInvalid Kind = iota + KindMap + KindSequence + KindNil + KindString + KindBool + KindInt + KindFloat + KindTime +) + +func kindOf(v any) Kind { + switch v.(type) { + case map[string]Value: + return KindMap + case []Value: + return KindSequence + case nil: + return KindNil + case string: + return KindString + case bool: + return KindBool + case int, int32, int64: + return KindInt + case float32, float64: + return KindFloat + case time.Time: + return KindTime + default: + panic("not handled") + } +} + +func (k Kind) String() string { + switch k { + case KindMap: + return "map" + case KindSequence: + return "sequence" + case KindNil: + return "nil" + case KindString: + return "string" + case KindBool: + return "bool" + case KindInt: + return "int" + case KindFloat: + return "float" + case KindTime: + return "time" + default: + return "invalid" + } +} diff --git a/libs/config/value.go b/libs/config/value.go index 994aec38..c77f8147 100644 --- a/libs/config/value.go +++ b/libs/config/value.go @@ -1,9 +1,14 @@ package config -import "time" +import ( + "fmt" + "time" +) type Value struct { v any + + k Kind l Location // Whether or not this value is an anchor. @@ -12,12 +17,23 @@ type Value struct { } // NilValue is equal to the zero-value of Value. -var NilValue = Value{} +var NilValue = Value{ + k: KindNil, +} + +// V constructs a new Value with the given value. +func V(v any) Value { + return Value{ + v: v, + k: kindOf(v), + } +} // NewValue constructs a new Value with the given value and location. 
func NewValue(v any, loc Location) Value { return Value{ v: v, + k: kindOf(v), l: loc, } } @@ -27,45 +43,47 @@ func (v Value) AsMap() (map[string]Value, bool) { return m, ok } +func (v Value) Kind() Kind { + return v.k +} + func (v Value) Location() Location { return v.l } func (v Value) AsAny() any { - switch vv := v.v.(type) { - case map[string]Value: - m := make(map[string]any) + switch v.k { + case KindInvalid: + panic("invoked AsAny on invalid value") + case KindMap: + vv := v.v.(map[string]Value) + m := make(map[string]any, len(vv)) for k, v := range vv { m[k] = v.AsAny() } return m - case []Value: + case KindSequence: + vv := v.v.([]Value) a := make([]any, len(vv)) for i, v := range vv { a[i] = v.AsAny() } return a - case string: - return vv - case bool: - return vv - case int: - return vv - case int32: - return vv - case int64: - return vv - case float32: - return vv - case float64: - return vv - case time.Time: - return vv - case nil: - return nil + case KindNil: + return v.v + case KindString: + return v.v + case KindBool: + return v.v + case KindInt: + return v.v + case KindFloat: + return v.v + case KindTime: + return v.v default: // Panic because we only want to deal with known types. - panic("not handled") + panic(fmt.Sprintf("invalid kind: %d", v.k)) } } @@ -99,6 +117,7 @@ func (v Value) Index(i int) Value { func (v Value) MarkAnchor() Value { return Value{ v: v.v, + k: v.k, l: v.l, anchor: true, @@ -108,3 +127,47 @@ func (v Value) MarkAnchor() Value { func (v Value) IsAnchor() bool { return v.anchor } + +func (v Value) MustMap() map[string]Value { + return v.v.(map[string]Value) +} + +func (v Value) MustSequence() []Value { + return v.v.([]Value) +} + +func (v Value) MustString() string { + return v.v.(string) +} + +func (v Value) MustBool() bool { + return v.v.(bool) +} + +func (v Value) MustInt() int64 { + switch vv := v.v.(type) { + case int: + return int64(vv) + case int32: + return int64(vv) + case int64: + return int64(vv) + default: + panic("not an int") + } +} + +func (v Value) MustFloat() float64 { + switch vv := v.v.(type) { + case float32: + return float64(vv) + case float64: + return float64(vv) + default: + panic("not a float") + } +} + +func (v Value) MustTime() time.Time { + return v.v.(time.Time) +} From 3411b8aa37a28d78f4ffefd258cd18e7bff3581e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 24 Oct 2023 13:24:43 +0200 Subject: [PATCH 181/310] Loading an empty file yields a nil (#906) ## Changes Empty YAML files are valid and should return a nil-equivalent when loaded. ## Tests Tests pass. 
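For illustration, a minimal sketch of the new behavior. Assumptions not taken
from the patch itself: the import path github.com/databricks/cli/libs/config/yamlloader
is inferred from the package's directory layout, and the "empty.yml" path
argument and main-package harness are hypothetical, used only to label the
input source:

    package main

    import (
        "fmt"
        "strings"

        "github.com/databricks/cli/libs/config"
        "github.com/databricks/cli/libs/config/yamlloader"
    )

    func main() {
        // An empty document makes the YAML decoder return io.EOF;
        // LoadYAML now maps that to config.NilValue instead of an error.
        v, err := yamlloader.LoadYAML("empty.yml", strings.NewReader(""))
        if err != nil {
            panic(err)
        }
        fmt.Println(v.Kind() == config.KindNil) // true
    }

This keeps an empty configuration file interchangeable with an absent one:
both load as the nil-equivalent value.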
--- libs/config/yamlloader/testdata/empty.yml | 0 libs/config/yamlloader/yaml.go | 3 +++ libs/config/yamlloader/yaml_test.go | 5 +++++ 3 files changed, 8 insertions(+) create mode 100644 libs/config/yamlloader/testdata/empty.yml diff --git a/libs/config/yamlloader/testdata/empty.yml b/libs/config/yamlloader/testdata/empty.yml new file mode 100644 index 00000000..e69de29b diff --git a/libs/config/yamlloader/yaml.go b/libs/config/yamlloader/yaml.go index 0f0f4e60..a3cc7284 100644 --- a/libs/config/yamlloader/yaml.go +++ b/libs/config/yamlloader/yaml.go @@ -12,6 +12,9 @@ func LoadYAML(path string, r io.Reader) (config.Value, error) { dec := yaml.NewDecoder(r) err := dec.Decode(&node) if err != nil { + if err == io.EOF { + return config.NilValue, nil + } return config.NilValue, err } diff --git a/libs/config/yamlloader/yaml_test.go b/libs/config/yamlloader/yaml_test.go index 017caccd..ab61f071 100644 --- a/libs/config/yamlloader/yaml_test.go +++ b/libs/config/yamlloader/yaml_test.go @@ -28,3 +28,8 @@ func loadYAML(t *testing.T, path string) config.Value { assert.EqualValues(t, ref, self.AsAny()) return self } + +func TestYAMLEmpty(t *testing.T) { + self := loadYAML(t, "testdata/empty.yml") + assert.Equal(t, config.NilValue, self) +} From f8d7e3111871e901ab227a1a2813e3c7d8325177 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 24 Oct 2023 17:56:54 +0200 Subject: [PATCH 182/310] Fix pattern validation for input properties (#912) ## Changes Fixes bug where input validation would only be done on the first input parameter in the template schema. ## Tests Unit test. --- libs/jsonschema/instance.go | 5 +++- libs/jsonschema/instance_test.go | 29 +++++++++++++++++++ .../multiple-patterns-schema.json | 12 ++++++++ 3 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 libs/jsonschema/testdata/instance-validate/multiple-patterns-schema.json diff --git a/libs/jsonschema/instance.go b/libs/jsonschema/instance.go index d08ed519..091822da 100644 --- a/libs/jsonschema/instance.go +++ b/libs/jsonschema/instance.go @@ -122,7 +122,10 @@ func (s *Schema) validatePattern(instance map[string]any) error { if !ok { continue } - return ValidatePatternMatch(k, v, fieldInfo) + err := ValidatePatternMatch(k, v, fieldInfo) + if err != nil { + return err + } } return nil } diff --git a/libs/jsonschema/instance_test.go b/libs/jsonschema/instance_test.go index 3a357d71..8edbf796 100644 --- a/libs/jsonschema/instance_test.go +++ b/libs/jsonschema/instance_test.go @@ -193,3 +193,32 @@ func TestValidateInstancePatternWithCustomMessage(t *testing.T) { assert.EqualError(t, schema.validatePattern(invalidInstanceValue), "invalid value for foo: \"xyz\". Please enter a string starting with 'a' and ending with 'c'") assert.EqualError(t, schema.ValidateInstance(invalidInstanceValue), "invalid value for foo: \"xyz\". 
Please enter a string starting with 'a' and ending with 'c'") } + +func TestValidateInstanceForMultiplePatterns(t *testing.T) { + schema, err := Load("./testdata/instance-validate/multiple-patterns-schema.json") + require.NoError(t, err) + + // Valid values for both foo and bar + validInstance := map[string]any{ + "foo": "abcc", + "bar": "deff", + } + assert.NoError(t, schema.validatePattern(validInstance)) + assert.NoError(t, schema.ValidateInstance(validInstance)) + + // Valid value for bar, invalid value for foo + invalidInstanceValue := map[string]any{ + "foo": "xyz", + "bar": "deff", + } + assert.EqualError(t, schema.validatePattern(invalidInstanceValue), "invalid value for foo: \"xyz\". Expected to match regex pattern: ^[a-c]+$") + assert.EqualError(t, schema.ValidateInstance(invalidInstanceValue), "invalid value for foo: \"xyz\". Expected to match regex pattern: ^[a-c]+$") + + // Valid value for foo, invalid value for bar + invalidInstanceValue = map[string]any{ + "foo": "abcc", + "bar": "xyz", + } + assert.EqualError(t, schema.validatePattern(invalidInstanceValue), "invalid value for bar: \"xyz\". Expected to match regex pattern: ^[d-f]+$") + assert.EqualError(t, schema.ValidateInstance(invalidInstanceValue), "invalid value for bar: \"xyz\". Expected to match regex pattern: ^[d-f]+$") +} diff --git a/libs/jsonschema/testdata/instance-validate/multiple-patterns-schema.json b/libs/jsonschema/testdata/instance-validate/multiple-patterns-schema.json new file mode 100644 index 00000000..1098b7fc --- /dev/null +++ b/libs/jsonschema/testdata/instance-validate/multiple-patterns-schema.json @@ -0,0 +1,12 @@ +{ + "properties": { + "foo": { + "type": "string", + "pattern": "^[a-c]+$" + }, + "bar": { + "type": "string", + "pattern": "^[d-f]+$" + } + } +} From d768994bbff3e3f07c455a65f3ee8d99ef5a3096 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 24 Oct 2023 19:37:08 +0200 Subject: [PATCH 183/310] Simplified code generation logic for handling path and request body parameters and JSON input (#905) ## Changes Simplified code generation logic for handling path and request body parameters and JSON input Note: relies on these PRs: https://github.com/databricks/databricks-sdk-go/pull/666 https://github.com/databricks/databricks-sdk-go/pull/669 https://github.com/databricks/databricks-sdk-go/pull/670 --- .codegen/service.go.tmpl | 60 ++++--- cmd/account/groups/groups.go | 17 -- cmd/account/log-delivery/log-delivery.go | 40 +++-- cmd/account/metastores/metastores.go | 4 - cmd/account/networks/networks.go | 3 +- .../o-auth-enrollment/o-auth-enrollment.go | 4 - .../o-auth-published-apps.go | 14 -- cmd/account/private-access/private-access.go | 20 ++- .../published-app-integration.go | 4 - .../service-principals/service-principals.go | 17 -- cmd/account/settings/settings.go | 4 - cmd/account/users/users.go | 17 -- cmd/account/vpc-endpoints/vpc-endpoints.go | 3 +- cmd/account/workspaces/workspaces.go | 3 +- cmd/workspace/catalogs/catalogs.go | 3 +- cmd/workspace/clean-rooms/clean-rooms.go | 13 -- .../cluster-policies/cluster-policies.go | 21 +-- cmd/workspace/clusters/clusters.go | 26 ++- cmd/workspace/dashboards/dashboards.go | 23 +-- cmd/workspace/experiments/experiments.go | 111 +++++-------- .../external-locations/external-locations.go | 7 +- cmd/workspace/functions/functions.go | 8 + .../git-credentials/git-credentials.go | 11 +- .../global-init-scripts.go | 28 +++- cmd/workspace/groups/groups.go | 17 -- .../instance-pools/instance-pools.go | 12 +- .../instance-profiles/instance-profiles.go | 
9 +- cmd/workspace/jobs/jobs.go | 42 ----- cmd/workspace/metastores/metastores.go | 51 +++++- .../model-registry/model-registry.go | 153 +++++++++--------- .../model-versions/model-versions.go | 9 ++ cmd/workspace/pipelines/pipelines.go | 29 ---- .../policy-families/policy-families.go | 14 -- cmd/workspace/providers/providers.go | 26 ++- cmd/workspace/queries/queries.go | 21 --- cmd/workspace/query-history/query-history.go | 14 -- .../query-visualizations.go | 10 +- cmd/workspace/recipients/recipients.go | 41 ++--- .../registered-models/registered-models.go | 47 ++++-- cmd/workspace/repos/repos.go | 18 +-- cmd/workspace/schemas/schemas.go | 5 +- cmd/workspace/secrets/secrets.go | 23 ++- .../service-principals/service-principals.go | 17 -- cmd/workspace/settings/settings.go | 4 - cmd/workspace/shares/shares.go | 3 +- .../storage-credentials.go | 7 +- cmd/workspace/tables/tables.go | 9 ++ .../token-management/token-management.go | 26 +-- cmd/workspace/tokens/tokens.go | 4 - cmd/workspace/users/users.go | 25 --- cmd/workspace/volumes/volumes.go | 17 +- cmd/workspace/warehouses/warehouses.go | 21 --- .../workspace-conf/workspace-conf.go | 14 -- cmd/workspace/workspace/workspace.go | 3 +- 54 files changed, 452 insertions(+), 700 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index b4b6b4d4..27b9a754 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -78,12 +78,10 @@ var {{.CamelName}}Overrides []func( func new{{.PascalName}}() *cobra.Command { cmd := &cobra.Command{} - {{- $needJsonFlag := or .CanSetRequiredFieldsFromJson (and .Request (not .Request.IsOnlyPrimitiveFields)) -}} - {{- if .Request}} var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}} - {{- if $needJsonFlag}} + {{- if .CanUseJson}} var {{.CamelName}}Json flags.JsonFlag {{- end}} {{- end}} @@ -96,7 +94,7 @@ func new{{.PascalName}}() *cobra.Command { cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`) {{end -}} {{if .Request}}// TODO: short flags - {{- if $needJsonFlag}} + {{- if .CanUseJson}} cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`) {{- end}} {{$method := .}} @@ -120,12 +118,14 @@ func new{{.PascalName}}() *cobra.Command { {{- $fullCommandName := (print $serviceName " " .KebabName) -}} {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }} - {{- $hasPosArgs := and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow")) -}} + {{- $hasPosArgs := and (not .MustUseJson) (and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow"))) -}} {{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}} {{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}} {{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}} {{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}} - {{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}} + {{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}} + {{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt .Request) (eq 1 (len .Request.RequiredRequestBodyFields)) -}} + cmd.Use = "{{.KebabName}}{{if $hasPosArgs}}{{range .Request.RequiredFields}} {{.ConstantName}}{{end}}{{end}}" {{- if 
.Description }} @@ -142,9 +142,9 @@ func new{{.PascalName}}() *cobra.Command { {{if $hasRequiredArgs }} cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs({{len .Request.RequiredFields}}) - {{- if .CanSetRequiredFieldsFromJson }} + {{- if and .CanUseJson .Request.HasRequiredRequestBodyFields }} if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + check = cobra.ExactArgs({{len .Request.RequiredPathFields}}) } {{- end }} return check(cmd, args) @@ -155,14 +155,18 @@ func new{{.PascalName}}() *cobra.Command { ctx := cmd.Context() {{if .Service.IsAccounts}}a := root.AccountClient(ctx){{else}}w := root.WorkspaceClient(ctx){{end}} {{- if .Request }} - {{ if $needJsonFlag }} + {{ if .CanUseJson }} if cmd.Flags().Changed("json") { err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req) if err != nil { return err } - }{{end}}{{if .CanSetRequiredFieldsFromJson }} else { - {{- end }} + }{{end}}{{ if .MustUseJson }}else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + }{{- end}} + {{- if (not .MustUseJson) }} + {{- if and .CanUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }} else { + {{- end}} {{- if $hasIdPrompt}} if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) @@ -182,22 +186,30 @@ func new{{.PascalName}}() *cobra.Command { return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}") } {{- end -}} + {{$method := .}} - {{- if and .Request.IsAllRequiredFieldsPrimitive (not .IsJsonOnly) -}} - {{- range $arg, $field := .Request.RequiredFields}} - {{if not $field.Entity.IsString -}} - _, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}}) - if err != nil { - return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}]) - }{{else -}} - {{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}] - {{- end -}}{{end}} - {{- else -}} - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + {{- range $arg, $field := .Request.RequiredFields}} + {{- $optionalIfJsonIsUsed := and (not $hasIdPrompt) (and $field.IsRequestBodyField $method.CanUseJson) }} + {{- if $optionalIfJsonIsUsed }} + if !cmd.Flags().Changed("json") { + {{- end }} + {{if not $field.Entity.IsString -}} + _, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}}) + if err != nil { + return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}]) + }{{else -}} + {{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}] + {{- end -}} + {{- if $optionalIfJsonIsUsed }} + } + {{- end }} {{- end -}} - {{if .CanSetRequiredFieldsFromJson }} + + {{- if and .CanUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }} } - {{end }} + {{- end}} + + {{- end}} {{end}} {{if $wait -}} wait, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{.Service.PascalName}}.{{.PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}}) diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 2ecaa3a7..407f46a9 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -79,9 +79,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -95,7 +92,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := a.Groups.Create(ctx, createReq) @@ -282,10 
+278,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq iam.ListAccountGroupsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) @@ -305,9 +299,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -316,14 +307,6 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := a.Groups.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index 6323e0dd..48ebe9e9 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -147,9 +147,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -163,7 +160,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := a.LogDelivery.Create(ctx, createReq) @@ -278,10 +274,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq billing.ListLogDeliveryRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.CredentialsId, "credentials-id", listReq.CredentialsId, `Filter by credential configuration ID.`) cmd.Flags().Var(&listReq.Status, "status", `Filter by status ENABLED or DISABLED.`) @@ -298,9 +292,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -309,14 +300,6 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := a.LogDelivery.ListAll(ctx, listReq) if err != nil { return err @@ -355,10 +338,12 @@ func newPatchStatus() *cobra.Command { cmd := &cobra.Command{} var patchStatusReq billing.UpdateLogDeliveryConfigurationStatusRequest + var patchStatusJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&patchStatusJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "patch-status STATUS LOG_DELIVERY_CONFIGURATION_ID" + cmd.Use = "patch-status LOG_DELIVERY_CONFIGURATION_ID STATUS" cmd.Short = `Enable or disable log delivery configuration.` cmd.Long = `Enable or disable log delivery configuration. 
@@ -372,6 +357,9 @@ func newPatchStatus() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(2) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(1) + } return check(cmd, args) } @@ -380,11 +368,19 @@ func newPatchStatus() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - _, err = fmt.Sscan(args[0], &patchStatusReq.Status) - if err != nil { - return fmt.Errorf("invalid STATUS: %s", args[0]) + if cmd.Flags().Changed("json") { + err = patchStatusJson.Unmarshal(&patchStatusReq) + if err != nil { + return err + } + } + patchStatusReq.LogDeliveryConfigurationId = args[0] + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &patchStatusReq.Status) + if err != nil { + return fmt.Errorf("invalid STATUS: %s", args[1]) + } } - patchStatusReq.LogDeliveryConfigurationId = args[1] err = a.LogDelivery.PatchStatus(ctx, patchStatusReq) if err != nil { diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index 726b779d..30be31ba 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -64,9 +64,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -80,7 +77,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := a.Metastores.Create(ctx, createReq) diff --git a/cmd/account/networks/networks.go b/cmd/account/networks/networks.go index f481ffdb..74b3ffde 100755 --- a/cmd/account/networks/networks.go +++ b/cmd/account/networks/networks.go @@ -88,7 +88,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.NetworkName = args[0] } diff --git a/cmd/account/o-auth-enrollment/o-auth-enrollment.go b/cmd/account/o-auth-enrollment/o-auth-enrollment.go index 91fdfa0a..7ba2e59a 100755 --- a/cmd/account/o-auth-enrollment/o-auth-enrollment.go +++ b/cmd/account/o-auth-enrollment/o-auth-enrollment.go @@ -75,9 +75,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -91,7 +88,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } err = a.OAuthEnrollment.Create(ctx, createReq) diff --git a/cmd/account/o-auth-published-apps/o-auth-published-apps.go b/cmd/account/o-auth-published-apps/o-auth-published-apps.go index 640e8a4c..b611724d 100755 --- a/cmd/account/o-auth-published-apps/o-auth-published-apps.go +++ b/cmd/account/o-auth-published-apps/o-auth-published-apps.go @@ -5,7 +5,6 @@ package o_auth_published_apps import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/oauth2" "github.com/spf13/cobra" ) @@ -49,10 +48,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq oauth2.ListOAuthPublishedAppsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().Int64Var(&listReq.PageSize, "page-size", listReq.PageSize, `The max number of OAuth published apps to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, 
`A token that can be used to get the next page of results.`) @@ -67,9 +64,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -78,14 +72,6 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := a.OAuthPublishedApps.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 9cbc0929..094c030b 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -97,8 +97,11 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.PrivateAccessSettingsName = args[0] + } + if !cmd.Flags().Changed("json") { createReq.Region = args[1] } @@ -358,7 +361,7 @@ func newReplace() *cobra.Command { cmd.Flags().Var(&replaceReq.PrivateAccessLevel, "private-access-level", `The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object.`) cmd.Flags().BoolVar(&replaceReq.PublicAccessEnabled, "public-access-enabled", replaceReq.PublicAccessEnabled, `Determines if the workspace can be accessed over public internet.`) - cmd.Use = "replace PRIVATE_ACCESS_SETTINGS_NAME REGION PRIVATE_ACCESS_SETTINGS_ID" + cmd.Use = "replace PRIVATE_ACCESS_SETTINGS_ID PRIVATE_ACCESS_SETTINGS_NAME REGION" cmd.Short = `Replace private access settings.` cmd.Long = `Replace private access settings. 
@@ -388,6 +391,9 @@ func newReplace() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(3) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(1) + } return check(cmd, args) } @@ -402,9 +408,13 @@ func newReplace() *cobra.Command { return err } } - replaceReq.PrivateAccessSettingsName = args[0] - replaceReq.Region = args[1] - replaceReq.PrivateAccessSettingsId = args[2] + replaceReq.PrivateAccessSettingsId = args[0] + if !cmd.Flags().Changed("json") { + replaceReq.PrivateAccessSettingsName = args[1] + } + if !cmd.Flags().Changed("json") { + replaceReq.Region = args[2] + } err = a.PrivateAccess.Replace(ctx, replaceReq) if err != nil { diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index 9b29d53d..9fe4e6e8 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -69,9 +69,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -85,7 +82,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := a.PublishedAppIntegration.Create(ctx, createReq) diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index be210b35..06fc690c 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -77,9 +77,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -93,7 +90,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := a.ServicePrincipals.Create(ctx, createReq) @@ -281,10 +277,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq iam.ListAccountServicePrincipalsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) @@ -304,9 +298,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -315,14 +306,6 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := a.ServicePrincipals.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/account/settings/settings.go b/cmd/account/settings/settings.go index 4e98119d..09794488 100755 --- a/cmd/account/settings/settings.go +++ b/cmd/account/settings/settings.go @@ -197,9 +197,6 @@ func newUpdatePersonalComputeSetting() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = 
cobra.ExactArgs(0) - } return check(cmd, args) } @@ -213,7 +210,6 @@ func newUpdatePersonalComputeSetting() *cobra.Command { if err != nil { return err } - } else { } response, err := a.Settings.UpdatePersonalComputeSetting(ctx, updatePersonalComputeSettingReq) diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index c826ab0d..97e9c31f 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -85,9 +85,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -101,7 +98,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := a.Users.Create(ctx, createReq) @@ -289,10 +285,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq iam.ListAccountUsersRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) @@ -312,9 +306,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -323,14 +314,6 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := a.Users.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/account/vpc-endpoints/vpc-endpoints.go b/cmd/account/vpc-endpoints/vpc-endpoints.go index 5112b48d..4cefe242 100755 --- a/cmd/account/vpc-endpoints/vpc-endpoints.go +++ b/cmd/account/vpc-endpoints/vpc-endpoints.go @@ -95,7 +95,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.VpcEndpointName = args[0] } diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 60142a8a..993e569f 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -116,7 +116,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.WorkspaceName = args[0] } diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 462b6450..d1b54452 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -93,7 +93,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] } diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 1eab2fb3..05a1141d 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -250,10 +250,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq sharing.ListCleanRoomsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", 
listReq.MaxResults, `Maximum number of clean rooms to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Pagination token to go to next page based on previous query.`) @@ -270,9 +268,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -281,14 +276,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.CleanRooms.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 0e309194..0bd7b4a9 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -108,7 +108,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] } @@ -271,8 +272,11 @@ func newEdit() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { editReq.PolicyId = args[0] + } + if !cmd.Flags().Changed("json") { editReq.Name = args[1] } @@ -534,10 +538,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq compute.ListClusterPoliciesRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().Var(&listReq.SortColumn, "sort-column", `The cluster policy attribute to sort by.`) cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order in which the policies get listed.`) @@ -552,9 +554,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -563,14 +562,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.ClusterPolicies.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index 07effe09..e4fb6e0a 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -107,8 +107,11 @@ func newChangeOwner() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { changeOwnerReq.ClusterId = args[0] + } + if !cmd.Flags().Changed("json") { changeOwnerReq.OwnerUsername = args[1] } @@ -221,7 +224,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.SparkVersion = args[0] } @@ -452,8 +456,11 @@ func newEdit() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { editReq.ClusterId = args[0] + } + if !cmd.Flags().Changed("json") { editReq.SparkVersion = args[1] } @@ -824,10 +831,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq compute.ListClustersRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) 
cmd.Flags().StringVar(&listReq.CanUseClient, "can-use-client", listReq.CanUseClient, `Filter clusters based on what type of client it can be used for.`) @@ -849,9 +854,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -860,14 +862,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Clusters.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 2335ee28..8823ef53 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -58,20 +58,12 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "create NAME" + cmd.Use = "create" cmd.Short = `Create a dashboard object.` cmd.Long = `Create a dashboard object.` cmd.Annotations = make(map[string]string) - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } - return check(cmd, args) - } - cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -272,10 +264,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq sql.ListDashboardsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().Var(&listReq.Order, "order", `Name of dashboard attribute to order by.`) cmd.Flags().IntVar(&listReq.Page, "page", listReq.Page, `Page number to retrieve.`) @@ -292,9 +282,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -303,14 +290,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Dashboards.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 13087029..ed807ae5 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -95,7 +95,8 @@ func newCreateExperiment() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createExperimentReq.Name = args[0] } @@ -160,9 +161,6 @@ func newCreateRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -176,7 +174,6 @@ func newCreateRun() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Experiments.CreateRun(ctx, createRunReq) @@ -250,7 +247,8 @@ func newDeleteExperiment() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { deleteExperimentReq.ExperimentId = args[0] } @@ -323,7 
+321,8 @@ func newDeleteRun() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { deleteRunReq.RunId = args[0] } @@ -399,8 +398,11 @@ func newDeleteRuns() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { deleteRunsReq.ExperimentId = args[0] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[1], &deleteRunsReq.MaxTimestampMillis) if err != nil { return fmt.Errorf("invalid MAX_TIMESTAMP_MILLIS: %s", args[1]) @@ -477,8 +479,11 @@ func newDeleteTag() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { deleteTagReq.RunId = args[0] + } + if !cmd.Flags().Changed("json") { deleteTagReq.Key = args[1] } @@ -907,10 +912,8 @@ func newListArtifacts() *cobra.Command { cmd := &cobra.Command{} var listArtifactsReq ml.ListArtifactsRequest - var listArtifactsJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listArtifactsJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listArtifactsReq.PageToken, "page-token", listArtifactsReq.PageToken, `Token indicating the page of artifact results to fetch.`) cmd.Flags().StringVar(&listArtifactsReq.Path, "path", listArtifactsReq.Path, `Filter artifacts matching this path (a relative path from the root artifact directory).`) @@ -928,9 +931,6 @@ func newListArtifacts() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -939,14 +939,6 @@ func newListArtifacts() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listArtifactsJson.Unmarshal(&listArtifactsReq) - if err != nil { - return err - } - } else { - } - response, err := w.Experiments.ListArtifactsAll(ctx, listArtifactsReq) if err != nil { return err @@ -985,10 +977,8 @@ func newListExperiments() *cobra.Command { cmd := &cobra.Command{} var listExperimentsReq ml.ListExperimentsRequest - var listExperimentsJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listExperimentsJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().IntVar(&listExperimentsReq.MaxResults, "max-results", listExperimentsReq.MaxResults, `Maximum number of experiments desired.`) cmd.Flags().StringVar(&listExperimentsReq.PageToken, "page-token", listExperimentsReq.PageToken, `Token indicating the page of experiments to fetch.`) @@ -1004,9 +994,6 @@ func newListExperiments() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1015,14 +1002,6 @@ func newListExperiments() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listExperimentsJson.Unmarshal(&listExperimentsReq) - if err != nil { - return err - } - } else { - } - response, err := w.Experiments.ListExperimentsAll(ctx, listExperimentsReq) if err != nil { return err @@ -1117,9 +1096,6 @@ func newLogBatch() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1133,7 +1109,6 @@ func newLogBatch() *cobra.Command { if err != nil { return err } - } else { } err = 
w.Experiments.LogBatch(ctx, logBatchReq) @@ -1193,9 +1168,6 @@ func newLogInputs() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1209,7 +1181,6 @@ func newLogInputs() *cobra.Command { if err != nil { return err } - } else { } err = w.Experiments.LogInputs(ctx, logInputsReq) @@ -1287,12 +1258,17 @@ func newLogMetric() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { logMetricReq.Key = args[0] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[1], &logMetricReq.Value) if err != nil { return fmt.Errorf("invalid VALUE: %s", args[1]) } + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[2], &logMetricReq.Timestamp) if err != nil { return fmt.Errorf("invalid TIMESTAMP: %s", args[2]) @@ -1356,9 +1332,6 @@ func newLogModel() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1372,7 +1345,6 @@ func newLogModel() *cobra.Command { if err != nil { return err } - } else { } err = w.Experiments.LogModel(ctx, logModelReq) @@ -1450,8 +1422,11 @@ func newLogParam() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { logParamReq.Key = args[0] + } + if !cmd.Flags().Changed("json") { logParamReq.Value = args[1] } @@ -1529,7 +1504,8 @@ func newRestoreExperiment() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { restoreExperimentReq.ExperimentId = args[0] } @@ -1602,7 +1578,8 @@ func newRestoreRun() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { restoreRunReq.RunId = args[0] } @@ -1678,8 +1655,11 @@ func newRestoreRuns() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { restoreRunsReq.ExperimentId = args[0] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[1], &restoreRunsReq.MinTimestampMillis) if err != nil { return fmt.Errorf("invalid MIN_TIMESTAMP_MILLIS: %s", args[1]) @@ -1745,9 +1725,6 @@ func newSearchExperiments() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1761,7 +1738,6 @@ func newSearchExperiments() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Experiments.SearchExperimentsAll(ctx, searchExperimentsReq) @@ -1826,9 +1802,6 @@ func newSearchRuns() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1842,7 +1815,6 @@ func newSearchRuns() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Experiments.SearchRunsAll(ctx, searchRunsReq) @@ -1914,9 +1886,14 @@ func newSetExperimentTag() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { setExperimentTagReq.ExperimentId = args[0] + } + if !cmd.Flags().Changed("json") { setExperimentTagReq.Key = args[1] + } + if !cmd.Flags().Changed("json") { setExperimentTagReq.Value = args[2] } @@ -2065,8 +2042,11 @@ func newSetTag() *cobra.Command { if err != nil { return err } - } else { + } + 
if !cmd.Flags().Changed("json") { setTagReq.Key = args[0] + } + if !cmd.Flags().Changed("json") { setTagReq.Value = args[1] } @@ -2141,7 +2121,8 @@ func newUpdateExperiment() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { updateExperimentReq.ExperimentId = args[0] } @@ -2275,9 +2256,6 @@ func newUpdateRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -2291,7 +2269,6 @@ func newUpdateRun() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Experiments.UpdateRun(ctx, updateRunReq) diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index fd1b44e4..d510d2a9 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -97,9 +97,14 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { createReq.Url = args[1] + } + if !cmd.Flags().Changed("json") { createReq.CredentialName = args[2] } diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 02c8531d..911c6d14 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -355,8 +355,10 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq catalog.UpdateFunction + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of function.`) @@ -380,6 +382,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No NAME argument specified. Loading names for Functions drop-down." diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index 8d5c59ed..81348155 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -90,7 +90,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.GitProvider = args[0] } @@ -333,8 +334,10 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq workspace.UpdateCredentials + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.GitProvider, "git-provider", updateReq.GitProvider, `Git provider.`) cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `Git username.`) @@ -353,6 +356,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No CREDENTIAL_ID argument specified. Loading names for Git Credentials drop-down." 
diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index 12c49a51..513b9637 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -90,8 +90,11 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { createReq.Script = args[1] } @@ -330,13 +333,15 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq compute.GlobalInitScriptUpdateRequest + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().BoolVar(&updateReq.Enabled, "enabled", updateReq.Enabled, `Specifies whether the script is enabled.`) cmd.Flags().IntVar(&updateReq.Position, "position", updateReq.Position, `The position of a script, where 0 represents the first script to run, 1 is the second script to run, in ascending order.`) - cmd.Use = "update NAME SCRIPT SCRIPT_ID" + cmd.Use = "update SCRIPT_ID NAME SCRIPT" cmd.Short = `Update init script.` cmd.Long = `Update init script. @@ -347,6 +352,9 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(3) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(1) + } return check(cmd, args) } @@ -355,9 +363,19 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - updateReq.Name = args[0] - updateReq.Script = args[1] - updateReq.ScriptId = args[2] + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.ScriptId = args[0] + if !cmd.Flags().Changed("json") { + updateReq.Name = args[1] + } + if !cmd.Flags().Changed("json") { + updateReq.Script = args[2] + } err = w.GlobalInitScripts.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index 55d231fc..4ebf740d 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -79,9 +79,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -95,7 +92,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Groups.Create(ctx, createReq) @@ -282,10 +278,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq iam.ListGroupsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) @@ -305,9 +299,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -316,14 +307,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - 
} else { - } - response, err := w.Groups.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index b03542c0..1109b921 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -108,8 +108,11 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.InstancePoolName = args[0] + } + if !cmd.Flags().Changed("json") { createReq.NodeTypeId = args[1] } @@ -270,9 +273,14 @@ func newEdit() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { editReq.InstancePoolId = args[0] + } + if !cmd.Flags().Changed("json") { editReq.InstancePoolName = args[1] + } + if !cmd.Flags().Changed("json") { editReq.NodeTypeId = args[2] } diff --git a/cmd/workspace/instance-profiles/instance-profiles.go b/cmd/workspace/instance-profiles/instance-profiles.go index 0922a5ae..b3fdfc65 100755 --- a/cmd/workspace/instance-profiles/instance-profiles.go +++ b/cmd/workspace/instance-profiles/instance-profiles.go @@ -87,7 +87,8 @@ func newAdd() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { addReq.InstanceProfileArn = args[0] } @@ -177,7 +178,8 @@ func newEdit() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { editReq.InstanceProfileArn = args[0] } @@ -303,7 +305,8 @@ func newRemove() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { removeReq.InstanceProfileArn = args[0] } diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index be0df694..7759539e 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -85,9 +85,6 @@ func newCancelAllRuns() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -101,7 +98,6 @@ func newCancelAllRuns() *cobra.Command { if err != nil { return err } - } else { } err = w.Jobs.CancelAllRuns(ctx, cancelAllRunsReq) @@ -265,14 +261,6 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } - return check(cmd, args) - } - cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -964,10 +952,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq jobs.ListJobsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().BoolVar(&listReq.ExpandTasks, "expand-tasks", listReq.ExpandTasks, `Whether to include task and cluster details in the response.`) cmd.Flags().IntVar(&listReq.Limit, "limit", listReq.Limit, `The number of jobs to return.`) @@ -985,9 +971,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -996,14 +979,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = 
listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Jobs.ListAll(ctx, listReq) if err != nil { return err @@ -1042,10 +1017,8 @@ func newListRuns() *cobra.Command { cmd := &cobra.Command{} var listRunsReq jobs.ListRunsRequest - var listRunsJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listRunsJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().BoolVar(&listRunsReq.ActiveOnly, "active-only", listRunsReq.ActiveOnly, `If active_only is true, only active runs are included in the results; otherwise, lists both active and completed runs.`) cmd.Flags().BoolVar(&listRunsReq.CompletedOnly, "completed-only", listRunsReq.CompletedOnly, `If completed_only is true, only completed runs are included in the results; otherwise, lists both active and completed runs.`) @@ -1068,9 +1041,6 @@ func newListRuns() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1079,14 +1049,6 @@ func newListRuns() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listRunsJson.Unmarshal(&listRunsReq) - if err != nil { - return err - } - } else { - } - response, err := w.Jobs.ListRunsAll(ctx, listRunsReq) if err != nil { return err @@ -1556,9 +1518,6 @@ func newSubmit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1572,7 +1531,6 @@ func newSubmit() *cobra.Command { if err != nil { return err } - } else { } wait, err := w.Jobs.Submit(ctx, submitReq) diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 27486988..92144ec7 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -60,10 +60,12 @@ func newAssign() *cobra.Command { cmd := &cobra.Command{} var assignReq catalog.CreateMetastoreAssignment + var assignJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&assignJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "assign METASTORE_ID DEFAULT_CATALOG_NAME WORKSPACE_ID" + cmd.Use = "assign WORKSPACE_ID METASTORE_ID DEFAULT_CATALOG_NAME" cmd.Short = `Create an assignment.` cmd.Long = `Create an assignment. 
@@ -75,6 +77,9 @@ func newAssign() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(3) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(1) + } return check(cmd, args) } @@ -83,11 +88,21 @@ func newAssign() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - assignReq.MetastoreId = args[0] - assignReq.DefaultCatalogName = args[1] - _, err = fmt.Sscan(args[2], &assignReq.WorkspaceId) + if cmd.Flags().Changed("json") { + err = assignJson.Unmarshal(&assignReq) + if err != nil { + return err + } + } + _, err = fmt.Sscan(args[0], &assignReq.WorkspaceId) if err != nil { - return fmt.Errorf("invalid WORKSPACE_ID: %s", args[2]) + return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) + } + if !cmd.Flags().Changed("json") { + assignReq.MetastoreId = args[1] + } + if !cmd.Flags().Changed("json") { + assignReq.DefaultCatalogName = args[2] } err = w.Metastores.Assign(ctx, assignReq) @@ -161,8 +176,11 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { createReq.StorageRoot = args[1] } @@ -361,8 +379,11 @@ func newEnableOptimization() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { enableOptimizationReq.MetastoreId = args[0] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[1], &enableOptimizationReq.Enable) if err != nil { return fmt.Errorf("invalid ENABLE: %s", args[1]) @@ -645,8 +666,10 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq catalog.UpdateMetastore + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.DeltaSharingOrganizationName, "delta-sharing-organization-name", updateReq.DeltaSharingOrganizationName, `The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.`) cmd.Flags().Int64Var(&updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, "delta-sharing-recipient-token-lifetime-in-seconds", updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, `The lifetime of delta sharing recipient token in seconds.`) @@ -670,6 +693,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down." 
@@ -727,8 +756,10 @@ func newUpdateAssignment() *cobra.Command { cmd := &cobra.Command{} var updateAssignmentReq catalog.UpdateMetastoreAssignment + var updateAssignmentJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateAssignmentJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateAssignmentReq.DefaultCatalogName, "default-catalog-name", updateAssignmentReq.DefaultCatalogName, `The name of the default catalog for the metastore.`) cmd.Flags().StringVar(&updateAssignmentReq.MetastoreId, "metastore-id", updateAssignmentReq.MetastoreId, `The unique ID of the metastore.`) @@ -749,6 +780,12 @@ func newUpdateAssignment() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateAssignmentJson.Unmarshal(&updateAssignmentReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Metastores drop-down." diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 4a84bca6..8da9e621 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -82,13 +82,20 @@ func newApproveTransitionRequest() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { approveTransitionRequestReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { approveTransitionRequestReq.Version = args[1] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[2], &approveTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) } + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[3], &approveTransitionRequestReq.ArchiveExistingVersions) if err != nil { return fmt.Errorf("invalid ARCHIVE_EXISTING_VERSIONS: %s", args[3]) @@ -166,9 +173,14 @@ func newCreateComment() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createCommentReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { createCommentReq.Version = args[1] + } + if !cmd.Flags().Changed("json") { createCommentReq.Comment = args[2] } @@ -247,7 +259,8 @@ func newCreateModel() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createModelReq.Name = args[0] } @@ -325,8 +338,11 @@ func newCreateModelVersion() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createModelVersionReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { createModelVersionReq.Source = args[1] } @@ -401,9 +417,14 @@ func newCreateTransitionRequest() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createTransitionRequestReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { createTransitionRequestReq.Version = args[1] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[2], &createTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) @@ -899,10 +920,8 @@ func newDeleteWebhook() *cobra.Command { cmd := &cobra.Command{} var deleteWebhookReq ml.DeleteWebhookRequest - var deleteWebhookJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&deleteWebhookJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&deleteWebhookReq.Id, "id", deleteWebhookReq.Id, `Webhook ID 
required to delete a registry webhook.`) @@ -918,9 +937,6 @@ func newDeleteWebhook() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -929,14 +945,6 @@ func newDeleteWebhook() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = deleteWebhookJson.Unmarshal(&deleteWebhookReq) - if err != nil { - return err - } - } else { - } - err = w.ModelRegistry.DeleteWebhook(ctx, deleteWebhookReq) if err != nil { return err @@ -1008,7 +1016,8 @@ func newGetLatestVersions() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { getLatestVersionsReq.Name = args[0] } @@ -1362,10 +1371,8 @@ func newListModels() *cobra.Command { cmd := &cobra.Command{} var listModelsReq ml.ListModelsRequest - var listModelsJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listModelsJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().IntVar(&listModelsReq.MaxResults, "max-results", listModelsReq.MaxResults, `Maximum number of registered models desired.`) cmd.Flags().StringVar(&listModelsReq.PageToken, "page-token", listModelsReq.PageToken, `Pagination token to go to the next page based on a previous query.`) @@ -1381,9 +1388,6 @@ func newListModels() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1392,14 +1396,6 @@ func newListModels() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listModelsJson.Unmarshal(&listModelsReq) - if err != nil { - return err - } - } else { - } - response, err := w.ModelRegistry.ListModelsAll(ctx, listModelsReq) if err != nil { return err @@ -1500,10 +1496,8 @@ func newListWebhooks() *cobra.Command { cmd := &cobra.Command{} var listWebhooksReq ml.ListWebhooksRequest - var listWebhooksJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listWebhooksJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: events cmd.Flags().StringVar(&listWebhooksReq.ModelName, "model-name", listWebhooksReq.ModelName, `If not specified, all webhooks associated with the specified events are listed, regardless of their associated model.`) @@ -1521,9 +1515,6 @@ func newListWebhooks() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1532,14 +1523,6 @@ func newListWebhooks() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listWebhooksJson.Unmarshal(&listWebhooksReq) - if err != nil { - return err - } - } else { - } - response, err := w.ModelRegistry.ListWebhooksAll(ctx, listWebhooksReq) if err != nil { return err @@ -1611,9 +1594,14 @@ func newRejectTransitionRequest() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { rejectTransitionRequestReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { rejectTransitionRequestReq.Version = args[1] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[2], &rejectTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", 
args[2]) @@ -1691,7 +1679,8 @@ func newRenameModel() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { renameModelReq.Name = args[0] } @@ -1733,10 +1722,8 @@ func newSearchModelVersions() *cobra.Command { cmd := &cobra.Command{} var searchModelVersionsReq ml.SearchModelVersionsRequest - var searchModelVersionsJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&searchModelVersionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&searchModelVersionsReq.Filter, "filter", searchModelVersionsReq.Filter, `String filter condition, like "name='my-model-name'".`) cmd.Flags().IntVar(&searchModelVersionsReq.MaxResults, "max-results", searchModelVersionsReq.MaxResults, `Maximum number of models desired.`) @@ -1753,9 +1740,6 @@ func newSearchModelVersions() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1764,14 +1748,6 @@ func newSearchModelVersions() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = searchModelVersionsJson.Unmarshal(&searchModelVersionsReq) - if err != nil { - return err - } - } else { - } - response, err := w.ModelRegistry.SearchModelVersionsAll(ctx, searchModelVersionsReq) if err != nil { return err @@ -1810,10 +1786,8 @@ func newSearchModels() *cobra.Command { cmd := &cobra.Command{} var searchModelsReq ml.SearchModelsRequest - var searchModelsJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&searchModelsJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&searchModelsReq.Filter, "filter", searchModelsReq.Filter, `String filter condition, like "name LIKE 'my-model-name'".`) cmd.Flags().IntVar(&searchModelsReq.MaxResults, "max-results", searchModelsReq.MaxResults, `Maximum number of models desired.`) @@ -1830,9 +1804,6 @@ func newSearchModels() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -1841,14 +1812,6 @@ func newSearchModels() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = searchModelsJson.Unmarshal(&searchModelsReq) - if err != nil { - return err - } - } else { - } - response, err := w.ModelRegistry.SearchModelsAll(ctx, searchModelsReq) if err != nil { return err @@ -1918,9 +1881,14 @@ func newSetModelTag() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { setModelTagReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { setModelTagReq.Key = args[1] + } + if !cmd.Flags().Changed("json") { setModelTagReq.Value = args[2] } @@ -1993,10 +1961,17 @@ func newSetModelVersionTag() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { setModelVersionTagReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { setModelVersionTagReq.Version = args[1] + } + if !cmd.Flags().Changed("json") { setModelVersionTagReq.Key = args[2] + } + if !cmd.Flags().Changed("json") { setModelVersionTagReq.Value = args[3] } @@ -2145,7 +2120,8 @@ func newTestRegistryWebhook() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { testRegistryWebhookReq.Id = args[0] } @@ 
-2224,13 +2200,20 @@ func newTransitionStage() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { transitionStageReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { transitionStageReq.Version = args[1] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[2], &transitionStageReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) } + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[3], &transitionStageReq.ArchiveExistingVersions) if err != nil { return fmt.Errorf("invalid ARCHIVE_EXISTING_VERSIONS: %s", args[3]) @@ -2306,8 +2289,11 @@ func newUpdateComment() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { updateCommentReq.Id = args[0] + } + if !cmd.Flags().Changed("json") { updateCommentReq.Comment = args[1] } @@ -2382,7 +2368,8 @@ func newUpdateModel() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { updateModelReq.Name = args[0] } @@ -2457,8 +2444,11 @@ func newUpdateModelVersion() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { updateModelVersionReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { updateModelVersionReq.Version = args[1] } @@ -2611,7 +2601,8 @@ func newUpdateWebhook() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { updateWebhookReq.Id = args[0] } diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index f62cddab..67583a6a 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/spf13/cobra" ) @@ -336,8 +337,10 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq catalog.UpdateModelVersionRequest + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the model version.`) @@ -366,6 +369,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } updateReq.FullName = args[0] _, err = fmt.Sscan(args[1], &updateReq.Version) if err != nil { diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 8c1cf4f4..dd370905 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -76,14 +76,6 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } - return check(cmd, args) - } - cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -494,10 +486,8 @@ func newListPipelineEvents() *cobra.Command { cmd := &cobra.Command{} var listPipelineEventsReq pipelines.ListPipelineEventsRequest - var listPipelineEventsJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listPipelineEventsJson, 
"json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listPipelineEventsReq.Filter, "filter", listPipelineEventsReq.Filter, `Criteria to select a subset of results, expressed using a SQL-like syntax.`) cmd.Flags().IntVar(&listPipelineEventsReq.MaxResults, "max-results", listPipelineEventsReq.MaxResults, `Max number of entries to return in a single page.`) @@ -517,12 +507,6 @@ func newListPipelineEvents() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listPipelineEventsJson.Unmarshal(&listPipelineEventsReq) - if err != nil { - return err - } - } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." @@ -580,10 +564,8 @@ func newListPipelines() *cobra.Command { cmd := &cobra.Command{} var listPipelinesReq pipelines.ListPipelinesRequest - var listPipelinesJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listPipelinesJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listPipelinesReq.Filter, "filter", listPipelinesReq.Filter, `Select a subset of results based on the specified criteria.`) cmd.Flags().IntVar(&listPipelinesReq.MaxResults, "max-results", listPipelinesReq.MaxResults, `The maximum number of entries to return in a single page.`) @@ -600,9 +582,6 @@ func newListPipelines() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -611,14 +590,6 @@ func newListPipelines() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listPipelinesJson.Unmarshal(&listPipelinesReq) - if err != nil { - return err - } - } else { - } - response, err := w.Pipelines.ListPipelinesAll(ctx, listPipelinesReq) if err != nil { return err diff --git a/cmd/workspace/policy-families/policy-families.go b/cmd/workspace/policy-families/policy-families.go index 532317f7..75ab862a 100755 --- a/cmd/workspace/policy-families/policy-families.go +++ b/cmd/workspace/policy-families/policy-families.go @@ -5,7 +5,6 @@ package policy_families import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/spf13/cobra" ) @@ -115,10 +114,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq compute.ListPolicyFamiliesRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().Int64Var(&listReq.MaxResults, "max-results", listReq.MaxResults, `The max number of policy families to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`) @@ -133,9 +130,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -144,14 +138,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := 
w.PolicyFamilies.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index d57451cb..69a16725 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -85,8 +85,11 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[1], &createReq.AuthenticationType) if err != nil { return fmt.Errorf("invalid AUTHENTICATION_TYPE: %s", args[1]) @@ -280,10 +283,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq sharing.ListProvidersRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.DataProviderGlobalMetastoreId, "data-provider-global-metastore-id", listReq.DataProviderGlobalMetastoreId, `If not provided, all providers will be returned.`) @@ -300,9 +301,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -311,14 +309,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Providers.ListAll(ctx, listReq) if err != nil { return err @@ -432,8 +422,10 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq sharing.UpdateProvider + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Description about the provider.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the Provider.`) @@ -456,6 +448,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No NAME argument specified. Loading names for Providers drop-down." 
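The same transformation repeats across these files: the old `} else { ... }` branch that bound positional arguments is replaced by standalone `if !cmd.Flags().Changed("json")` guards, so a request body can come from `--json` while required identifiers keep coming from the command line. A minimal sketch of the resulting shape (a fragment only; the command, request, and field names here are illustrative, not taken from any one file):

```go
cmd.Args = func(cmd *cobra.Command, args []string) error {
	// Two positional arguments normally; only the required ID when the
	// body is supplied via --json.
	check := cobra.ExactArgs(2)
	if cmd.Flags().Changed("json") {
		check = cobra.ExactArgs(1)
	}
	return check(cmd, args)
}

cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
	if cmd.Flags().Changed("json") {
		if err = createJson.Unmarshal(&createReq); err != nil {
			return err
		}
	}
	createReq.Id = args[0] // always positional, even with --json
	if !cmd.Flags().Changed("json") {
		createReq.Name = args[1] // only bound when no JSON body was given
	}
	// ... issue the API call with createReq ...
	return nil
}
```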
diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index 3512adaa..6ac80000 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -72,14 +72,6 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } - return check(cmd, args) - } - cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -281,10 +273,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq sql.ListQueriesRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.Order, "order", listReq.Order, `Name of query attribute to order by.`) cmd.Flags().IntVar(&listReq.Page, "page", listReq.Page, `Page number to retrieve.`) @@ -302,9 +292,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -313,14 +300,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Queries.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/query-history/query-history.go b/cmd/workspace/query-history/query-history.go index 1593d676..337ab403 100755 --- a/cmd/workspace/query-history/query-history.go +++ b/cmd/workspace/query-history/query-history.go @@ -5,7 +5,6 @@ package query_history import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/sql" "github.com/spf13/cobra" ) @@ -46,10 +45,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq sql.ListQueryHistoryRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: filter_by cmd.Flags().BoolVar(&listReq.IncludeMetrics, "include-metrics", listReq.IncludeMetrics, `Whether to include metrics about query.`) @@ -68,9 +65,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -79,14 +73,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.QueryHistory.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/query-visualizations/query-visualizations.go b/cmd/workspace/query-visualizations/query-visualizations.go index fae0f934..11630084 100755 --- a/cmd/workspace/query-visualizations/query-visualizations.go +++ b/cmd/workspace/query-visualizations/query-visualizations.go @@ -180,20 +180,12 @@ func newUpdate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "update ID" 
+ cmd.Use = "update" cmd.Short = `Edit existing visualization.` cmd.Long = `Edit existing visualization.` cmd.Annotations = make(map[string]string) - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } - return check(cmd, args) - } - cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 736daab0..9169e5ff 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -104,8 +104,11 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[1], &createReq.AuthenticationType) if err != nil { return fmt.Errorf("invalid AUTHENTICATION_TYPE: %s", args[1]) @@ -299,10 +302,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq sharing.ListRecipientsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.DataRecipientGlobalMetastoreId, "data-recipient-global-metastore-id", listReq.DataRecipientGlobalMetastoreId, `If not provided, all recipients will be returned.`) @@ -319,9 +320,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -330,14 +328,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Recipients.ListAll(ctx, listReq) if err != nil { return err @@ -376,10 +366,12 @@ func newRotateToken() *cobra.Command { cmd := &cobra.Command{} var rotateTokenReq sharing.RotateRecipientToken + var rotateTokenJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&rotateTokenJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "rotate-token EXISTING_TOKEN_EXPIRE_IN_SECONDS NAME" + cmd.Use = "rotate-token NAME EXISTING_TOKEN_EXPIRE_IN_SECONDS" cmd.Short = `Rotate a token.` cmd.Long = `Rotate a token. 
@@ -390,6 +382,9 @@ func newRotateToken() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(2) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(1) + } return check(cmd, args) } @@ -398,11 +393,19 @@ func newRotateToken() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - _, err = fmt.Sscan(args[0], &rotateTokenReq.ExistingTokenExpireInSeconds) - if err != nil { - return fmt.Errorf("invalid EXISTING_TOKEN_EXPIRE_IN_SECONDS: %s", args[0]) + if cmd.Flags().Changed("json") { + err = rotateTokenJson.Unmarshal(&rotateTokenReq) + if err != nil { + return err + } + } + rotateTokenReq.Name = args[0] + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &rotateTokenReq.ExistingTokenExpireInSeconds) + if err != nil { + return fmt.Errorf("invalid EXISTING_TOKEN_EXPIRE_IN_SECONDS: %s", args[1]) + } } - rotateTokenReq.Name = args[1] response, err := w.Recipients.RotateToken(ctx, rotateTokenReq) if err != nil { diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index d914e1b6..64f40e17 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -121,9 +121,14 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.CatalogName = args[0] + } + if !cmd.Flags().Changed("json") { createReq.SchemaName = args[1] + } + if !cmd.Flags().Changed("json") { createReq.Name = args[2] } @@ -389,10 +394,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq catalog.ListRegisteredModelsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.CatalogName, "catalog-name", listReq.CatalogName, `The identifier of the catalog under which to list registered models.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Max number of registered models to return.`) @@ -420,9 +423,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -431,14 +431,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.RegisteredModels.ListAll(ctx, listReq) if err != nil { return err @@ -477,8 +469,10 @@ func newSetAlias() *cobra.Command { cmd := &cobra.Command{} var setAliasReq catalog.SetRegisteredModelAliasRequest + var setAliasJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&setAliasJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Use = "set-alias FULL_NAME ALIAS VERSION_NUM" cmd.Short = `Set a Registered Model Alias.` @@ -495,6 +489,9 @@ func newSetAlias() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(3) + if cmd.Flags().Changed("json") { + check = cobra.ExactArgs(2) + } return check(cmd, args) } @@ -503,11 +500,19 @@ func newSetAlias() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = setAliasJson.Unmarshal(&setAliasReq) + if err != nil { + return err + } + } 
setAliasReq.FullName = args[0] setAliasReq.Alias = args[1] - _, err = fmt.Sscan(args[2], &setAliasReq.VersionNum) - if err != nil { - return fmt.Errorf("invalid VERSION_NUM: %s", args[2]) + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &setAliasReq.VersionNum) + if err != nil { + return fmt.Errorf("invalid VERSION_NUM: %s", args[2]) + } } response, err := w.RegisteredModels.SetAlias(ctx, setAliasReq) @@ -548,8 +553,10 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq catalog.UpdateRegisteredModelRequest + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the registered model.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the registered model.`) @@ -576,6 +583,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No FULL_NAME argument specified. Loading names for Registered Models drop-down." diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 2d510e90..1a2a43b4 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -93,8 +93,11 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Url = args[0] + } + if !cmd.Flags().Changed("json") { createReq.Provider = args[1] } @@ -435,10 +438,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq workspace.ListReposRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.NextPageToken, "next-page-token", listReq.NextPageToken, `Token used to get the next page of results.`) cmd.Flags().StringVar(&listReq.PathPrefix, "path-prefix", listReq.PathPrefix, `Filters repos that have paths starting with the given path prefix.`) @@ -454,9 +455,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -465,14 +463,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Repos.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index fddf986d..70d8b633 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -89,8 +89,11 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { createReq.CatalogName = args[1] } diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 5425da90..9715d390 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -96,7 +96,8 @@ func newCreateScope() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") 
{ createScopeReq.Scope = args[0] } @@ -174,8 +175,11 @@ func newDeleteAcl() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { deleteAclReq.Scope = args[0] + } + if !cmd.Flags().Changed("json") { deleteAclReq.Principal = args[1] } @@ -252,7 +256,8 @@ func newDeleteScope() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { deleteScopeReq.Scope = args[0] } @@ -330,8 +335,11 @@ func newDeleteSecret() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { deleteSecretReq.Scope = args[0] + } + if !cmd.Flags().Changed("json") { deleteSecretReq.Key = args[1] } @@ -754,9 +762,14 @@ func newPutAcl() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { putAclReq.Scope = args[0] + } + if !cmd.Flags().Changed("json") { putAclReq.Principal = args[1] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[2], &putAclReq.Permission) if err != nil { return fmt.Errorf("invalid PERMISSION: %s", args[2]) diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index baeec349..e78e0062 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -77,9 +77,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -93,7 +90,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := w.ServicePrincipals.Create(ctx, createReq) @@ -281,10 +277,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq iam.ListServicePrincipalsRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) @@ -304,9 +298,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -315,14 +306,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.ServicePrincipals.ListAll(ctx, listReq) if err != nil { return err diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 71a682a4..3ef9a7e0 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -194,9 +194,6 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -210,7 +207,6 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Settings.UpdateDefaultWorkspaceNamespace(ctx, updateDefaultWorkspaceNamespaceReq) diff --git a/cmd/workspace/shares/shares.go 
b/cmd/workspace/shares/shares.go index b542196d..c8cab3b7 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -85,7 +85,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] } diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index fb0ae475..00c0c215 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -99,7 +99,8 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.Name = args[0] } @@ -471,9 +472,6 @@ func newValidate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -487,7 +485,6 @@ func newValidate() *cobra.Command { if err != nil { return err } - } else { } response, err := w.StorageCredentials.Validate(ctx, validateReq) diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index 53a153fc..a7375f97 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/spf13/cobra" ) @@ -370,8 +371,10 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq catalog.UpdateTableRequest + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, ``) @@ -395,6 +398,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No FULL_NAME argument specified. Loading names for Tables drop-down." 
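The other recurring change in this patch, also visible in the token-management and users diffs that follow, removes the `--json` flag from list-style commands entirely. There it was a no-op: the request carries only query parameters set via flags, the unmarshal branch ended in an empty `} else {}`, and the argument check was identical in both branches, as this sketch of the removed code illustrates:

```go
// Before (sketch): both branches demanded zero positional arguments, so the
// conditional was a dead branch and --json added nothing for these commands.
check := cobra.ExactArgs(0)
if cmd.Flags().Changed("json") {
	check = cobra.ExactArgs(0) // identical to the default
}
return check(cmd, args)
```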
diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index b934e264..5d34a2c7 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -83,8 +83,11 @@ func newCreateOboToken() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createOboTokenReq.ApplicationId = args[0] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[1], &createOboTokenReq.LifetimeSeconds) if err != nil { return fmt.Errorf("invalid LIFETIME_SECONDS: %s", args[1]) @@ -372,10 +375,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq settings.ListTokenManagementRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.CreatedById, "created-by-id", listReq.CreatedById, `User ID of the user that created the token.`) cmd.Flags().StringVar(&listReq.CreatedByUsername, "created-by-username", listReq.CreatedByUsername, `Username of the user that created the token.`) @@ -390,9 +391,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -401,14 +399,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.TokenManagement.ListAll(ctx, listReq) if err != nil { return err @@ -465,9 +455,6 @@ func newSetPermissions() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -481,7 +468,6 @@ func newSetPermissions() *cobra.Command { if err != nil { return err } - } else { } response, err := w.TokenManagement.SetPermissions(ctx, setPermissionsReq) @@ -540,9 +526,6 @@ func newUpdatePermissions() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -556,7 +539,6 @@ func newUpdatePermissions() *cobra.Command { if err != nil { return err } - } else { } response, err := w.TokenManagement.UpdatePermissions(ctx, updatePermissionsReq) diff --git a/cmd/workspace/tokens/tokens.go b/cmd/workspace/tokens/tokens.go index eee64c97..dad790c5 100755 --- a/cmd/workspace/tokens/tokens.go +++ b/cmd/workspace/tokens/tokens.go @@ -70,9 +70,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -86,7 +83,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Tokens.Create(ctx, createReq) diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 6fe4b4f6..ebf319fa 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -85,9 +85,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ 
-101,7 +98,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Users.Create(ctx, createReq) @@ -386,10 +382,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq iam.ListUsersRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) @@ -409,9 +403,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -420,14 +411,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Users.ListAll(ctx, listReq) if err != nil { return err @@ -569,9 +552,6 @@ func newSetPermissions() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -585,7 +565,6 @@ func newSetPermissions() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Users.SetPermissions(ctx, setPermissionsReq) @@ -736,9 +715,6 @@ func newUpdatePermissions() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -752,7 +728,6 @@ func newUpdatePermissions() *cobra.Command { if err != nil { return err } - } else { } response, err := w.Users.UpdatePermissions(ctx, updatePermissionsReq) diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 4dbfc585..d443cea9 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -106,10 +106,17 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { + } + if !cmd.Flags().Changed("json") { createReq.CatalogName = args[0] + } + if !cmd.Flags().Changed("json") { createReq.SchemaName = args[1] + } + if !cmd.Flags().Changed("json") { createReq.Name = args[2] + } + if !cmd.Flags().Changed("json") { _, err = fmt.Sscan(args[3], &createReq.VolumeType) if err != nil { return fmt.Errorf("invalid VOLUME_TYPE: %s", args[3]) @@ -384,8 +391,10 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq catalog.UpdateVolumeRequestContent + var updateJson flags.JsonFlag // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the volume.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the volume.`) @@ -411,6 +420,12 @@ func newUpdate() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down." 
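Where a positional argument needs parsing (integers, enums), the parse-and-validate step moves inside the guard, so a value supplied through `--json` is neither overwritten nor re-validated against an absent argument. A sketch, with field and variable names following the volumes `create` hunk above:

```go
// Parsed positionals keep their validation, but only run when no JSON body
// was provided on the command line.
if !cmd.Flags().Changed("json") {
	_, err = fmt.Sscan(args[3], &createReq.VolumeType)
	if err != nil {
		return fmt.Errorf("invalid VOLUME_TYPE: %s", args[3])
	}
}
```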
diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index c64e0e0b..c7930e29 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -85,9 +85,6 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -101,7 +98,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { } wait, err := w.Warehouses.Create(ctx, createReq) @@ -627,10 +623,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} var listReq sql.ListWarehousesRequest - var listJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().IntVar(&listReq.RunAsUserId, "run-as-user-id", listReq.RunAsUserId, `Service Principal which will be used to fetch the list of warehouses.`) @@ -644,9 +638,6 @@ func newList() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -655,14 +646,6 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = listJson.Unmarshal(&listReq) - if err != nil { - return err - } - } else { - } - response, err := w.Warehouses.ListAll(ctx, listReq) if err != nil { return err @@ -811,9 +794,6 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -827,7 +807,6 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { if err != nil { return err } - } else { } err = w.Warehouses.SetWorkspaceWarehouseConfig(ctx, setWorkspaceWarehouseConfigReq) diff --git a/cmd/workspace/workspace-conf/workspace-conf.go b/cmd/workspace/workspace-conf/workspace-conf.go index d828f66e..687c31ad 100755 --- a/cmd/workspace/workspace-conf/workspace-conf.go +++ b/cmd/workspace/workspace-conf/workspace-conf.go @@ -5,7 +5,6 @@ package workspace_conf import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/settings" "github.com/spf13/cobra" ) @@ -107,10 +106,8 @@ func newSetStatus() *cobra.Command { cmd := &cobra.Command{} var setStatusReq settings.WorkspaceConf - var setStatusJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&setStatusJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Use = "set-status" cmd.Short = `Enable/disable features.` @@ -123,9 +120,6 @@ func newSetStatus() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { check := cobra.ExactArgs(0) - if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) - } return check(cmd, args) } @@ -134,14 +128,6 @@ func newSetStatus() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = setStatusJson.Unmarshal(&setStatusReq) - if err != nil { - return err - } - } else { - } - err = w.WorkspaceConf.SetStatus(ctx, setStatusReq) if err != nil { return err diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 2541c8e3..4af888ac 100755 --- 
a/cmd/workspace/workspace/workspace.go
+++ b/cmd/workspace/workspace/workspace.go
@@ -453,7 +453,8 @@ func newImport() *cobra.Command {
 			if err != nil {
 				return err
 			}
-		} else {
+		}
+		if !cmd.Flags().Changed("json") {
 			importReq.Path = args[0]
 		}

From 4a09ffc1ec5675b9d072428534de830a6c108121 Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Wed, 25 Oct 2023 11:37:25 +0200
Subject: [PATCH 184/310] Add support for multiline descriptions when using
 template enums (#916)

## Changes
This PR splits the question prompt at the last newline character so that multiline selection prompts work with `promptui`.

## Tests
Tested manually

https://github.com/databricks/cli/assets/88374338/027e5210-f7f4-479d-98df-744d15b7a8fb

---
 libs/cmdio/logger.go      | 20 ++++++++++++++++++--
 libs/cmdio/logger_test.go | 22 ++++++++++++++++++++++
 2 files changed, 40 insertions(+), 2 deletions(-)

diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go
index 7d760b99..45b1883c 100644
--- a/libs/cmdio/logger.go
+++ b/libs/cmdio/logger.go
@@ -113,18 +113,34 @@ func AskSelect(ctx context.Context, question string, choices []string) (string,
 	return logger.AskSelect(question, choices)
 }
 
+func splitAtLastNewLine(s string) (string, string) {
+	// Split at the last newline character
+	if i := strings.LastIndex(s, "\n"); i != -1 {
+		return s[:i+1], s[i+1:]
+	}
+	// Return the original string if no newline is found
+	return "", s
+}
+
 func (l *Logger) AskSelect(question string, choices []string) (string, error) {
 	if l.Mode == flags.ModeJson {
 		return "", fmt.Errorf("question prompts are not supported in json mode")
 	}
 
+	// Promptui does not support multiline prompts, so we split the question.
+	first, last := splitAtLastNewLine(question)
+	_, err := l.Writer.Write([]byte(first))
+	if err != nil {
+		return "", err
+	}
+
 	prompt := promptui.Select{
-		Label:    question,
+		Label:    last,
 		Items:    choices,
 		HideHelp: true,
 		Templates: &promptui.SelectTemplates{
 			Label:    "{{.}}: ",
-			Selected: fmt.Sprintf("%s: {{.}}", question),
+			Selected: fmt.Sprintf("%s: {{.}}", last),
 		},
 	}
 
diff --git a/libs/cmdio/logger_test.go b/libs/cmdio/logger_test.go
index c5c00d02..2aecfbda 100644
--- a/libs/cmdio/logger_test.go
+++ b/libs/cmdio/logger_test.go
@@ -21,3 +21,25 @@ func TestAskChoiceFailsInJsonMode(t *testing.T) {
 	_, err := AskSelect(ctx, "what is a question?", []string{"b", "c", "a"})
 	assert.EqualError(t, err, "question prompts are not supported in json mode")
 }
+
+func TestSplitAtLastNewLine(t *testing.T) {
+	first, last := splitAtLastNewLine("hello\nworld")
+	assert.Equal(t, "hello\n", first)
+	assert.Equal(t, "world", last)
+
+	first, last = splitAtLastNewLine("hello\r\nworld")
+	assert.Equal(t, "hello\r\n", first)
+	assert.Equal(t, "world", last)
+
+	first, last = splitAtLastNewLine("hello world")
+	assert.Equal(t, "", first)
+	assert.Equal(t, "hello world", last)
+
+	first, last = splitAtLastNewLine("hello\nworld\n")
+	assert.Equal(t, "hello\nworld\n", first)
+	assert.Equal(t, "", last)
+
+	first, last = splitAtLastNewLine("\nhello world")
+	assert.Equal(t, "\n", first)
+	assert.Equal(t, "hello world", last)
+}

From 486bf596279bd28079c03d79ef024a134b45b459 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Wed, 25 Oct 2023 11:54:39 +0200
Subject: [PATCH 185/310] Move bundle configuration filename code (#917)

## Changes
This is unrelated to the config root, so it belongs in a separate file (this code was added in #580).
## Tests n/a --- bundle/config/filename.go | 43 +++++++++++++++++++++ bundle/config/filename_test.go | 70 ++++++++++++++++++++++++++++++++++ bundle/config/root.go | 30 --------------- bundle/config/root_test.go | 63 ------------------------------ 4 files changed, 113 insertions(+), 93 deletions(-) create mode 100644 bundle/config/filename.go create mode 100644 bundle/config/filename_test.go diff --git a/bundle/config/filename.go b/bundle/config/filename.go new file mode 100644 index 00000000..11af34d9 --- /dev/null +++ b/bundle/config/filename.go @@ -0,0 +1,43 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" +) + +type ConfigFileNames []string + +// FileNames contains allowed names of root bundle configuration files. +var FileNames = ConfigFileNames{ + "databricks.yml", + "databricks.yaml", + "bundle.yml", + "bundle.yaml", +} + +func (c ConfigFileNames) FindInPath(path string) (string, error) { + result := "" + var firstErr error + + for _, file := range c { + filePath := filepath.Join(path, file) + _, err := os.Stat(filePath) + if err == nil { + if result != "" { + return "", fmt.Errorf("multiple bundle root configuration files found in %s", path) + } + result = filePath + } else { + if firstErr == nil { + firstErr = err + } + } + } + + if result == "" { + return "", firstErr + } + + return result, nil +} diff --git a/bundle/config/filename_test.go b/bundle/config/filename_test.go new file mode 100644 index 00000000..9d71fa7e --- /dev/null +++ b/bundle/config/filename_test.go @@ -0,0 +1,70 @@ +package config + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConfigFileNames_FindInPath(t *testing.T) { + testCases := []struct { + name string + files []string + expected string + err string + }{ + { + name: "file found", + files: []string{"databricks.yml"}, + expected: "BASE/databricks.yml", + err: "", + }, + { + name: "file found", + files: []string{"bundle.yml"}, + expected: "BASE/bundle.yml", + err: "", + }, + { + name: "multiple files found", + files: []string{"databricks.yaml", "bundle.yml"}, + expected: "", + err: "multiple bundle root configuration files found", + }, + { + name: "file not found", + files: []string{}, + expected: "", + err: "no such file or directory", + }, + } + + if runtime.GOOS == "windows" { + testCases[3].err = "The system cannot find the file specified." + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + projectDir := t.TempDir() + for _, file := range tc.files { + f1, _ := os.Create(filepath.Join(projectDir, file)) + f1.Close() + } + + result, err := FileNames.FindInPath(projectDir) + + expected := strings.Replace(tc.expected, "BASE/", projectDir+string(os.PathSeparator), 1) + assert.Equal(t, expected, result) + + if tc.err != "" { + assert.ErrorContains(t, err, tc.err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/bundle/config/root.go b/bundle/config/root.go index bf203833..25d5ce5f 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -12,36 +12,6 @@ import ( "github.com/imdario/mergo" ) -type ConfigFileNames []string - -// FileNames contains allowed names of bundle configuration files. 
-var FileNames = ConfigFileNames{"databricks.yml", "databricks.yaml", "bundle.yml", "bundle.yaml"} - -func (c ConfigFileNames) FindInPath(path string) (string, error) { - result := "" - var firstErr error - - for _, file := range c { - filePath := filepath.Join(path, file) - _, err := os.Stat(filePath) - if err == nil { - if result != "" { - return "", fmt.Errorf("multiple bundle root configuration files found in %s", path) - } - result = filePath - } else { - if firstErr == nil { - firstErr = err - } - } - } - - if result == "" { - return "", firstErr - } - return result, nil -} - type Root struct { // Path contains the directory path to the root of the bundle. // It is set when loading `databricks.yml`. diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index 6e263667..b0dfc3ec 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -2,11 +2,7 @@ package config import ( "encoding/json" - "os" - "path/filepath" "reflect" - "runtime" - "strings" "testing" "github.com/databricks/cli/bundle/config/variable" @@ -167,62 +163,3 @@ func TestRootMergeTargetOverridesWithMode(t *testing.T) { require.NoError(t, root.MergeTargetOverrides(env)) assert.Equal(t, Development, root.Bundle.Mode) } - -func TestConfigFileNames_FindInPath(t *testing.T) { - testCases := []struct { - name string - files []string - expected string - err string - }{ - { - name: "file found", - files: []string{"databricks.yml"}, - expected: "BASE/databricks.yml", - err: "", - }, - { - name: "file found", - files: []string{"bundle.yml"}, - expected: "BASE/bundle.yml", - err: "", - }, - { - name: "multiple files found", - files: []string{"databricks.yaml", "bundle.yml"}, - expected: "", - err: "multiple bundle root configuration files found", - }, - { - name: "file not found", - files: []string{}, - expected: "", - err: "no such file or directory", - }, - } - - if runtime.GOOS == "windows" { - testCases[3].err = "The system cannot find the file specified." - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - projectDir := t.TempDir() - for _, file := range tc.files { - f1, _ := os.Create(filepath.Join(projectDir, file)) - f1.Close() - } - - result, err := FileNames.FindInPath(projectDir) - - expected := strings.Replace(tc.expected, "BASE/", projectDir+string(os.PathSeparator), 1) - assert.Equal(t, expected, result) - - if tc.err != "" { - assert.ErrorContains(t, err, tc.err) - } else { - assert.NoError(t, err) - } - }) - } -} From a60c40e71ecf9463f11cf3e98ddc8bb4fa3edf96 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 25 Oct 2023 13:56:42 +0200 Subject: [PATCH 186/310] Add configuration normalization code (#915) ## Changes This is similar to #904 but instead of converting the dynamic configuration to Go structs, this normalizes a `config.Value` according to the type of a Go struct and returns the new, normalized `config.Value`. This will be used to ensure that two `config.Value` trees are type-compatible before we can merge them (i.e. instances from different files). Warnings and errors during normalization are accumulated and returned as a `diag.Diagnostics` structure. We can use this to surface warnings about unknown fields, or errors about invalid types, in aggregate instead of one-by-one. This approach is inspired by the pattern to accumulate diagnostics in Terraform provider code. ## Tests New unit tests. 
--- libs/config/convert/normalize.go | 235 ++++++++++++++ libs/config/convert/normalize_test.go | 435 ++++++++++++++++++++++++++ libs/diag/diagnostic.go | 76 +++++ libs/diag/severity.go | 9 + 4 files changed, 755 insertions(+) create mode 100644 libs/config/convert/normalize.go create mode 100644 libs/config/convert/normalize_test.go create mode 100644 libs/diag/diagnostic.go create mode 100644 libs/diag/severity.go diff --git a/libs/config/convert/normalize.go b/libs/config/convert/normalize.go new file mode 100644 index 00000000..d7d2b1df --- /dev/null +++ b/libs/config/convert/normalize.go @@ -0,0 +1,235 @@ +package convert + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/diag" +) + +func Normalize(dst any, src config.Value) (config.Value, diag.Diagnostics) { + return normalizeType(reflect.TypeOf(dst), src) +} + +func normalizeType(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { + for typ.Kind() == reflect.Pointer { + typ = typ.Elem() + } + + switch typ.Kind() { + case reflect.Struct: + return normalizeStruct(typ, src) + case reflect.Map: + return normalizeMap(typ, src) + case reflect.Slice: + return normalizeSlice(typ, src) + case reflect.String: + return normalizeString(typ, src) + case reflect.Bool: + return normalizeBool(typ, src) + case reflect.Int, reflect.Int32, reflect.Int64: + return normalizeInt(typ, src) + case reflect.Float32, reflect.Float64: + return normalizeFloat(typ, src) + } + + return config.NilValue, diag.Errorf("unsupported type: %s", typ.Kind()) +} + +func typeMismatch(expected config.Kind, src config.Value) diag.Diagnostic { + return diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("expected %s, found %s", expected, src.Kind()), + Location: src.Location(), + } +} + +func normalizeStruct(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + switch src.Kind() { + case config.KindMap: + out := make(map[string]config.Value) + info := getStructInfo(typ) + for k, v := range src.MustMap() { + index, ok := info.Fields[k] + if !ok { + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("unknown field: %s", k), + Location: src.Location(), + }) + continue + } + + // Normalize the value according to the field type. + v, err := normalizeType(typ.FieldByIndex(index).Type, v) + if err != nil { + diags = diags.Extend(err) + // Skip the element if it cannot be normalized. + if err.HasError() { + continue + } + } + + out[k] = v + } + + return config.NewValue(out, src.Location()), diags + case config.KindNil: + return src, diags + } + + return config.NilValue, diags.Append(typeMismatch(config.KindMap, src)) +} + +func normalizeMap(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + switch src.Kind() { + case config.KindMap: + out := make(map[string]config.Value) + for k, v := range src.MustMap() { + // Normalize the value according to the map element type. + v, err := normalizeType(typ.Elem(), v) + if err != nil { + diags = diags.Extend(err) + // Skip the element if it cannot be normalized. 
+ if err.HasError() { + continue + } + } + + out[k] = v + } + + return config.NewValue(out, src.Location()), diags + case config.KindNil: + return src, diags + } + + return config.NilValue, diags.Append(typeMismatch(config.KindMap, src)) +} + +func normalizeSlice(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + switch src.Kind() { + case config.KindSequence: + out := make([]config.Value, 0, len(src.MustSequence())) + for _, v := range src.MustSequence() { + // Normalize the value according to the slice element type. + v, err := normalizeType(typ.Elem(), v) + if err != nil { + diags = diags.Extend(err) + // Skip the element if it cannot be normalized. + if err.HasError() { + continue + } + } + + out = append(out, v) + } + + return config.NewValue(out, src.Location()), diags + case config.KindNil: + return src, diags + } + + return config.NilValue, diags.Append(typeMismatch(config.KindSequence, src)) +} + +func normalizeString(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { + var diags diag.Diagnostics + var out string + + switch src.Kind() { + case config.KindString: + out = src.MustString() + case config.KindBool: + out = strconv.FormatBool(src.MustBool()) + case config.KindInt: + out = strconv.FormatInt(src.MustInt(), 10) + case config.KindFloat: + out = strconv.FormatFloat(src.MustFloat(), 'f', -1, 64) + default: + return config.NilValue, diags.Append(typeMismatch(config.KindString, src)) + } + + return config.NewValue(out, src.Location()), diags +} + +func normalizeBool(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { + var diags diag.Diagnostics + var out bool + + switch src.Kind() { + case config.KindBool: + out = src.MustBool() + case config.KindString: + // See https://github.com/go-yaml/yaml/blob/f6f7691b1fdeb513f56608cd2c32c51f8194bf51/decode.go#L684-L693. + switch src.MustString() { + case "true", "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out = true + case "false", "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out = false + default: + // Cannot interpret as a boolean. 
+ return config.NilValue, diags.Append(typeMismatch(config.KindBool, src)) + } + default: + return config.NilValue, diags.Append(typeMismatch(config.KindBool, src)) + } + + return config.NewValue(out, src.Location()), diags +} + +func normalizeInt(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { + var diags diag.Diagnostics + var out int64 + + switch src.Kind() { + case config.KindInt: + out = src.MustInt() + case config.KindString: + var err error + out, err = strconv.ParseInt(src.MustString(), 10, 64) + if err != nil { + return config.NilValue, diags.Append(diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("cannot parse %q as an integer", src.MustString()), + Location: src.Location(), + }) + } + default: + return config.NilValue, diags.Append(typeMismatch(config.KindInt, src)) + } + + return config.NewValue(out, src.Location()), diags +} + +func normalizeFloat(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { + var diags diag.Diagnostics + var out float64 + + switch src.Kind() { + case config.KindFloat: + out = src.MustFloat() + case config.KindString: + var err error + out, err = strconv.ParseFloat(src.MustString(), 64) + if err != nil { + return config.NilValue, diags.Append(diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("cannot parse %q as a floating point number", src.MustString()), + Location: src.Location(), + }) + } + default: + return config.NilValue, diags.Append(typeMismatch(config.KindFloat, src)) + } + + return config.NewValue(out, src.Location()), diags +} diff --git a/libs/config/convert/normalize_test.go b/libs/config/convert/normalize_test.go new file mode 100644 index 00000000..9c4b10bb --- /dev/null +++ b/libs/config/convert/normalize_test.go @@ -0,0 +1,435 @@ +package convert + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/diag" + "github.com/stretchr/testify/assert" +) + +func TestNormalizeStruct(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + var typ Tmp + vin := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeStructElementDiagnostic(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + var typ Tmp + vin := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V(map[string]config.Value{"an": config.V("error")}), + }) + + vout, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected string, found map`, + Location: config.Location{}, + }, err[0]) + + // Elements that encounter an error during normalization are dropped. + assert.Equal(t, map[string]any{ + "foo": "bar", + }, vout.AsAny()) +} + +func TestNormalizeStructUnknownField(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + vout, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `unknown field: bar`, + Location: vin.Get("foo").Location(), + }, err[0]) + + // The field that can be mapped to the struct field is retained. 
+ assert.Equal(t, map[string]any{ + "foo": "bar", + }, vout.AsAny()) +} + +func TestNormalizeStructNil(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := config.NilValue + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeStructError(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := config.V("string") + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected map, found string`, + Location: vin.Get("foo").Location(), + }, err[0]) +} + +func TestNormalizeMap(t *testing.T) { + var typ map[string]string + vin := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeMapElementDiagnostic(t *testing.T) { + var typ map[string]string + vin := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V(map[string]config.Value{"an": config.V("error")}), + }) + + vout, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected string, found map`, + Location: config.Location{}, + }, err[0]) + + // Elements that encounter an error during normalization are dropped. + assert.Equal(t, map[string]any{ + "foo": "bar", + }, vout.AsAny()) +} + +func TestNormalizeMapNil(t *testing.T) { + var typ map[string]string + vin := config.NilValue + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeMapError(t *testing.T) { + var typ map[string]string + vin := config.V("string") + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected map, found string`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeSlice(t *testing.T) { + var typ []string + vin := config.V([]config.Value{ + config.V("foo"), + config.V("bar"), + }) + + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeSliceElementDiagnostic(t *testing.T) { + var typ []string + vin := config.V([]config.Value{ + config.V("foo"), + config.V("bar"), + config.V(map[string]config.Value{"an": config.V("error")}), + }) + + vout, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected string, found map`, + Location: config.Location{}, + }, err[0]) + + // Elements that encounter an error during normalization are dropped. 
+ assert.Equal(t, []any{"foo", "bar"}, vout.AsAny()) +} + +func TestNormalizeSliceNil(t *testing.T) { + var typ []string + vin := config.NilValue + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeSliceError(t *testing.T) { + var typ []string + vin := config.V("string") + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected sequence, found string`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeString(t *testing.T) { + var typ string + vin := config.V("string") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeStringNil(t *testing.T) { + var typ string + vin := config.NewValue(nil, config.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected string, found nil`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeStringFromBool(t *testing.T) { + var typ string + vin := config.NewValue(true, config.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.NewValue("true", vin.Location()), vout) +} + +func TestNormalizeStringFromInt(t *testing.T) { + var typ string + vin := config.NewValue(123, config.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.NewValue("123", vin.Location()), vout) +} + +func TestNormalizeStringFromFloat(t *testing.T) { + var typ string + vin := config.NewValue(1.20, config.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.NewValue("1.2", vin.Location()), vout) +} + +func TestNormalizeStringError(t *testing.T) { + var typ string + vin := config.V(map[string]config.Value{"an": config.V("error")}) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected string, found map`, + Location: config.Location{}, + }, err[0]) +} + +func TestNormalizeBool(t *testing.T) { + var typ bool + vin := config.V(true) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.V(true), vout) +} + +func TestNormalizeBoolNil(t *testing.T) { + var typ bool + vin := config.NewValue(nil, config.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected bool, found nil`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeBoolFromString(t *testing.T) { + var typ bool + + for _, c := range []struct { + Input string + Output bool + }{ + {"true", true}, + {"false", false}, + {"Y", true}, + {"N", false}, + {"on", true}, + {"off", false}, + } { + vin := config.V(c.Input) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.V(c.Output), vout) + } +} + +func TestNormalizeBoolFromStringError(t *testing.T) { + var typ bool + vin := config.V("abc") + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected bool, found string`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeBoolError(t *testing.T) { + var typ bool + vin := config.V(map[string]config.Value{"an": 
config.V("error")}) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected bool, found map`, + Location: config.Location{}, + }, err[0]) +} + +func TestNormalizeInt(t *testing.T) { + var typ int + vin := config.V(123) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.V(int64(123)), vout) +} + +func TestNormalizeIntNil(t *testing.T) { + var typ int + vin := config.NewValue(nil, config.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected int, found nil`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeIntFromString(t *testing.T) { + var typ int + vin := config.V("123") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.V(int64(123)), vout) +} + +func TestNormalizeIntFromStringError(t *testing.T) { + var typ int + vin := config.V("abc") + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `cannot parse "abc" as an integer`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeIntError(t *testing.T) { + var typ int + vin := config.V(map[string]config.Value{"an": config.V("error")}) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected int, found map`, + Location: config.Location{}, + }, err[0]) +} + +func TestNormalizeFloat(t *testing.T) { + var typ float64 + vin := config.V(1.2) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.V(1.2), vout) +} + +func TestNormalizeFloatNil(t *testing.T) { + var typ float64 + vin := config.NewValue(nil, config.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected float, found nil`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeFloatFromString(t *testing.T) { + var typ float64 + vin := config.V("1.2") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, config.V(1.2), vout) +} + +func TestNormalizeFloatFromStringError(t *testing.T) { + var typ float64 + vin := config.V("abc") + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `cannot parse "abc" as a floating point number`, + Location: vin.Location(), + }, err[0]) +} + +func TestNormalizeFloatError(t *testing.T) { + var typ float64 + vin := config.V(map[string]config.Value{"an": config.V("error")}) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Error, + Summary: `expected float, found map`, + Location: config.Location{}, + }, err[0]) +} diff --git a/libs/diag/diagnostic.go b/libs/diag/diagnostic.go new file mode 100644 index 00000000..c5757a58 --- /dev/null +++ b/libs/diag/diagnostic.go @@ -0,0 +1,76 @@ +package diag + +import ( + "fmt" + + "github.com/databricks/cli/libs/config" +) + +type Diagnostic struct { + Severity Severity + + // Summary is a short description of the diagnostic. + // This is expected to be a single line and always present. + Summary string + + // Detail is a longer description of the diagnostic. + // This may be multiple lines and may be nil. 
+	Detail string
+
+	// Location is a source code location associated with the diagnostic message.
+	// It may be zero if there is no associated location.
+	Location config.Location
+}
+
+// Errorf creates a new error diagnostic.
+func Errorf(format string, args ...any) Diagnostics {
+	return []Diagnostic{
+		{
+			Severity: Error,
+			Summary:  fmt.Sprintf(format, args...),
+		},
+	}
+}
+
+// Warningf creates a new warning diagnostic.
+func Warningf(format string, args ...any) Diagnostics {
+	return []Diagnostic{
+		{
+			Severity: Warning,
+			Summary:  fmt.Sprintf(format, args...),
+		},
+	}
+}
+
+// Infof creates a new info diagnostic.
+func Infof(format string, args ...any) Diagnostics {
+	return []Diagnostic{
+		{
+			Severity: Info,
+			Summary:  fmt.Sprintf(format, args...),
+		},
+	}
+}
+
+// Diagnostics holds zero or more instances of [Diagnostic].
+type Diagnostics []Diagnostic
+
+// Append adds a new diagnostic to the end of the list.
+func (ds Diagnostics) Append(d Diagnostic) Diagnostics {
+	return append(ds, d)
+}
+
+// Extend adds all diagnostics from another list to the end of the list.
+func (ds Diagnostics) Extend(other Diagnostics) Diagnostics {
+	return append(ds, other...)
+}
+
+// HasError returns true if any of the diagnostics are errors.
+func (ds Diagnostics) HasError() bool {
+	for _, d := range ds {
+		if d.Severity == Error {
+			return true
+		}
+	}
+	return false
+}
diff --git a/libs/diag/severity.go b/libs/diag/severity.go
new file mode 100644
index 00000000..d25c1280
--- /dev/null
+++ b/libs/diag/severity.go
@@ -0,0 +1,9 @@
+package diag
+
+type Severity int
+
+const (
+	Error Severity = iota
+	Warning
+	Info
+)

From a5815a0b475dcec281c23e5bb4408c10a46694ec Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Wed, 25 Oct 2023 14:27:25 +0200
Subject: [PATCH 187/310] Add welcome message to bundle templates (#907)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Changes
Adds a `welcome_message` field to templates and to the default Python template.

## Tests
Manually. Here are the output logs during template init now:
```
shreyas.goenka@THW32HFW6T bricks % cli bundle init
Template to use [default-python]:
Welcome to the sample Databricks Asset Bundle template!
Please enter the following information to initialize your sample DAB.
Unique name for this project [my_project]: abcde
Include a stub (sample) notebook in 'abcde/src': no
Include a stub (sample) Delta Live Tables pipeline in 'abcde/src': yes
Include a stub (sample) Python package in 'abcde/src': no
✨ Your new project has been created in the 'abcde' directory!
Please refer to the README.md of your project for further instructions on getting started.
Or read the documentation on Databricks Asset Bundles at https://docs.databricks.com/dev-tools/bundles/index.html.
```

---
 libs/jsonschema/extension.go                        | 3 +++
 libs/template/materialize.go                        | 6 ++++++
 .../default-python/databricks_template_schema.json  | 1 +
 3 files changed, 10 insertions(+)

diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go
index 8503ab2d..ffb77bd8 100644
--- a/libs/jsonschema/extension.go
+++ b/libs/jsonschema/extension.go
@@ -12,6 +12,9 @@ type Extension struct {
 	// that do have an order defined.
 	Order *int `json:"order,omitempty"`
 
+	// Welcome message to print before prompting the user for input
+	WelcomeMessage string `json:"welcome_message,omitempty"`
+
 	// The message to print after the template is successfully initialized
 	SuccessMessage string `json:"success_message,omitempty"`

diff --git a/libs/template/materialize.go b/libs/template/materialize.go
index ec62e41f..7c9105b7 100644
--- a/libs/template/materialize.go
+++ b/libs/template/materialize.go
@@ -48,6 +48,12 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st
 		return err
 	}
 
+	// Print welcome message
+	welcome := config.schema.WelcomeMessage
+	if welcome != "" {
+		cmdio.LogString(ctx, welcome)
+	}
+
 	// Read and assign config values from file
 	if configFilePath != "" {
 		err = config.assignValuesFromFile(configFilePath)
diff --git a/libs/template/templates/default-python/databricks_template_schema.json b/libs/template/templates/default-python/databricks_template_schema.json
index 8b26ee70..8d5afb57 100644
--- a/libs/template/templates/default-python/databricks_template_schema.json
+++ b/libs/template/templates/default-python/databricks_template_schema.json
@@ -1,4 +1,5 @@
 {
+    "welcome_message": "\nWelcome to the sample Databricks Asset Bundle template! Please enter the following information to initialize your sample DAB.\n",
     "properties": {
         "project_name": {
             "type": "string",

From 6e21ced54ac9c7d2eea9adbf75fe86a67936c811 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Wed, 25 Oct 2023 13:56:42 +0200
Subject: [PATCH 188/310] Consolidate bundle configuration loader function
 (#918)

## Changes
There were two functions related to loading a bundle configuration file: one a package function and one a member function on the configuration type. Loading the same configuration object twice doesn't make sense, so we consolidate on using only the package function.

The package function would scan for known file names if the specified path was a directory. This functionality was not in use because the top-level bundle loader figures out the filename itself as of #580.

## Tests
Pass.

---
 bundle/bundle.go           |  3 +-
 bundle/config/root.go      | 56 ++++++++++++--------------------------
 bundle/config/root_test.go | 12 +++-----
 3 files changed, 24 insertions(+), 47 deletions(-)

diff --git a/bundle/bundle.go b/bundle/bundle.go
index e1625179..fd9c131f 100644
--- a/bundle/bundle.go
+++ b/bundle/bundle.go
@@ -76,10 +76,11 @@ func Load(ctx context.Context, path string) (*Bundle, error) {
 		return nil, err
 	}
 	log.Debugf(ctx, "Loading bundle configuration from: %s", configFile)
-	err = bundle.Config.Load(configFile)
+	root, err := config.Load(configFile)
 	if err != nil {
 		return nil, err
 	}
+	bundle.Config = *root
 	return bundle, nil
 }
diff --git a/bundle/config/root.go b/bundle/config/root.go
index 25d5ce5f..31867c6c 100644
--- a/bundle/config/root.go
+++ b/bundle/config/root.go
@@ -58,27 +58,33 @@ type Root struct {
 	Experimental *Experimental `json:"experimental,omitempty"`
 }
 
+// Load loads the bundle configuration file at the specified path.
 func Load(path string) (*Root, error) {
-	var r Root
-
-	stat, err := os.Stat(path)
+	raw, err := os.ReadFile(path)
 	if err != nil {
 		return nil, err
 	}
-	// If we were given a directory, assume this is the bundle root.
- if stat.IsDir() { - path, err = FileNames.FindInPath(path) - if err != nil { - return nil, err - } + var r Root + err = yaml.Unmarshal(raw, &r) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", path, err) } - if err := r.Load(path); err != nil { - return nil, err + if r.Environments != nil && r.Targets != nil { + return nil, fmt.Errorf("both 'environments' and 'targets' are specified, only 'targets' should be used: %s", path) } - return &r, nil + if r.Environments != nil { + //TODO: add a command line notice that this is a deprecated option. + r.Targets = r.Environments + } + + r.Path = filepath.Dir(path) + r.SetConfigFilePath(path) + + _, err = r.Resources.VerifyUniqueResourceIdentifiers() + return &r, err } // SetConfigFilePath configures the path that its configuration @@ -127,32 +133,6 @@ func (r *Root) InitializeVariables(vars []string) error { return nil } -func (r *Root) Load(path string) error { - raw, err := os.ReadFile(path) - if err != nil { - return err - } - err = yaml.Unmarshal(raw, r) - if err != nil { - return fmt.Errorf("failed to load %s: %w", path, err) - } - - if r.Environments != nil && r.Targets != nil { - return fmt.Errorf("both 'environments' and 'targets' are specified, only 'targets' should be used: %s", path) - } - - if r.Environments != nil { - //TODO: add a command line notice that this is a deprecated option. - r.Targets = r.Environments - } - - r.Path = filepath.Dir(path) - r.SetConfigFilePath(path) - - _, err = r.Resources.VerifyUniqueResourceIdentifiers() - return err -} - func (r *Root) Merge(other *Root) error { err := r.Sync.Merge(r, other) if err != nil { diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index b0dfc3ec..3f37da07 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -25,8 +25,7 @@ func TestRootMarshalUnmarshal(t *testing.T) { } func TestRootLoad(t *testing.T) { - root := &Root{} - err := root.Load("../tests/basic/databricks.yml") + root, err := Load("../tests/basic/databricks.yml") require.NoError(t, err) assert.Equal(t, "basic", root.Bundle.Name) } @@ -77,18 +76,15 @@ func TestRootMergeMap(t *testing.T) { } func TestDuplicateIdOnLoadReturnsError(t *testing.T) { - root := &Root{} - err := root.Load("./testdata/duplicate_resource_names_in_root/databricks.yml") + _, err := Load("./testdata/duplicate_resource_names_in_root/databricks.yml") assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)") } func TestDuplicateIdOnMergeReturnsError(t *testing.T) { - root := &Root{} - err := root.Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml") + root, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml") require.NoError(t, err) - other := &Root{} - err = other.Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml") + other, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml") require.NoError(t, err) err = root.Merge(other) From 01f669555d3d21b4a7f30be2bae32aef2d754752 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 Oct 2023 12:11:59 +0200 Subject: [PATCH 189/310] Bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#920) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.58.2 to 1.58.3.
Release notes for 1.58.3, sourced from google.golang.org/grpc's releases:

Security

- server: prohibit more than `MaxConcurrentStreams` handlers from running at once (CVE-2023-44487)

  In addition to this change, applications should ensure they do not leave running tasks behind related to the RPC before returning from method handlers, or should enforce appropriate limits on any such work.
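As a concrete illustration of that last recommendation, here is a minimal Go sketch of bounding handler work. This is illustrative only and not part of this dependency bump; `grpc.MaxConcurrentStreams` is an existing grpc-go server option, and the semaphore-based limit is just one possible way to cap work spawned by handlers:

```go
package limits

import (
	"context"

	"golang.org/x/sync/semaphore"
	"google.golang.org/grpc"
)

// Cap how many concurrent streams the server accepts; with the 1.58.3 fix,
// the server also stops running more handlers than this at once.
func newServer() *grpc.Server {
	return grpc.NewServer(grpc.MaxConcurrentStreams(100))
}

// Separately bound any work a handler spawns that can outlive the RPC.
var background = semaphore.NewWeighted(10)

func startBackgroundWork(ctx context.Context, task func()) error {
	if err := background.Acquire(ctx, 1); err != nil {
		return err // refuse rather than queue unbounded work
	}
	go func() {
		defer background.Release(1)
		task()
	}()
	return nil
}
```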

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 75479149..3d483901 100644 --- a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( google.golang.org/api v0.146.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect - google.golang.org/grpc v1.58.2 // indirect + google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 06f6b0f7..7d336858 100644 --- a/go.sum +++ b/go.sum @@ -264,8 +264,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= -google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 9b16e9bd454ee6b37e1234be5ba338be21134242 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 26 Oct 2023 13:41:28 +0200 Subject: [PATCH 190/310] Bump the Go SDK in the CLI (#919) ## Changes Bump the Databricks Go SDK version from v0.23.0 to v0.24.0. 
## Tests --- .codegen/_openapi_sha | 2 +- .gitattributes | 2 + cmd/account/cmd.go | 2 + cmd/account/groups/groups.go | 2 + cmd/account/network-policy/network-policy.go | 239 ++++++++++++++++++ .../service-principals/service-principals.go | 2 + cmd/account/users/users.go | 10 + cmd/workspace/apps/apps.go | 232 +++++++++++++++++ cmd/workspace/cmd.go | 2 + cmd/workspace/groups/groups.go | 2 + .../model-registry/model-registry.go | 12 +- cmd/workspace/recipients/recipients.go | 2 +- .../service-principals/service-principals.go | 2 + cmd/workspace/users/users.go | 10 + go.mod | 12 +- go.sum | 32 +-- 16 files changed, 538 insertions(+), 27 deletions(-) create mode 100755 cmd/account/network-policy/network-policy.go create mode 100755 cmd/workspace/apps/apps.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index e36ae531..23aa4473 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -493a76554afd3afdd15dc858773d01643f80352a \ No newline at end of file +5903bb39137fd76ac384b2044e425f9c56840e00 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index f50218fe..5e5dc235 100755 --- a/.gitattributes +++ b/.gitattributes @@ -10,6 +10,7 @@ cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true cmd/account/metastores/metastores.go linguist-generated=true +cmd/account/network-policy/network-policy.go linguist-generated=true cmd/account/networks/networks.go linguist-generated=true cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true @@ -25,6 +26,7 @@ cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true cmd/account/workspaces/workspaces.go linguist-generated=true cmd/workspace/alerts/alerts.go linguist-generated=true +cmd/workspace/apps/apps.go linguist-generated=true cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 744b3670..38be7314 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -16,6 +16,7 @@ import ( log_delivery "github.com/databricks/cli/cmd/account/log-delivery" account_metastore_assignments "github.com/databricks/cli/cmd/account/metastore-assignments" account_metastores "github.com/databricks/cli/cmd/account/metastores" + account_network_policy "github.com/databricks/cli/cmd/account/network-policy" networks "github.com/databricks/cli/cmd/account/networks" o_auth_enrollment "github.com/databricks/cli/cmd/account/o-auth-enrollment" o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps" @@ -49,6 +50,7 @@ func New() *cobra.Command { cmd.AddCommand(log_delivery.New()) cmd.AddCommand(account_metastore_assignments.New()) cmd.AddCommand(account_metastores.New()) + cmd.AddCommand(account_network_policy.New()) cmd.AddCommand(networks.New()) cmd.AddCommand(o_auth_enrollment.New()) cmd.AddCommand(o_auth_published_apps.New()) diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 407f46a9..53bafc41 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -67,6 +67,7 @@ func newCreate() *cobra.Command 
{ // TODO: array: members // TODO: complex arg: meta // TODO: array: roles + // TODO: array: schemas cmd.Use = "create" cmd.Short = `Create a new group.` @@ -442,6 +443,7 @@ func newUpdate() *cobra.Command { // TODO: array: members // TODO: complex arg: meta // TODO: array: roles + // TODO: array: schemas cmd.Use = "update ID" cmd.Short = `Replace a group.` diff --git a/cmd/account/network-policy/network-policy.go b/cmd/account/network-policy/network-policy.go new file mode 100755 index 00000000..23a9605c --- /dev/null +++ b/cmd/account/network-policy/network-policy.go @@ -0,0 +1,239 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package network_policy + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "network-policy", + Short: `Network policy is a set of rules that defines what can be accessed from your Databricks network.`, + Long: `Network policy is a set of rules that defines what can be accessed from your + Databricks network. E.g.: You can choose to block your SQL UDF to access + internet from your Databricks serverless clusters. + + There is only one instance of this setting per account. Since this setting has + a default value, this setting is present on all accounts even though it's + never set on a given account. Deletion reverts the value of the setting back + to the default value.`, + GroupID: "settings", + Annotations: map[string]string{ + "package": "settings", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete-account-network-policy command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteAccountNetworkPolicyOverrides []func( + *cobra.Command, + *settings.DeleteAccountNetworkPolicyRequest, +) + +func newDeleteAccountNetworkPolicy() *cobra.Command { + cmd := &cobra.Command{} + + var deleteAccountNetworkPolicyReq settings.DeleteAccountNetworkPolicyRequest + + // TODO: short flags + + cmd.Use = "delete-account-network-policy ETAG" + cmd.Short = `Delete Account Network Policy.` + cmd.Long = `Delete Account Network Policy. + + Reverts back all the account network policies back to default.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + deleteAccountNetworkPolicyReq.Etag = args[0] + + response, err := a.NetworkPolicy.DeleteAccountNetworkPolicy(ctx, deleteAccountNetworkPolicyReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteAccountNetworkPolicyOverrides { + fn(cmd, &deleteAccountNetworkPolicyReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDeleteAccountNetworkPolicy()) + }) +} + +// start read-account-network-policy command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var readAccountNetworkPolicyOverrides []func( + *cobra.Command, + *settings.ReadAccountNetworkPolicyRequest, +) + +func newReadAccountNetworkPolicy() *cobra.Command { + cmd := &cobra.Command{} + + var readAccountNetworkPolicyReq settings.ReadAccountNetworkPolicyRequest + + // TODO: short flags + + cmd.Use = "read-account-network-policy ETAG" + cmd.Short = `Get Account Network Policy.` + cmd.Long = `Get Account Network Policy. + + Gets the value of Account level Network Policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + readAccountNetworkPolicyReq.Etag = args[0] + + response, err := a.NetworkPolicy.ReadAccountNetworkPolicy(ctx, readAccountNetworkPolicyReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range readAccountNetworkPolicyOverrides { + fn(cmd, &readAccountNetworkPolicyReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newReadAccountNetworkPolicy()) + }) +} + +// start update-account-network-policy command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateAccountNetworkPolicyOverrides []func( + *cobra.Command, + *settings.UpdateAccountNetworkPolicyRequest, +) + +func newUpdateAccountNetworkPolicy() *cobra.Command { + cmd := &cobra.Command{} + + var updateAccountNetworkPolicyReq settings.UpdateAccountNetworkPolicyRequest + var updateAccountNetworkPolicyJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateAccountNetworkPolicyJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&updateAccountNetworkPolicyReq.AllowMissing, "allow-missing", updateAccountNetworkPolicyReq.AllowMissing, `This should always be set to true for Settings RPCs.`) + // TODO: complex arg: setting + + cmd.Use = "update-account-network-policy" + cmd.Short = `Update Account Network Policy.` + cmd.Long = `Update Account Network Policy. 
+ + Updates the policy content of Account level Network Policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateAccountNetworkPolicyJson.Unmarshal(&updateAccountNetworkPolicyReq) + if err != nil { + return err + } + } + + response, err := a.NetworkPolicy.UpdateAccountNetworkPolicy(ctx, updateAccountNetworkPolicyReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateAccountNetworkPolicyOverrides { + fn(cmd, &updateAccountNetworkPolicyReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdateAccountNetworkPolicy()) + }) +} + +// end service AccountNetworkPolicy diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index 06fc690c..4ad57d4e 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -66,6 +66,7 @@ func newCreate() *cobra.Command { // TODO: array: groups cmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Databricks service principal ID.`) // TODO: array: roles + // TODO: array: schemas cmd.Use = "create" cmd.Short = `Create a service principal.` @@ -442,6 +443,7 @@ func newUpdate() *cobra.Command { // TODO: array: groups cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Databricks service principal ID.`) // TODO: array: roles + // TODO: array: schemas cmd.Use = "update ID" cmd.Short = `Replace service principal.` diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 97e9c31f..20769699 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -72,6 +72,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Databricks user ID.`) // TODO: complex arg: name // TODO: array: roles + // TODO: array: schemas cmd.Flags().StringVar(&createReq.UserName, "user-name", createReq.UserName, `Email address of the Databricks user.`) cmd.Use = "create" @@ -215,6 +216,14 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().StringVar(&getReq.Attributes, "attributes", getReq.Attributes, `Comma-separated list of attributes to return in response.`) + cmd.Flags().IntVar(&getReq.Count, "count", getReq.Count, `Desired number of results per page.`) + cmd.Flags().StringVar(&getReq.ExcludedAttributes, "excluded-attributes", getReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) + cmd.Flags().StringVar(&getReq.Filter, "filter", getReq.Filter, `Query by which the results have to be filtered.`) + cmd.Flags().StringVar(&getReq.SortBy, "sort-by", getReq.SortBy, `Attribute to sort the results.`) + cmd.Flags().Var(&getReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().IntVar(&getReq.StartIndex, "start-index", getReq.StartIndex, `Specifies the index of the first result.`) + cmd.Use = "get ID" cmd.Short = `Get user details.` cmd.Long = `Get 
user details. @@ -451,6 +460,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Databricks user ID.`) // TODO: complex arg: name // TODO: array: roles + // TODO: array: schemas cmd.Flags().StringVar(&updateReq.UserName, "user-name", updateReq.UserName, `Email address of the Databricks user.`) cmd.Use = "update ID" diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go new file mode 100755 index 00000000..69222bfe --- /dev/null +++ b/cmd/workspace/apps/apps.go @@ -0,0 +1,232 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package apps + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "apps", + Short: `Lakehouse Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, + Long: `Lakehouse Apps run directly on a customer’s Databricks instance, integrate + with their data, use and extend Databricks services, and enable users to + interact through single sign-on.`, + GroupID: "serving", + Annotations: map[string]string{ + "package": "serving", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *serving.DeployAppRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq serving.DeployAppRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: output-only field + + cmd.Use = "create" + cmd.Short = `Create and deploy an application.` + cmd.Long = `Create and deploy an application. + + Creates and deploys an application.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Apps.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newCreate()) + }) +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *serving.DeleteAppRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq serving.DeleteAppRequest + + // TODO: short flags + + cmd.Use = "delete NAME" + cmd.Short = `Delete an application.` + cmd.Long = `Delete an application. + + Delete an application definition` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Name = args[0] + + err = w.Apps.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDelete()) + }) +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *serving.GetAppRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq serving.GetAppRequest + + // TODO: short flags + + cmd.Use = "get NAME" + cmd.Short = `Get definition for an application.` + cmd.Long = `Get definition for an application. + + Get an application definition` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Name = args[0] + + err = w.Apps.Get(ctx, getReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGet()) + }) +} + +// end service Apps diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index dc3f6798..b519b084 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -4,6 +4,7 @@ package workspace import ( alerts "github.com/databricks/cli/cmd/workspace/alerts" + apps "github.com/databricks/cli/cmd/workspace/apps" artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms" @@ -66,6 +67,7 @@ func All() []*cobra.Command { var out []*cobra.Command out = append(out, alerts.New()) + out = append(out, apps.New()) out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) out = append(out, clean_rooms.New()) diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index 4ebf740d..d4765235 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -67,6 +67,7 @@ func newCreate() *cobra.Command { // TODO: array: members // TODO: complex arg: meta // TODO: array: roles + // TODO: array: schemas cmd.Use = "create" cmd.Short = `Create a new group.` @@ -442,6 +443,7 @@ func newUpdate() *cobra.Command { // TODO: array: members // TODO: complex arg: meta // TODO: array: roles + // TODO: array: schemas cmd.Use = "update ID" cmd.Short = `Replace a group.` diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 8da9e621..c0fe43c7 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -19,9 +19,15 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "model-registry", - Short: `MLflow Model Registry is a centralized model repository and a UI and set of APIs that enable you to manage the full lifecycle of MLflow Models.`, - Long: `MLflow Model Registry is a centralized model repository and a UI and set of - APIs that enable you to manage the full lifecycle of MLflow Models.`, + Short: `Note: This API reference documents APIs for the Workspace Model Registry.`, + Long: `Note: This API reference documents APIs for the Workspace Model Registry. + Databricks recommends using [Models in Unity + Catalog](/api/workspace/registeredmodels) instead. Models in Unity Catalog + provides centralized model governance, cross-workspace access, lineage, and + deployment. Workspace Model Registry will be deprecated in the future. 
+ + The Workspace Model Registry is a centralized model repository and a UI and + set of APIs that enable you to manage the full lifecycle of MLflow Models.`, GroupID: "ml", Annotations: map[string]string{ "package": "ml", diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 9169e5ff..53576043 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -70,7 +70,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Description about the recipient.`) - // TODO: any: data_recipient_global_metastore_id + cmd.Flags().StringVar(&createReq.DataRecipientGlobalMetastoreId, "data-recipient-global-metastore-id", createReq.DataRecipientGlobalMetastoreId, `The global Unity Catalog metastore id provided by the data recipient.`) // TODO: complex arg: ip_access_list cmd.Flags().StringVar(&createReq.Owner, "owner", createReq.Owner, `Username of the recipient owner.`) // TODO: complex arg: properties_kvpairs diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index e78e0062..134e8c1f 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -66,6 +66,7 @@ func newCreate() *cobra.Command { // TODO: array: groups cmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Databricks service principal ID.`) // TODO: array: roles + // TODO: array: schemas cmd.Use = "create" cmd.Short = `Create a service principal.` @@ -442,6 +443,7 @@ func newUpdate() *cobra.Command { // TODO: array: groups cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Databricks service principal ID.`) // TODO: array: roles + // TODO: array: schemas cmd.Use = "update ID" cmd.Short = `Replace service principal.` diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index ebf319fa..97b6dcde 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -72,6 +72,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Databricks user ID.`) // TODO: complex arg: name // TODO: array: roles + // TODO: array: schemas cmd.Flags().StringVar(&createReq.UserName, "user-name", createReq.UserName, `Email address of the Databricks user.`) cmd.Use = "create" @@ -215,6 +216,14 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().StringVar(&getReq.Attributes, "attributes", getReq.Attributes, `Comma-separated list of attributes to return in response.`) + cmd.Flags().IntVar(&getReq.Count, "count", getReq.Count, `Desired number of results per page.`) + cmd.Flags().StringVar(&getReq.ExcludedAttributes, "excluded-attributes", getReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) + cmd.Flags().StringVar(&getReq.Filter, "filter", getReq.Filter, `Query by which the results have to be filtered.`) + cmd.Flags().StringVar(&getReq.SortBy, "sort-by", getReq.SortBy, `Attribute to sort the results.`) + cmd.Flags().Var(&getReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().IntVar(&getReq.StartIndex, "start-index", getReq.StartIndex, `Specifies the index of the first result.`) + cmd.Use = "get ID" cmd.Short = `Get user details.` cmd.Long = `Get user details. 
@@ -619,6 +628,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Databricks user ID.`) // TODO: complex arg: name // TODO: array: roles + // TODO: array: schemas cmd.Flags().StringVar(&updateReq.UserName, "user-name", updateReq.UserName, `Email address of the Databricks user.`) cmd.Use = "update ID" diff --git a/go.mod b/go.mod index 3d483901..b67aafa9 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.23.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.24.0 // Apache 2.0 github.com/fatih/color v1.15.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.3.1 // BSD-3-Clause @@ -22,7 +22,7 @@ require ( github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.8.4 // MIT github.com/whilp/git-urls v1.0.0 // MIT - golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/mod v0.13.0 golang.org/x/oauth2 v0.13.0 golang.org/x/sync v0.4.0 @@ -34,7 +34,7 @@ require ( require gopkg.in/yaml.v3 v3.0.1 require ( - cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute v1.23.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -45,7 +45,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -56,9 +56,9 @@ require ( golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/api v0.146.0 // indirect + google.golang.org/api v0.148.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 7d336858..7f435f6b 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -31,8 +31,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.23.0 h1:rdLMA7cDUPJiCSMyuUSufzDDmugqyp79SNiY/vc7kMI= -github.com/databricks/databricks-sdk-go v0.23.0/go.mod h1:a6rErRNh5bz+IJbO07nwW70iGyvtWidy1p/S5thepXI= +github.com/databricks/databricks-sdk-go v0.24.0 h1:fx34MOGYXVc72QBSFnKuDa/H3ekDMqZYH4jKZF8mrXk= +github.com/databricks/databricks-sdk-go v0.24.0/go.mod h1:a6rErRNh5bz+IJbO07nwW70iGyvtWidy1p/S5thepXI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -79,8 +79,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= @@ -88,8 +88,8 @@ github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -166,8 +166,8 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw= -golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= 
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -244,12 +244,12 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.146.0 h1:9aBYT4vQXt9dhCuLNfwfd3zpwu8atg0yPkjBymwSrOM= -google.golang.org/api v0.146.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM= +google.golang.org/api v0.148.0 h1:HBq4TZlN4/1pNcu0geJZ/Q50vIwIXT532UIMYoo0vOs= +google.golang.org/api v0.148.0/go.mod h1:8/TBgwaKjfqTdacOJrOv2+2Q6fBDU1uHKK06oGSkxzU= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -257,8 +257,8 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From 19e00d2d470ac046bbc34c622475bb032eaed8dc Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 26 Oct 2023 16:38:01 +0200 Subject: [PATCH 191/310] Upload terraform state even if apply fails (#923) ## Changes Upload terraform state even if apply fails Fixes #893 ## Tests Manually ran `databricks bundle deploy` with incorrect permissions in the bundle config and observed that
it gets uploaded correctly --- bundle/phases/deploy.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index a8ca7518..6c75218b 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -29,8 +29,10 @@ func Deploy() bundle.Mutator { terraform.Interpolate(), terraform.Write(), terraform.StatePull(), - terraform.Apply(), - terraform.StatePush(), + bundle.Defer( + terraform.Apply(), + terraform.StatePush(), + ), ), lock.Release(lock.GoalDeploy), ), From 6f22ae86965d8e95b241c6ede0a184549093f122 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 26 Oct 2023 16:58:16 +0200 Subject: [PATCH 192/310] Use UserName instead of Id to check if identity used is a service principal (#924) ## Changes Use UserName instead of Id to check if identity used is a service principal --- bundle/config/mutator/process_target_mode.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index c11bd1c5..39321069 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -176,7 +176,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error { } return transformDevelopmentMode(b) case config.Production: - isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.Id) + isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName) return validateProductionMode(ctx, b, isPrincipal) case "": // No action From bb662fadbb1a6a0d3c5c86d13e6c3207f09affba Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 27 Oct 2023 11:16:41 +0200 Subject: [PATCH 193/310] Bump Terraform provider to v1.29.0 (#926) This PR: 1. Regenerates go structs using provider version 1.29 2. Adds QOL autogenerated diff labels for github 3. Adds a small SOP for doing the tf provider bump for go structs --- .gitattributes | 3 +++ bundle/internal/tf/codegen/README.md | 6 ++++++ bundle/internal/tf/codegen/schema/version.go | 2 +- .../internal/tf/codegen/templates/root.go.tmpl | 1 + bundle/internal/tf/schema/data_source_job.go | 6 ++++++ .../tf/schema/resource_artifact_allowlist.go | 17 +++++++++++++++++ bundle/internal/tf/schema/resource_job.go | 6 ++++++ bundle/internal/tf/schema/resource_sql_table.go | 4 ++++ bundle/internal/tf/schema/resources.go | 2 ++ bundle/internal/tf/schema/root.go | 3 ++- 10 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 bundle/internal/tf/schema/resource_artifact_allowlist.go diff --git a/.gitattributes b/.gitattributes index 5e5dc235..e94cfcd7 100755 --- a/.gitattributes +++ b/.gitattributes @@ -83,3 +83,6 @@ cmd/workspace/warehouses/warehouses.go linguist-generated=true cmd/workspace/workspace-bindings/workspace-bindings.go linguist-generated=true cmd/workspace/workspace-conf/workspace-conf.go linguist-generated=true cmd/workspace/workspace/workspace.go linguist-generated=true + +# Hide diff for Go structs generated from databricks terraform provider schema +bundle/internal/tf/schema/*.go linguist-generated=true diff --git a/bundle/internal/tf/codegen/README.md b/bundle/internal/tf/codegen/README.md index fce447cf..b1f8a33a 100644 --- a/bundle/internal/tf/codegen/README.md +++ b/bundle/internal/tf/codegen/README.md @@ -13,3 +13,9 @@ Run with: ```go go run . ``` + +How to regenerate Go structs from an updated terraform provider? +1. 
Bump version in ./schema/version.go +2. Delete `./tmp` if it exists +3. Run `go run .` +4. Run `gofmt -s -w ../schema` diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index c82218fc..3269a971 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.28.0" +const ProviderVersion = "1.29.0" diff --git a/bundle/internal/tf/codegen/templates/root.go.tmpl b/bundle/internal/tf/codegen/templates/root.go.tmpl index 5530427c..57fa7129 100644 --- a/bundle/internal/tf/codegen/templates/root.go.tmpl +++ b/bundle/internal/tf/codegen/templates/root.go.tmpl @@ -1,3 +1,4 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. package schema type Providers struct { diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index 569c8b81..49be8f01 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -24,6 +24,11 @@ type DataSourceJobJobSettingsSettingsDbtTask struct { WarehouseId string `json:"warehouse_id,omitempty"` } +type DataSourceJobJobSettingsSettingsDeployment struct { + Kind string `json:"kind"` + MetadataFilePath string `json:"metadata_file_path,omitempty"` +} + type DataSourceJobJobSettingsSettingsEmailNotifications struct { AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` @@ -848,6 +853,7 @@ type DataSourceJobJobSettingsSettings struct { Compute []DataSourceJobJobSettingsSettingsCompute `json:"compute,omitempty"` Continuous *DataSourceJobJobSettingsSettingsContinuous `json:"continuous,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsDbtTask `json:"dbt_task,omitempty"` + Deployment *DataSourceJobJobSettingsSettingsDeployment `json:"deployment,omitempty"` EmailNotifications *DataSourceJobJobSettingsSettingsEmailNotifications `json:"email_notifications,omitempty"` GitSource *DataSourceJobJobSettingsSettingsGitSource `json:"git_source,omitempty"` Health *DataSourceJobJobSettingsSettingsHealth `json:"health,omitempty"` diff --git a/bundle/internal/tf/schema/resource_artifact_allowlist.go b/bundle/internal/tf/schema/resource_artifact_allowlist.go new file mode 100644 index 00000000..e2629ac1 --- /dev/null +++ b/bundle/internal/tf/schema/resource_artifact_allowlist.go @@ -0,0 +1,17 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceArtifactAllowlistArtifactMatcher struct { + Artifact string `json:"artifact"` + MatchType string `json:"match_type"` +} + +type ResourceArtifactAllowlist struct { + ArtifactType string `json:"artifact_type"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + ArtifactMatcher []ResourceArtifactAllowlistArtifactMatcher `json:"artifact_matcher,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 7af07560..b4a33bdf 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -24,6 +24,11 @@ type ResourceJobDbtTask struct { WarehouseId string `json:"warehouse_id,omitempty"` } +type ResourceJobDeployment struct { + Kind string `json:"kind"` + MetadataFilePath string `json:"metadata_file_path,omitempty"` +} + type ResourceJobEmailNotifications struct { AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` @@ -852,6 +857,7 @@ type ResourceJob struct { Compute []ResourceJobCompute `json:"compute,omitempty"` Continuous *ResourceJobContinuous `json:"continuous,omitempty"` DbtTask *ResourceJobDbtTask `json:"dbt_task,omitempty"` + Deployment *ResourceJobDeployment `json:"deployment,omitempty"` EmailNotifications *ResourceJobEmailNotifications `json:"email_notifications,omitempty"` GitSource *ResourceJobGitSource `json:"git_source,omitempty"` Health *ResourceJobHealth `json:"health,omitempty"` diff --git a/bundle/internal/tf/schema/resource_sql_table.go b/bundle/internal/tf/schema/resource_sql_table.go index 077645c2..97a8977b 100644 --- a/bundle/internal/tf/schema/resource_sql_table.go +++ b/bundle/internal/tf/schema/resource_sql_table.go @@ -12,15 +12,19 @@ type ResourceSqlTableColumn struct { type ResourceSqlTable struct { CatalogName string `json:"catalog_name"` ClusterId string `json:"cluster_id,omitempty"` + ClusterKeys []string `json:"cluster_keys,omitempty"` Comment string `json:"comment,omitempty"` DataSourceFormat string `json:"data_source_format,omitempty"` Id string `json:"id,omitempty"` Name string `json:"name"` + Options map[string]string `json:"options,omitempty"` + Partitions []string `json:"partitions,omitempty"` Properties map[string]string `json:"properties,omitempty"` SchemaName string `json:"schema_name"` StorageCredentialName string `json:"storage_credential_name,omitempty"` StorageLocation string `json:"storage_location,omitempty"` TableType string `json:"table_type"` ViewDefinition string `json:"view_definition,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` Column []ResourceSqlTableColumn `json:"column,omitempty"` } diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index cf98f9a9..9a04be7e 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -4,6 +4,7 @@ package schema type Resources struct { AccessControlRuleSet map[string]*ResourceAccessControlRuleSet `json:"databricks_access_control_rule_set,omitempty"` + ArtifactAllowlist map[string]*ResourceArtifactAllowlist `json:"databricks_artifact_allowlist,omitempty"` AwsS3Mount map[string]*ResourceAwsS3Mount `json:"databricks_aws_s3_mount,omitempty"` AzureAdlsGen1Mount map[string]*ResourceAzureAdlsGen1Mount `json:"databricks_azure_adls_gen1_mount,omitempty"` 
AzureAdlsGen2Mount map[string]*ResourceAzureAdlsGen2Mount `json:"databricks_azure_adls_gen2_mount,omitempty"` @@ -87,6 +88,7 @@ type Resources struct { func NewResources() *Resources { return &Resources{ AccessControlRuleSet: make(map[string]*ResourceAccessControlRuleSet), + ArtifactAllowlist: make(map[string]*ResourceArtifactAllowlist), AwsS3Mount: make(map[string]*ResourceAwsS3Mount), AzureAdlsGen1Mount: make(map[string]*ResourceAzureAdlsGen1Mount), AzureAdlsGen2Mount: make(map[string]*ResourceAzureAdlsGen2Mount), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 74f4db1a..3ad8cf4d 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -1,3 +1,4 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. package schema type Providers struct { @@ -24,7 +25,7 @@ func NewRoot() *Root { "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ "source": "databricks/databricks", - "version": "1.28.0", + "version": "1.29.0", }, }, }, From 905fe10e625ceb90137f5c5e1422e5aba41c1c23 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Fri, 27 Oct 2023 12:50:41 +0200 Subject: [PATCH 194/310] `make snapshot` to build file in `.databricks/databricks` (#927) Goreleaser builds the binary in 10-15 seconds, but `go build` does it in just 3-5 seconds. The target is the `.databricks` folder in the current checkout, which is already in `.gitignore`. Make sure you have the following in your $PATH: ``` PATH="/path/to/cli/checkout/.databricks:$PATH" ``` --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3c55b8cf..243a9119 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ build: vendor snapshot: @echo "✓ Building dev snapshot" - @goreleaser build --snapshot --clean --single-target + @go build -o .databricks/databricks vendor: @echo "✓ Filling vendor folder with library code ..." From 5a8cd0c5bc3f12805019b16bcede8d6edd7d5697 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:55:43 +0200 Subject: [PATCH 195/310] Persist deployment metadata in WSFS (#845) ## Changes This PR introduces a metadata struct that stores a subset of the bundle configuration that we wish to expose to other Databricks services that integrate with bundles. This metadata is uploaded to `${bundle.workspace.state_path}/metadata.json` in the WSFS destination of the bundle deployment. Documentation for the emitted metadata fields: * `version`: Version for the metadata file schema. * `config.bundle.git.branch`: Name of the git branch the bundle was deployed from. * `config.bundle.git.origin_url`: URL for the git remote "origin". * `config.bundle.git.bundle_root_path`: Relative path of the bundle root from the root of the git repository. Is set to "." if they are the same. * `config.bundle.git.commit`: SHA-1 commit hash of the exact commit this bundle was deployed from. Note: the deployment might not exactly match this commit if there are changes that have not been committed to git at deploy time. * `file_path`: Path in the workspace that bundle files are synced to. * `resources.jobs.[job-ref].id`: ID of the job. * `resources.jobs.[job-ref].relative_path`: Relative path, from the bundle root, of the YAML config file where this job was defined.
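To illustrate how a downstream service might consume this file, here is a minimal sketch (not part of this PR) that reads the metadata back using the filer and struct introduced here; the `readMetadata` helper and its `statePath` parameter are hypothetical names:

```go
package example

import (
	"context"
	"encoding/json"
	"io"

	"github.com/databricks/cli/bundle/metadata"
	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/databricks-sdk-go"
)

// readMetadata fetches metadata.json from the bundle's workspace state path
// and decodes it into the metadata.Metadata struct.
func readMetadata(ctx context.Context, w *databricks.WorkspaceClient, statePath string) (*metadata.Metadata, error) {
	f, err := filer.NewWorkspaceFilesClient(w, statePath)
	if err != nil {
		return nil, err
	}
	r, err := f.Read(ctx, "metadata.json")
	if err != nil {
		return nil, err
	}
	b, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	var md metadata.Metadata
	if err := json.Unmarshal(b, &md); err != nil {
		return nil, err
	}
	// md.Config.Resources.Jobs now maps resource keys to job IDs and config paths.
	return &md, nil
}
```

Going through the same filer interface that writes the file keeps a consumer agnostic to where the state lives; only the state path and the `metadata.Metadata` schema are part of the contract.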
Example metadata object when bundle root and git root are the same: ```json { "version": 1, "config": { "bundle": { "lock": {}, "git": { "branch": "master", "origin_url": "www.host.com", "commit": "7af8e5d3f5dceffff9295d42d21606ccf056dce0", "bundle_root_path": "." } }, "workspace": { "file_path": "/Users/shreyas.goenka@databricks.com/.bundle/pipeline-progress/default/files" }, "resources": { "jobs": { "bar": { "id": "245921165354846", "relative_path": "databricks.yml" } } }, "sync": {} } } ``` Example metadata when the git root is one level above the bundle repo: ```json { "version": 1, "config": { "bundle": { "lock": {}, "git": { "branch": "dev-branch", "origin_url": "www.my-repo.com", "commit": "3db46ef750998952b00a2b3e7991e31787e4b98b", "bundle_root_path": "pipeline-progress" } }, "workspace": { "file_path": "/Users/shreyas.goenka@databricks.com/.bundle/pipeline-progress/default/files" }, "resources": { "jobs": { "bar": { "id": "245921165354846", "relative_path": "databricks.yml" } } }, "sync": {} } } ``` This unblocks integration to the jobs break glass UI for bundles. ## Tests Unit tests and integration tests. --- bundle/bundle.go | 9 ++ bundle/config/bundle.go | 2 +- bundle/config/git.go | 3 + bundle/config/lock.go | 4 +- bundle/config/mutator/load_git_details.go | 13 +++ bundle/config/paths/paths.go | 4 +- bundle/deploy/metadata/compute.go | 51 +++++++++ bundle/deploy/metadata/compute_test.go | 100 +++++++++++++++++ bundle/deploy/metadata/upload.go | 36 ++++++ bundle/metadata/metadata.go | 45 ++++++++ bundle/phases/deploy.go | 8 +- .../databricks_template_schema.json | 2 +- .../databricks_template_schema.json | 16 +++ .../bundles/job_metadata/template/a/b/bar.py | 2 + .../template/a/b/resources.yml.tmpl | 12 ++ .../job_metadata/template/databricks.yml.tmpl | 21 ++++ .../bundles/job_metadata/template/foo.py | 2 + internal/bundle/job_metadata_test.go | 105 ++++++++++++++++++ libs/git/repository.go | 2 +- 19 files changed, 429 insertions(+), 8 deletions(-) create mode 100644 bundle/deploy/metadata/compute.go create mode 100644 bundle/deploy/metadata/compute_test.go create mode 100644 bundle/deploy/metadata/upload.go create mode 100644 bundle/metadata/metadata.go create mode 100644 internal/bundle/bundles/job_metadata/databricks_template_schema.json create mode 100644 internal/bundle/bundles/job_metadata/template/a/b/bar.py create mode 100644 internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl create mode 100644 internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/job_metadata/template/foo.py create mode 100644 internal/bundle/job_metadata_test.go diff --git a/bundle/bundle.go b/bundle/bundle.go index fd9c131f..a2d774bb 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -15,6 +15,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/bundle/metadata" "github.com/databricks/cli/folders" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" @@ -31,6 +32,14 @@ const internalFolder = ".internal" type Bundle struct { Config config.Root + // Metadata about the bundle deployment. This is the interface Databricks services + // rely on to integrate with bundles when they need additional information about + // a bundle deployment. + // + // After deploy, a file containing the metadata (metadata.json) can be found + // in the WSFS location containing the bundle state. 
+ Metadata metadata.Metadata + // Store a pointer to the workspace client. // It can be initialized on demand after loading the configuration. clientOnce sync.Once diff --git a/bundle/config/bundle.go b/bundle/config/bundle.go index d444f507..933e88bf 100644 --- a/bundle/config/bundle.go +++ b/bundle/config/bundle.go @@ -29,7 +29,7 @@ type Bundle struct { Lock Lock `json:"lock" bundle:"readonly"` // Force-override Git branch validation. - Force bool `json:"force" bundle:"readonly"` + Force bool `json:"force,omitempty" bundle:"readonly"` // Contains Git information like current commit, current branch and // origin url. Automatically loaded by reading .git directory if not specified diff --git a/bundle/config/git.go b/bundle/config/git.go index 760134a8..58a5d54d 100644 --- a/bundle/config/git.go +++ b/bundle/config/git.go @@ -5,6 +5,9 @@ type Git struct { OriginURL string `json:"origin_url,omitempty"` Commit string `json:"commit,omitempty" bundle:"readonly"` + // Path to bundle root relative to the git repository root. + BundleRootPath string `json:"bundle_root_path,omitempty" bundle:"readonly"` + // Inferred is set to true if the Git details were inferred and weren't set explicitly Inferred bool `json:"-" bundle:"readonly"` diff --git a/bundle/config/lock.go b/bundle/config/lock.go index 28d5a5ac..760099a9 100644 --- a/bundle/config/lock.go +++ b/bundle/config/lock.go @@ -4,11 +4,11 @@ type Lock struct { // Enabled toggles deployment lock. True by default. // Use a pointer value so that only explicitly configured values are set // and we don't merge configuration with zero-initialized values. - Enabled *bool `json:"enabled"` + Enabled *bool `json:"enabled,omitempty"` // Force acquisition of deployment lock even if it is currently held. // This may be necessary if a prior deployment failed to release the lock. - Force bool `json:"force"` + Force bool `json:"force,omitempty"` } func (lock Lock) IsEnabled() bool { diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index ab47677d..3a50d683 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -2,6 +2,7 @@ package mutator import ( "context" + "path/filepath" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/git" @@ -52,5 +53,17 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error { remoteUrl := repo.OriginUrl() b.Config.Bundle.Git.OriginURL = remoteUrl } + + // Compute relative path of the bundle root from the Git repo root. + absBundlePath, err := filepath.Abs(b.Config.Path) + if err != nil { + return err + } + // repo.Root() returns the absolute path of the repo + relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath) + if err != nil { + return err + } + b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath) return nil } diff --git a/bundle/config/paths/paths.go b/bundle/config/paths/paths.go index c2cbcb7d..2c9ecb8c 100644 --- a/bundle/config/paths/paths.go +++ b/bundle/config/paths/paths.go @@ -6,8 +6,8 @@ import ( ) type Paths struct { - // ConfigFilePath holds the path to the configuration file that - // described the resource that this type is embedded in. + // Absolute path on the local file system to the configuration file that holds + // the definition of this resource. 
ConfigFilePath string `json:"-" bundle:"readonly"` } diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go new file mode 100644 index 00000000..9a3ae0e3 --- /dev/null +++ b/bundle/deploy/metadata/compute.go @@ -0,0 +1,51 @@ +package metadata + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/metadata" +) + +type compute struct{} + +func Compute() bundle.Mutator { + return &compute{} +} + +func (m *compute) Name() string { + return "metadata.Compute" +} + +func (m *compute) Apply(_ context.Context, b *bundle.Bundle) error { + b.Metadata = metadata.Metadata{ + Version: metadata.Version, + Config: metadata.Config{}, + } + + // Set git details in metadata + b.Metadata.Config.Bundle.Git = b.Config.Bundle.Git + + // Set job config paths in metadata + jobsMetadata := make(map[string]*metadata.Job) + for name, job := range b.Config.Resources.Jobs { + // Compute config file path the job is defined in, relative to the bundle + // root + relativePath, err := filepath.Rel(b.Config.Path, job.ConfigFilePath) + if err != nil { + return fmt.Errorf("failed to compute relative path for job %s: %w", name, err) + } + // Metadata for the job + jobsMetadata[name] = &metadata.Job{ + ID: job.ID, + RelativePath: filepath.ToSlash(relativePath), + } + } + b.Metadata.Config.Resources.Jobs = jobsMetadata + + // Set file upload destination of the bundle in metadata + b.Metadata.Config.Workspace.FilesPath = b.Config.Workspace.FilesPath + return nil +} diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go new file mode 100644 index 00000000..9e4b475c --- /dev/null +++ b/bundle/deploy/metadata/compute_test.go @@ -0,0 +1,100 @@ +package metadata + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/metadata" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestComputeMetadataMutator(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Users/shreyas.goenka@databricks.com", + ArtifactsPath: "/Users/shreyas.goenka@databricks.com/artifacts", + FilesPath: "/Users/shreyas.goenka@databricks.com/files", + }, + Bundle: config.Bundle{ + Name: "my-bundle", + Target: "development", + Git: config.Git{ + Branch: "my-branch", + OriginURL: "www.host.com", + Commit: "abcd", + BundleRootPath: "a/b/c/d", + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my-job-1": { + Paths: paths.Paths{ + ConfigFilePath: "a/b/c", + }, + ID: "1111", + JobSettings: &jobs.JobSettings{ + Name: "My Job One", + }, + }, + "my-job-2": { + Paths: paths.Paths{ + ConfigFilePath: "d/e/f", + }, + ID: "2222", + JobSettings: &jobs.JobSettings{ + Name: "My Job Two", + }, + }, + }, + Pipelines: map[string]*resources.Pipeline{ + "my-pipeline": { + Paths: paths.Paths{ + ConfigFilePath: "abc", + }, + }, + }, + }, + }, + } + + expectedMetadata := metadata.Metadata{ + Version: metadata.Version, + Config: metadata.Config{ + Workspace: metadata.Workspace{ + FilesPath: "/Users/shreyas.goenka@databricks.com/files", + }, + Bundle: metadata.Bundle{ + Git: config.Git{ + Branch: "my-branch", + OriginURL: "www.host.com", + Commit: "abcd", + BundleRootPath: "a/b/c/d", + 
}, + }, + Resources: metadata.Resources{ + Jobs: map[string]*metadata.Job{ + "my-job-1": { + RelativePath: "a/b/c", + ID: "1111", + }, + "my-job-2": { + RelativePath: "d/e/f", + ID: "2222", + }, + }, + }, + }, + } + + err := Compute().Apply(context.Background(), b) + require.NoError(t, err) + + assert.Equal(t, expectedMetadata, b.Metadata) +} diff --git a/bundle/deploy/metadata/upload.go b/bundle/deploy/metadata/upload.go new file mode 100644 index 00000000..f550a66e --- /dev/null +++ b/bundle/deploy/metadata/upload.go @@ -0,0 +1,36 @@ +package metadata + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/filer" +) + +const MetadataFileName = "metadata.json" + +type upload struct{} + +func Upload() bundle.Mutator { + return &upload{} +} + +func (m *upload) Name() string { + return "metadata.Upload" +} + +func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { + f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) + if err != nil { + return err + } + + metadata, err := json.MarshalIndent(b.Metadata, "", " ") + if err != nil { + return err + } + + return f.Write(ctx, MetadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists) +} diff --git a/bundle/metadata/metadata.go b/bundle/metadata/metadata.go new file mode 100644 index 00000000..27edd584 --- /dev/null +++ b/bundle/metadata/metadata.go @@ -0,0 +1,45 @@ +package metadata + +import ( + "github.com/databricks/cli/bundle/config" +) + +const Version = 1 + +type Bundle struct { + Git config.Git `json:"git,omitempty"` +} + +type Workspace struct { + FilesPath string `json:"file_path,omitempty"` +} + +type Job struct { + ID string `json:"id,omitempty"` + + // Relative path from the bundle root to the configuration file that holds + // the definition of this resource. + RelativePath string `json:"relative_path,omitempty"` +} + +type Resources struct { + Jobs map[string]*Job `json:"jobs,omitempty"` +} + +type Config struct { + Bundle Bundle `json:"bundle,omitempty"` + Workspace Workspace `json:"workspace,omitempty"` + Resources Resources `json:"resources,omitempty"` +} + +// Metadata about the bundle deployment. This is the interface Databricks services +// rely on to integrate with bundles when they need additional information about +// a bundle deployment. +// +// After deploy, a file containing the metadata (metadata.json) can be found +// in the WSFS location containing the bundle state. 
+type Metadata struct { + Version int `json:"version"` + + Config Config `json:"config"` +} diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 6c75218b..805bae80 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/bundle/deploy/lock" + "github.com/databricks/cli/bundle/deploy/metadata" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/bundle/python" @@ -31,7 +32,12 @@ func Deploy() bundle.Mutator { terraform.StatePull(), bundle.Defer( terraform.Apply(), - terraform.StatePush(), + bundle.Seq( + terraform.StatePush(), + terraform.Load(), + metadata.Compute(), + metadata.Upload(), + ), ), ), lock.Release(lock.GoalDeploy), diff --git a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json b/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json index cfed842c..8fca7a7c 100644 --- a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json +++ b/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json @@ -2,7 +2,7 @@ "properties": { "unique_id": { "type": "string", - "description": "Unique ID for job name" + "description": "Unique ID for pipeline name" } } } diff --git a/internal/bundle/bundles/job_metadata/databricks_template_schema.json b/internal/bundle/bundles/job_metadata/databricks_template_schema.json new file mode 100644 index 00000000..c1c5cf12 --- /dev/null +++ b/internal/bundle/bundles/job_metadata/databricks_template_schema.json @@ -0,0 +1,16 @@ +{ + "properties": { + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" + } + } +} diff --git a/internal/bundle/bundles/job_metadata/template/a/b/bar.py b/internal/bundle/bundles/job_metadata/template/a/b/bar.py new file mode 100644 index 00000000..6f463767 --- /dev/null +++ b/internal/bundle/bundles/job_metadata/template/a/b/bar.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("bye") diff --git a/internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl b/internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl new file mode 100644 index 00000000..bdba05f5 --- /dev/null +++ b/internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl @@ -0,0 +1,12 @@ +resources: + jobs: + bar: + name: test-job-metadata-2-{{.unique_id}} + tasks: + - task_key: my_notebook_task + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + notebook_task: + notebook_path: "./bar.py" diff --git a/internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl b/internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl new file mode 100644 index 00000000..7aaabadd --- /dev/null +++ b/internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl @@ -0,0 +1,21 @@ +bundle: + name: job-metadata + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + +include: + - "a/b/*.yml" + +resources: + jobs: + foo: + name: test-job-metadata-1-{{.unique_id}} + tasks: + - task_key: my_notebook_task + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: 
"{{.node_type_id}}" + notebook_task: + notebook_path: "./foo.py" diff --git a/internal/bundle/bundles/job_metadata/template/foo.py b/internal/bundle/bundles/job_metadata/template/foo.py new file mode 100644 index 00000000..4914a743 --- /dev/null +++ b/internal/bundle/bundles/job_metadata/template/foo.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("hello") diff --git a/internal/bundle/job_metadata_test.go b/internal/bundle/job_metadata_test.go new file mode 100644 index 00000000..70962c4c --- /dev/null +++ b/internal/bundle/job_metadata_test.go @@ -0,0 +1,105 @@ +package bundle + +import ( + "context" + "encoding/json" + "fmt" + "io" + "path" + "strconv" + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/metadata" + "github.com/databricks/cli/internal" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccJobsMetadataFile(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + nodeTypeId := internal.GetNodeTypeId(env) + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, "job_metadata", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": "13.2.x-snapshot-scala2.12", + }) + require.NoError(t, err) + + // deploy bundle + err = deployBundle(t, bundleRoot) + require.NoError(t, err) + + // Cleanup the deployed bundle + t.Cleanup(func() { + err = destroyBundle(t, bundleRoot) + require.NoError(t, err) + }) + + // assert job 1 is created + jobName := "test-job-metadata-1-" + uniqueId + job1, err := w.Jobs.GetBySettingsName(context.Background(), jobName) + require.NoError(t, err) + assert.Equal(t, job1.Settings.Name, jobName) + + // assert job 2 is created + jobName = "test-job-metadata-2-" + uniqueId + job2, err := w.Jobs.GetBySettingsName(context.Background(), jobName) + require.NoError(t, err) + assert.Equal(t, job2.Settings.Name, jobName) + + // Compute root path for the bundle deployment + me, err := w.CurrentUser.Me(context.Background()) + require.NoError(t, err) + root := fmt.Sprintf("/Users/%s/.bundle/%s", me.UserName, uniqueId) + f, err := filer.NewWorkspaceFilesClient(w, root) + require.NoError(t, err) + + // Read metadata object from the workspace + r, err := f.Read(context.Background(), "state/metadata.json") + require.NoError(t, err) + b, err := io.ReadAll(r) + require.NoError(t, err) + actualMetadata := metadata.Metadata{} + err = json.Unmarshal(b, &actualMetadata) + require.NoError(t, err) + + // expected value for the metadata + expectedMetadata := metadata.Metadata{ + Version: metadata.Version, + Config: metadata.Config{ + Bundle: metadata.Bundle{ + Git: config.Git{ + BundleRootPath: ".", + }, + }, + Workspace: metadata.Workspace{ + FilesPath: path.Join(root, "files"), + }, + Resources: metadata.Resources{ + Jobs: map[string]*metadata.Job{ + "foo": { + ID: strconv.FormatInt(job1.JobId, 10), + RelativePath: "databricks.yml", + }, + "bar": { + ID: strconv.FormatInt(job2.JobId, 10), + RelativePath: "a/b/resources.yml", + }, + }, + }, + }, + } + + // Assert metadata matches what we expected. 
+ assert.Equal(t, expectedMetadata, actualMetadata) +} diff --git a/libs/git/repository.go b/libs/git/repository.go index 9c847687..d1641118 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -40,7 +40,7 @@ type Repository struct { config *config } -// Root returns the repository root. +// Root returns the absolute path to the repository root. func (r *Repository) Root() string { return r.rootPath } From b91fab7d09982b097aa5c09f35bb36efeeb82c05 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 27 Oct 2023 17:43:44 +0200 Subject: [PATCH 196/310] Run make fmt from fmt job (#929) ## Changes I noticed we weren't running `goimports` from our formatting job. If we run `make fmt`, we do. ## Tests The fmt job passes. --- .github/workflows/push.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index d1beea44..617238c2 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -73,11 +73,13 @@ jobs: # No need to download cached dependencies when running gofmt. cache: false - - name: Run gofmt + - name: Install goimports run: | - # -l: list files that were reformatted - # -w: write back formatted files to disk - gofmt -l -w ./ + go install golang.org/x/tools/cmd/goimports@latest + + - name: Run make fmt + run: | + make fmt - name: Run go mod tidy run: | From e408b701ac6be5de3295b93474b19335be885231 Mon Sep 17 00:00:00 2001 From: Taiga Matsumoto Date: Fri, 27 Oct 2023 11:57:26 -0700 Subject: [PATCH 197/310] Add override to support YAML inputs for apps (#921) ## Changes Take @andrefurlan-db's original [commit](https://github.com/databricks/cli/compare/databricks:6e21ced...andrefurlan-db:12ed10c) to add `apps` support to the CLI, and add YAML file support as an override (the apps routes are already part of the Go SDK and are available for use in the CLI). **NOTE: this feature is still in private preview.
CLI usage will be internal only** ## Tests --- cmd/workspace/apps/overrides.go | 58 +++++++++++++++++++++++++++++++++ libs/flags/yaml_flag.go | 42 ++++++++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 cmd/workspace/apps/overrides.go create mode 100644 libs/flags/yaml_flag.go diff --git a/cmd/workspace/apps/overrides.go b/cmd/workspace/apps/overrides.go new file mode 100644 index 00000000..e38e139b --- /dev/null +++ b/cmd/workspace/apps/overrides.go @@ -0,0 +1,58 @@ +package apps + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/spf13/cobra" +) + +func createOverride(cmd *cobra.Command, deployReq *serving.DeployAppRequest) { + var manifestYaml flags.YamlFlag + var resourcesYaml flags.YamlFlag + createJson := cmd.Flag("json").Value.(*flags.JsonFlag) + + // TODO: short flags + cmd.Flags().Var(&manifestYaml, "manifest", `either inline YAML string or @path/to/manifest.yaml`) + cmd.Flags().Var(&resourcesYaml, "resources", `either inline YAML string or @path/to/resources.yaml`) + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&deployReq) + if err != nil { + return err + } + } else if cmd.Flags().Changed("manifest") { + err = manifestYaml.Unmarshal(&deployReq.Manifest) + if err != nil { + return err + } + if cmd.Flags().Changed("resources") { + err = resourcesYaml.Unmarshal(&deployReq.Resources) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in YAML format by specifying the --manifest flag or provide a json payload using the --json flag") + } + response, err := w.Apps.Create(ctx, *deployReq) + if err != nil { + return err + } + + return cmdio.Render(ctx, response) + } +} + +func init() { + createOverrides = append(createOverrides, createOverride) +} diff --git a/libs/flags/yaml_flag.go b/libs/flags/yaml_flag.go new file mode 100644 index 00000000..95cc9b4b --- /dev/null +++ b/libs/flags/yaml_flag.go @@ -0,0 +1,42 @@ +package flags + +import ( + "fmt" + "os" + + "github.com/ghodss/yaml" +) + +type YamlFlag struct { + raw []byte +} + +func (y *YamlFlag) String() string { + return fmt.Sprintf("YAML (%d bytes)", len(y.raw)) +} + +// TODO: Command.MarkFlagFilename() +func (y *YamlFlag) Set(v string) error { + // Load request from file if it starts with '@' (like curl). + if v[0] != '@' { + y.raw = []byte(v) + return nil + } + buf, err := os.ReadFile(v[1:]) + if err != nil { + return fmt.Errorf("read %s: %w", v, err) + } + y.raw = buf + return nil +} + +func (y *YamlFlag) Unmarshal(v any) error { + if y.raw == nil { + return nil + } + return yaml.Unmarshal(y.raw, v) +} + +func (y *YamlFlag) Type() string { + return "YAML" +} From 82a71ef33ba9b6ea04bec9ad5b1528340327c0f7 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:26:01 +0100 Subject: [PATCH 198/310] Add GitHub issue templates (#925) This PR adds a few github issue templates, allowing customers to give feedback and report issues in a more structured way. 
--- .github/ISSUE_TEMPLATE/cli-issue.md | 29 ++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/config.yml | 1 + .github/ISSUE_TEMPLATE/dabs-issue.md | 33 ++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/cli-issue.md create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/dabs-issue.md diff --git a/.github/ISSUE_TEMPLATE/cli-issue.md b/.github/ISSUE_TEMPLATE/cli-issue.md new file mode 100644 index 00000000..6a8a75f7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/cli-issue.md @@ -0,0 +1,29 @@ +--- +name: Bug report for the CLI +about: Use this to report an issue with the CLI. +title: '' +labels: CLI +--- + +### Describe the issue +A clear and concise description of what the issue is + +### Steps to reproduce the behavior +Please list the steps required to reproduce the issue, for example: +1. Run `databricks clusters ...` +2. See error + +### Expected Behavior +Clear and concise description of what should have happened + +### Actual Behavior +Clear and concise description of what actually happened + +### OS and CLI version +Please include the version of the CLI (eg: v0.1.2) and the operating system (eg: windows). You can run databricks --version to get the version of your Databricks CLI + +### Is this a regression? +Did this work in a previous version of the CLI? If so, which versions did you try? + +### Debug Logs +Output logs if you run the command with debug logs enabled. Example: databricks clusters list --log-level=debug. Redact if needed diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..0086358d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: true diff --git a/.github/ISSUE_TEMPLATE/dabs-issue.md b/.github/ISSUE_TEMPLATE/dabs-issue.md new file mode 100644 index 00000000..dff82026 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/dabs-issue.md @@ -0,0 +1,33 @@ +--- +name: Bug report for Databricks Asset Bundles +about: Use this to report an issue with Databricks Asset Bundles. +labels: DABs +title: '' +--- + +### Describe the issue +A clear and concise description of what the issue is + +### Configuration +Please provide a minimal reproducible configuration for the issue + +### Steps to reproduce the behavior + Please list the steps required to reproduce the issue, for example: +1. Run `databricks bundle deploy ...` +2. Run `databricks bundle run ...` +3. See error + +### Expected Behavior +Clear and concise description of what should have happened + +### Actual Behavior +Clear and concise description of what actually happened + +### OS and CLI version +Please provide the version of the CLI (eg: v0.1.2) and the operating system (eg: windows). You can run databricks --version to get the version of your Databricks CLI + +### Is this a regression? +Did this work in a previous version of the CLI? If so, which versions did you try? + +### Debug Logs +Output logs if you run the command with debug logs enabled. Example: databricks bundle deploy --log-level=debug. Redact if needed From 1e46b9f88a7dfe96ddc509b014beaa48ba0efef8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:26:06 +0100 Subject: [PATCH 199/310] Bump github.com/google/uuid from 1.3.1 to 1.4.0 (#932) Bumps [github.com/google/uuid](https://github.com/google/uuid) from 1.3.1 to 1.4.0.
Release notes

Sourced from github.com/google/uuid's releases.

v1.4.0

1.4.0 (2023-10-26)

Features

  • UUIDs slice type with Strings() convenience method (#133) (cd5fbbd)

Fixes

  • Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
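The `UUIDs` slice type called out above is a small quality-of-life addition; a minimal usage sketch (assuming the `uuid.UUIDs` type and its `Strings()` method behave as described in these notes):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// uuid.UUIDs is the new slice type added in v1.4.0.
	ids := uuid.UUIDs{uuid.New(), uuid.New()}

	// Strings() converts the whole slice in one call, replacing the
	// usual hand-written loop over id.String().
	fmt.Println(ids.Strings())
}
```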
Commits
  • 8de8764 chore(master): release 1.4.0 (#134)
  • 7c22e97 Clarify the documentation of Parse to state its job is to parse, not validate...
  • cd5fbbd feat: UUIDs slice type with Strings() convenience method (#133)
  • 47f5b39 docs: fix a typo in CONTRIBUTING.md (#130)
  • 542ddab chore(tests): add Fuzz tests (#128)
  • 06716f6 chore(tests): Add json.Unmarshal test with empty value cases (#116)
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/google/uuid&package-manager=go_modules&previous-version=1.3.1&new-version=1.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b67aafa9..03a728da 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/databricks/databricks-sdk-go v0.24.0 // Apache 2.0 github.com/fatih/color v1.15.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE - github.com/google/uuid v1.3.1 // BSD-3-Clause + github.com/google/uuid v1.4.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.1 // MPL 2.0 github.com/hashicorp/terraform-exec v0.19.0 // MPL 2.0 diff --git a/go.sum b/go.sum index 7f435f6b..19973e5c 100644 --- a/go.sum +++ b/go.sum @@ -86,8 +86,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= From d70d7445c4f8f873959dd45ae68f8c42617d9353 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 1 Nov 2023 14:02:06 +0100 Subject: [PATCH 200/310] Remove resolution of repo names against the Databricks Github account (#940) ## Changes This functionality is not exercised (and will not be anytime soon). Instead we use a map to have first party aliases for supported templates. https://github.com/databricks/cli/blob/1e46b9f88a7dfe96ddc509b014beaa48ba0efef8/cmd/bundle/init.go#L21 ## Tests Existing tests and manually, bundle init still works. 
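Conceptually, the replacement for the removed resolution is a plain lookup table consulted before cloning; a rough sketch of that approach (names and entries here are illustrative, not the exact contents of `cmd/bundle/init.go`):

```go
package main

import "fmt"

// firstPartyTemplates maps short, supported template names to full
// repository URLs. Anything not in the map must already be a full URL.
// (Illustrative entry only; see cmd/bundle/init.go for the real table.)
var firstPartyTemplates = map[string]string{
	"mlops-stacks": "https://github.com/databricks/mlops-stacks",
}

// resolveTemplate expands a known alias or passes the input through.
func resolveTemplate(nameOrURL string) string {
	if url, ok := firstPartyTemplates[nameOrURL]; ok {
		return url
	}
	return nameOrURL
}

func main() {
	fmt.Println(resolveTemplate("mlops-stacks"))
	fmt.Println(resolveTemplate("https://github.com/databricks/notebook-best-practices"))
}
```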
--- internal/git_clone_test.go | 6 +++--- libs/git/clone.go | 17 +---------------- 2 files changed, 4 insertions(+), 19 deletions(-) diff --git a/internal/git_clone_test.go b/internal/git_clone_test.go index 3fb69b92..73c3db10 100644 --- a/internal/git_clone_test.go +++ b/internal/git_clone_test.go @@ -32,14 +32,14 @@ func TestAccGitClone(t *testing.T) { assert.Contains(t, string(b), "ide") } -func TestAccGitCloneWithOnlyRepoNameOnAlternateBranch(t *testing.T) { +func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() ctx := context.Background() var err error - err = git.Clone(ctx, "notebook-best-practices", "dais-2022", tmpDir) + err = git.Clone(ctx, "https://github.com/databricks/notebook-best-practices", "dais-2022", tmpDir) // assert on repo content assert.NoError(t, err) @@ -47,7 +47,7 @@ func TestAccGitCloneWithOnlyRepoNameOnAlternateBranch(t *testing.T) { assert.NoError(t, err) assert.Contains(t, string(b), "Software engineering best practices for Databricks notebooks") - // assert current branch is main, ie default for the repo + // assert current branch is dais-2022 b, err = os.ReadFile(filepath.Join(tmpDir, ".git/HEAD")) assert.NoError(t, err) assert.Contains(t, string(b), "dais-2022") diff --git a/libs/git/clone.go b/libs/git/clone.go index e7d001cd..9369686c 100644 --- a/libs/git/clone.go +++ b/libs/git/clone.go @@ -5,18 +5,11 @@ import ( "errors" "fmt" "os/exec" - "regexp" "strings" "github.com/databricks/cli/libs/process" ) -// source: https://stackoverflow.com/questions/59081778/rules-for-special-characters-in-github-repository-name -var githubRepoRegex = regexp.MustCompile(`^[\w-\.]+$`) - -const githubUrl = "https://github.com" -const databricksOrg = "databricks" - type cloneOptions struct { // Branch or tag to clone Reference string @@ -59,17 +52,9 @@ func (opts cloneOptions) clone(ctx context.Context) error { } func Clone(ctx context.Context, url, reference, targetPath string) error { - // We assume only the repository name has been if input does not contain any - // `/` characters and the url is only made up of alphanumeric characters and - // ".", "_" and "-". This repository is resolved again databricks github account. - fullUrl := url - if githubRepoRegex.MatchString(url) { - fullUrl = strings.Join([]string{githubUrl, databricksOrg, url}, "/") - } - opts := cloneOptions{ Reference: reference, - RepositoryUrl: fullUrl, + RepositoryUrl: url, TargetPath: targetPath, Shallow: true, } From b6aa4631f19f4bae4cfce580a5d89cfa5447ae52 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 2 Nov 2023 12:00:30 +0100 Subject: [PATCH 201/310] Fix metadata computation for empty bundle (#939) ## Changes This PR fixes metadata computation for empty bundle. Before we would error because the `terraform.Load()` mutator errors on a empty / no state file. ## Tests Failing integration tests now pass. 
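The essence of the fix is an early no-op guard on an empty Terraform state, with strictness left to the caller; a compact sketch of that pattern (simplified stand-in types, not the CLI's actual ones):

```go
package main

import (
	"errors"
	"fmt"
)

// tfState is a simplified stand-in for tfjson.State.
type tfState struct {
	Values *struct{ RootModule *struct{} }
}

// validate tolerates an empty state unless strict is set, so metadata
// computation works for a never-deployed bundle while commands like
// `bundle run` can still insist on a prior deploy.
func validate(s *tfState, strict bool) error {
	if s.Values == nil {
		if strict {
			return errors.New("no deployment state. Did you forget to run 'databricks bundle deploy'?")
		}
		return nil // empty state: nothing to load, not an error
	}
	if s.Values.RootModule == nil {
		return errors.New("malformed terraform state: root module not defined")
	}
	return nil
}

func main() {
	empty := &tfState{}
	fmt.Println(validate(empty, false)) // <nil>: tolerated
	fmt.Println(validate(empty, true))  // error: deploy required
}
```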
--- bundle/deploy/terraform/convert.go | 5 +++++ bundle/deploy/terraform/load.go | 22 ++++++++++++++++------ bundle/deploy/terraform/load_test.go | 2 +- cmd/bundle/run.go | 2 +- 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 3bfc8b83..71385881 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -213,6 +213,11 @@ func BundleToTerraform(config *config.Root) *schema.Root { } func TerraformToBundle(state *tfjson.State, config *config.Root) error { + // This is a no-op if the state is empty. + if state.Values == nil || state.Values.RootModule == nil { + return nil + } + for _, resource := range state.Values.RootModule.Resources { // Limit to resources. if resource.Mode != tfjson.ManagedResourceMode { diff --git a/bundle/deploy/terraform/load.go b/bundle/deploy/terraform/load.go index 9fd68884..624bf7a5 100644 --- a/bundle/deploy/terraform/load.go +++ b/bundle/deploy/terraform/load.go @@ -3,13 +3,20 @@ package terraform import ( "context" "fmt" + "slices" "github.com/databricks/cli/bundle" "github.com/hashicorp/terraform-exec/tfexec" tfjson "github.com/hashicorp/terraform-json" ) -type load struct{} +type loadMode int + +const ErrorOnEmptyState loadMode = 0 + +type load struct { + modes []loadMode +} func (l *load) Name() string { return "terraform.Load" @@ -31,7 +38,7 @@ func (l *load) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - err = ValidateState(state) + err = l.validateState(state) if err != nil { return err } @@ -45,9 +52,12 @@ func (l *load) Apply(ctx context.Context, b *bundle.Bundle) error { return nil } -func ValidateState(state *tfjson.State) error { +func (l *load) validateState(state *tfjson.State) error { if state.Values == nil { - return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?") + if slices.Contains(l.modes, ErrorOnEmptyState) { + return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?") + } + return nil } if state.Values.RootModule == nil { @@ -57,6 +67,6 @@ func ValidateState(state *tfjson.State) error { return nil } -func Load() bundle.Mutator { - return &load{} +func Load(modes ...loadMode) bundle.Mutator { + return &load{modes: modes} } diff --git a/bundle/deploy/terraform/load_test.go b/bundle/deploy/terraform/load_test.go index 1937ca8a..aeaffa14 100644 --- a/bundle/deploy/terraform/load_test.go +++ b/bundle/deploy/terraform/load_test.go @@ -34,7 +34,7 @@ func TestLoadWithNoState(t *testing.T) { err = bundle.Apply(context.Background(), b, bundle.Seq( Initialize(), - Load(), + Load(ErrorOnEmptyState), )) require.ErrorContains(t, err, "Did you forget to run 'databricks bundle deploy'") diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index b5a60ee1..b2766b20 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -38,7 +38,7 @@ func newRunCommand() *cobra.Command { terraform.Interpolate(), terraform.Write(), terraform.StatePull(), - terraform.Load(), + terraform.Load(terraform.ErrorOnEmptyState), )) if err != nil { return err From 51d41d6c44bdd3667fe0e5f886002034c12615d3 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 2 Nov 2023 16:43:41 +0100 Subject: [PATCH 202/310] Release v0.209.0 (#944) CLI: * Added GitHub issue templates for CLI and DABs issues ([#925](https://github.com/databricks/cli/pull/925)). 
* Added override to support YAML inputs for apps ([#921](https://github.com/databricks/cli/pull/921)). * Simplified code generation logic for handling path and request body parameters and JSON input ([#905](https://github.com/databricks/cli/pull/905)). Bundles: * Fixed URL for bundle template documentation in init command help docs ([#903](https://github.com/databricks/cli/pull/903)). * Fixed pattern validation for input parameters in a bundle template ([#912](https://github.com/databricks/cli/pull/912)). * Fixed multiline description rendering for enum input parameters in bundle templates ([#916](https://github.com/databricks/cli/pull/916)). * Changed production mode check for whether identity used is a service principal to use UserName ([#924](https://github.com/databricks/cli/pull/924)). * Changed bundle deploy to upload partial terraform state even if deployment fails ([#923](https://github.com/databricks/cli/pull/923)). * Added support for welcome messages to bundle templates ([#907](https://github.com/databricks/cli/pull/907)). * Added support for uploading bundle deployment metadata to WSFS ([#845](https://github.com/databricks/cli/pull/845)). Internal: * Loading an empty yaml file yields a nil ([#906](https://github.com/databricks/cli/pull/906)). * Library to convert config.Value to Go struct ([#904](https://github.com/databricks/cli/pull/904)). * Remove default resolution of repo names against the Databricks Github account([#940](https://github.com/databricks/cli/pull/940)). * Run make fmt from fmt job ([#929](https://github.com/databricks/cli/pull/929)). * `make snapshot` to build file in `.databricks/databricks` ([#927](https://github.com/databricks/cli/pull/927)). * Add configuration normalization code ([#915](https://github.com/databricks/cli/pull/915)). API Changes: * Added `databricks apps` command group. * Added `databricks account network-policy` command group. Dependency updates: * Bump Terraform provider from v1.28.0 to v1.29.0 ([#926](https://github.com/databricks/cli/pull/926)). * Bump the Go SDK in the CLI from v0.23 to v0.24 ([#919](https://github.com/databricks/cli/pull/919)). * Bump google.golang.org/grpc from 1.58.2 to 1.58.3 ([#920](https://github.com/databricks/cli/pull/920)). * Bump github.com/google/uuid from 1.3.1 to 1.4.0 ([#932](https://github.com/databricks/cli/pull/932)). OpenAPI commit 5903bb39137fd76ac384b2044e425f9c56840e00 (2023-10-23) --- CHANGELOG.md | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d17743c..d654f760 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,43 @@ # Version changelog +## 0.209.0 + +CLI: + * Added GitHub issue templates for CLI and DABs issues ([#925](https://github.com/databricks/cli/pull/925)). + * Added override to support YAML inputs for apps ([#921](https://github.com/databricks/cli/pull/921)). + * Simplified code generation logic for handling path and request body parameters and JSON input ([#905](https://github.com/databricks/cli/pull/905)). + + +Bundles: + * Fixed URL for bundle template documentation in init command help docs ([#903](https://github.com/databricks/cli/pull/903)). + * Fixed pattern validation for input parameters in a bundle template ([#912](https://github.com/databricks/cli/pull/912)). + * Fixed multiline description rendering for enum input parameters in bundle templates ([#916](https://github.com/databricks/cli/pull/916)). 
+ * Changed production mode check for whether identity used is a service principal to use UserName ([#924](https://github.com/databricks/cli/pull/924)). + * Changed bundle deploy to upload partial terraform state even if deployment fails ([#923](https://github.com/databricks/cli/pull/923)). + * Added support for welcome messages to bundle templates ([#907](https://github.com/databricks/cli/pull/907)). + * Added support for uploading bundle deployment metadata to WSFS ([#845](https://github.com/databricks/cli/pull/845)). + + +Internal: + * Loading an empty yaml file yields a nil ([#906](https://github.com/databricks/cli/pull/906)). + * Library to convert config.Value to Go struct ([#904](https://github.com/databricks/cli/pull/904)). + * Remove default resolution of repo names against the Databricks Github account([#940](https://github.com/databricks/cli/pull/940)). + * Run make fmt from fmt job ([#929](https://github.com/databricks/cli/pull/929)). + * `make snapshot` to build file in `.databricks/databricks` ([#927](https://github.com/databricks/cli/pull/927)). + * Add configuration normalization code ([#915](https://github.com/databricks/cli/pull/915)). + +API Changes: + * Added `databricks apps` command group. + * Added `databricks account network-policy` command group. + +Dependency updates: + * Bump Terraform provider from v1.28.0 to v1.29.0 ([#926](https://github.com/databricks/cli/pull/926)). + * Bump the Go SDK in the CLI from v0.23 to v0.24 ([#919](https://github.com/databricks/cli/pull/919)). + * Bump google.golang.org/grpc from 1.58.2 to 1.58.3 ([#920](https://github.com/databricks/cli/pull/920)). + * Bump github.com/google/uuid from 1.3.1 to 1.4.0 ([#932](https://github.com/databricks/cli/pull/932)). + +OpenAPI commit 5903bb39137fd76ac384b2044e425f9c56840e00 (2023-10-23) + ## 0.208.2 CLI: From 401ae3dc9953cfb8c9bd246b7b90eb9d27e35ce2 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 2 Nov 2023 18:35:17 +0100 Subject: [PATCH 203/310] Remove mention of Lakehouse apps from the changelog (#945) --- CHANGELOG.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d654f760..424fef90 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,6 @@ CLI: * Added GitHub issue templates for CLI and DABs issues ([#925](https://github.com/databricks/cli/pull/925)). - * Added override to support YAML inputs for apps ([#921](https://github.com/databricks/cli/pull/921)). * Simplified code generation logic for handling path and request body parameters and JSON input ([#905](https://github.com/databricks/cli/pull/905)). @@ -27,7 +26,6 @@ Internal: * Add configuration normalization code ([#915](https://github.com/databricks/cli/pull/915)). API Changes: - * Added `databricks apps` command group. * Added `databricks account network-policy` command group. Dependency updates: From 8e1156edbdd3bd61cc80703a332616ddb2e5609a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 3 Nov 2023 12:15:47 -0700 Subject: [PATCH 204/310] Function to merge two instances of `config.Value` (#938) ## Changes Semantics for merging two instances of `config.Value`: * Merging x with nil or nil with x always yields x * Merging maps a and b means entries from map b take precedence * Merging sequences a and b means concatenating them These are the same semantics that we use today when calling into mergo in `bundle/config`. ## Tests Unit tests pass. 
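To make these rules concrete, a short usage sketch against the package added below (based on the API exercised by the new tests; the output noted in the comment follows the stated semantics):

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/config"
	"github.com/databricks/cli/libs/config/merge"
)

func main() {
	base := config.V(map[string]config.Value{
		"name": config.V("base"),
		"tags": config.V([]config.Value{config.V("x")}),
	})
	override := config.V(map[string]config.Value{
		"name": config.V("override"),
		"tags": config.V([]config.Value{config.V("y")}),
	})

	out, err := merge.Merge(base, override)
	if err != nil {
		panic(err)
	}

	// Map entries from the second argument win; sequences concatenate:
	// map[name:override tags:[x y]]
	fmt.Println(out.AsAny())
}
```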
--- libs/config/merge/merge.go | 98 +++++++++++++++ libs/config/merge/merge_test.go | 207 ++++++++++++++++++++++++++++++++ 2 files changed, 305 insertions(+) create mode 100644 libs/config/merge/merge.go create mode 100644 libs/config/merge/merge_test.go diff --git a/libs/config/merge/merge.go b/libs/config/merge/merge.go new file mode 100644 index 00000000..896e2129 --- /dev/null +++ b/libs/config/merge/merge.go @@ -0,0 +1,98 @@ +package merge + +import ( + "fmt" + + "github.com/databricks/cli/libs/config" +) + +// Merge recursively merges the specified values. +// +// Semantics are as follows: +// * Merging x with nil or nil with x always yields x. +// * Merging maps a and b means entries from map b take precedence. +// * Merging sequences a and b means concatenating them. +func Merge(a, b config.Value) (config.Value, error) { + return merge(a, b) +} + +func merge(a, b config.Value) (config.Value, error) { + ak := a.Kind() + bk := b.Kind() + + // If a is nil, return b. + if ak == config.KindNil { + return b, nil + } + + // If b is nil, return a. + if bk == config.KindNil { + return a, nil + } + + // Call the appropriate merge function based on the kind of a and b. + switch ak { + case config.KindMap: + if bk != config.KindMap { + return config.NilValue, fmt.Errorf("cannot merge map with %s", bk) + } + return mergeMap(a, b) + case config.KindSequence: + if bk != config.KindSequence { + return config.NilValue, fmt.Errorf("cannot merge sequence with %s", bk) + } + return mergeSequence(a, b) + default: + if ak != bk { + return config.NilValue, fmt.Errorf("cannot merge %s with %s", ak, bk) + } + return mergePrimitive(a, b) + } +} + +func mergeMap(a, b config.Value) (config.Value, error) { + out := make(map[string]config.Value) + am := a.MustMap() + bm := b.MustMap() + + // Add the values from a into the output map. + for k, v := range am { + out[k] = v + } + + // Merge the values from b into the output map. + for k, v := range bm { + if _, ok := out[k]; ok { + // If the key already exists, merge the values. + merged, err := merge(out[k], v) + if err != nil { + return config.NilValue, err + } + out[k] = merged + } else { + // Otherwise, just set the value. + out[k] = v + } + } + + // Preserve the location of the first value. + return config.NewValue(out, a.Location()), nil +} + +func mergeSequence(a, b config.Value) (config.Value, error) { + as := a.MustSequence() + bs := b.MustSequence() + + // Merging sequences means concatenating them. + out := make([]config.Value, len(as)+len(bs)) + copy(out[:], as) + copy(out[len(as):], bs) + + // Preserve the location of the first value. + return config.NewValue(out, a.Location()), nil +} + +func mergePrimitive(a, b config.Value) (config.Value, error) { + // Merging primitive values means using the incoming value. + return b, nil +} diff --git a/libs/config/merge/merge_test.go b/libs/config/merge/merge_test.go new file mode 100644 index 00000000..c2e89f60 --- /dev/null +++ b/libs/config/merge/merge_test.go @@ -0,0 +1,207 @@ +package merge + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" +) + +func TestMergeMaps(t *testing.T) { + v1 := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + v2 := config.V(map[string]config.Value{ + "bar": config.V("qux"), + "qux": config.V("foo"), + }) + + // Merge v2 into v1. 
+ { + out, err := Merge(v1, v2) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "foo": "bar", + "bar": "qux", + "qux": "foo", + }, out.AsAny()) + } + + // Merge v1 into v2. + { + out, err := Merge(v2, v1) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "foo": "bar", + "bar": "baz", + "qux": "foo", + }, out.AsAny()) + } +} + +func TestMergeMapsNil(t *testing.T) { + v := config.V(map[string]config.Value{ + "foo": config.V("bar"), + }) + + // Merge nil into v. + { + out, err := Merge(v, config.NilValue) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "foo": "bar", + }, out.AsAny()) + } + + // Merge v into nil. + { + out, err := Merge(config.NilValue, v) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "foo": "bar", + }, out.AsAny()) + } +} + +func TestMergeMapsError(t *testing.T) { + v := config.V(map[string]config.Value{ + "foo": config.V("bar"), + }) + + other := config.V("string") + + // Merge a string into v. + { + out, err := Merge(v, other) + assert.EqualError(t, err, "cannot merge map with string") + assert.Equal(t, config.NilValue, out) + } +} + +func TestMergeSequences(t *testing.T) { + v1 := config.V([]config.Value{ + config.V("bar"), + config.V("baz"), + }) + + v2 := config.V([]config.Value{ + config.V("qux"), + config.V("foo"), + }) + + // Merge v2 into v1. + { + out, err := Merge(v1, v2) + assert.NoError(t, err) + assert.Equal(t, []any{ + "bar", + "baz", + "qux", + "foo", + }, out.AsAny()) + } + + // Merge v1 into v2. + { + out, err := Merge(v2, v1) + assert.NoError(t, err) + assert.Equal(t, []any{ + "qux", + "foo", + "bar", + "baz", + }, out.AsAny()) + } +} + +func TestMergeSequencesNil(t *testing.T) { + v := config.V([]config.Value{ + config.V("bar"), + }) + + // Merge nil into v. + { + out, err := Merge(v, config.NilValue) + assert.NoError(t, err) + assert.Equal(t, []any{ + "bar", + }, out.AsAny()) + } + + // Merge v into nil. + { + out, err := Merge(config.NilValue, v) + assert.NoError(t, err) + assert.Equal(t, []any{ + "bar", + }, out.AsAny()) + } +} + +func TestMergeSequencesError(t *testing.T) { + v := config.V([]config.Value{ + config.V("bar"), + }) + + other := config.V("string") + + // Merge a string into v. + { + out, err := Merge(v, other) + assert.EqualError(t, err, "cannot merge sequence with string") + assert.Equal(t, config.NilValue, out) + } +} + +func TestMergePrimitives(t *testing.T) { + v1 := config.V("bar") + v2 := config.V("baz") + + // Merge v2 into v1. + { + out, err := Merge(v1, v2) + assert.NoError(t, err) + assert.Equal(t, "baz", out.AsAny()) + } + + // Merge v1 into v2. + { + out, err := Merge(v2, v1) + assert.NoError(t, err) + assert.Equal(t, "bar", out.AsAny()) + } +} + +func TestMergePrimitivesNil(t *testing.T) { + v := config.V("bar") + + // Merge nil into v. + { + out, err := Merge(v, config.NilValue) + assert.NoError(t, err) + assert.Equal(t, "bar", out.AsAny()) + } + + // Merge v into nil. + { + out, err := Merge(config.NilValue, v) + assert.NoError(t, err) + assert.Equal(t, "bar", out.AsAny()) + } +} + +func TestMergePrimitivesError(t *testing.T) { + v := config.V("bar") + other := config.V(map[string]config.Value{ + "foo": config.V("bar"), + }) + + // Merge a map into v. 
+ { + out, err := Merge(v, other) + assert.EqualError(t, err, "cannot merge string with map") + assert.Equal(t, config.NilValue, out) + } +} From fb32e78c9b9fb000ce898b8a60b0b47920f487d3 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 6 Nov 2023 16:05:17 +0100 Subject: [PATCH 205/310] Make to/from string methods private to the jsonschema package (#942) ## Changes This PR makes a few methods private, exposing cleaner interfaces to get the string representations for enums and default values of a JSON Schema. ## Tests Manually, template initialization for the `default-python` template still works as expected. --- libs/jsonschema/instance.go | 2 +- libs/jsonschema/schema.go | 16 +++++++++++ libs/jsonschema/utils.go | 10 +++---- libs/jsonschema/utils_test.go | 54 +++++++++++++++++------------------ libs/template/config.go | 10 +++---- 5 files changed, 54 insertions(+), 38 deletions(-) diff --git a/libs/jsonschema/instance.go b/libs/jsonschema/instance.go index 091822da..0b060cff 100644 --- a/libs/jsonschema/instance.go +++ b/libs/jsonschema/instance.go @@ -122,7 +122,7 @@ func (s *Schema) validatePattern(instance map[string]any) error { if !ok { continue } - err := ValidatePatternMatch(k, v, fieldInfo) + err := validatePatternMatch(k, v, fieldInfo) if err != nil { return err } diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index f1a89e7b..57082dc8 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -58,6 +58,22 @@ type Schema struct { Extension } +// Default value defined in a JSON Schema, represented as a string. +func (s *Schema) DefaultString() (string, error) { + return toString(s.Default, s.Type) +} + +// Allowed enum values defined in a JSON Schema, represented as a slice of strings. +func (s *Schema) EnumStringSlice() ([]string, error) { + return toStringSlice(s.Enum, s.Type) +} + +// Parses a string as a Go primitive value. The type of the value is determined +// by the type defined in the JSON Schema. 
+func (s *Schema) ParseString(v string) (any, error) { + return fromString(v, s.Type) +} + type Type string const ( diff --git a/libs/jsonschema/utils.go b/libs/jsonschema/utils.go index 7bb666c7..9e65ed06 100644 --- a/libs/jsonschema/utils.go +++ b/libs/jsonschema/utils.go @@ -39,7 +39,7 @@ func toInteger(v any) (int64, error) { } } -func ToString(v any, T Type) (string, error) { +func toString(v any, T Type) (string, error) { switch T { case BooleanType: boolVal, ok := v.(bool) @@ -72,10 +72,10 @@ func ToString(v any, T Type) (string, error) { } } -func ToStringSlice(arr []any, T Type) ([]string, error) { +func toStringSlice(arr []any, T Type) ([]string, error) { res := []string{} for _, v := range arr { - s, err := ToString(v, T) + s, err := toString(v, T) if err != nil { return nil, err } @@ -84,7 +84,7 @@ func ToStringSlice(arr []any, T Type) ([]string, error) { return res, nil } -func FromString(s string, T Type) (any, error) { +func fromString(s string, T Type) (any, error) { if T == StringType { return s, nil } @@ -113,7 +113,7 @@ func FromString(s string, T Type) (any, error) { return v, err } -func ValidatePatternMatch(name string, value any, propertySchema *Schema) error { +func validatePatternMatch(name string, value any, propertySchema *Schema) error { if propertySchema.Pattern == "" { // Return early if no pattern is specified return nil diff --git a/libs/jsonschema/utils_test.go b/libs/jsonschema/utils_test.go index 4c43e57d..b036a23f 100644 --- a/libs/jsonschema/utils_test.go +++ b/libs/jsonschema/utils_test.go @@ -49,82 +49,82 @@ func TestTemplateToInteger(t *testing.T) { } func TestTemplateToString(t *testing.T) { - s, err := ToString(true, BooleanType) + s, err := toString(true, BooleanType) assert.NoError(t, err) assert.Equal(t, "true", s) - s, err = ToString("abc", StringType) + s, err = toString("abc", StringType) assert.NoError(t, err) assert.Equal(t, "abc", s) - s, err = ToString(1.1, NumberType) + s, err = toString(1.1, NumberType) assert.NoError(t, err) assert.Equal(t, "1.1", s) - s, err = ToString(2, IntegerType) + s, err = toString(2, IntegerType) assert.NoError(t, err) assert.Equal(t, "2", s) - _, err = ToString([]string{}, ArrayType) + _, err = toString([]string{}, ArrayType) assert.EqualError(t, err, "cannot format object of type array as a string. 
Value of object: []string{}") - _, err = ToString("true", BooleanType) + _, err = toString("true", BooleanType) assert.EqualError(t, err, "expected bool, got: \"true\"") - _, err = ToString(123, StringType) + _, err = toString(123, StringType) assert.EqualError(t, err, "expected string, got: 123") - _, err = ToString(false, NumberType) + _, err = toString(false, NumberType) assert.EqualError(t, err, "expected float, got: false") - _, err = ToString("abc", IntegerType) + _, err = toString("abc", IntegerType) assert.EqualError(t, err, "cannot convert \"abc\" to an integer") - _, err = ToString("abc", "foobar") + _, err = toString("abc", "foobar") assert.EqualError(t, err, "unknown json schema type: \"foobar\"") } func TestTemplateFromString(t *testing.T) { - v, err := FromString("true", BooleanType) + v, err := fromString("true", BooleanType) assert.NoError(t, err) assert.Equal(t, true, v) - v, err = FromString("abc", StringType) + v, err = fromString("abc", StringType) assert.NoError(t, err) assert.Equal(t, "abc", v) - v, err = FromString("1.1", NumberType) + v, err = fromString("1.1", NumberType) assert.NoError(t, err) // Floating point conversions are not perfect assert.True(t, (v.(float64)-1.1) < 0.000001) - v, err = FromString("12345", IntegerType) + v, err = fromString("12345", IntegerType) assert.NoError(t, err) assert.Equal(t, int64(12345), v) - v, err = FromString("123", NumberType) + v, err = fromString("123", NumberType) assert.NoError(t, err) assert.Equal(t, float64(123), v) - _, err = FromString("qrt", ArrayType) + _, err = fromString("qrt", ArrayType) assert.EqualError(t, err, "cannot parse string as object of type array. Value of string: \"qrt\"") - _, err = FromString("abc", IntegerType) + _, err = fromString("abc", IntegerType) assert.EqualError(t, err, "could not parse \"abc\" as a integer: strconv.ParseInt: parsing \"abc\": invalid syntax") - _, err = FromString("1.0", IntegerType) + _, err = fromString("1.0", IntegerType) assert.EqualError(t, err, "could not parse \"1.0\" as a integer: strconv.ParseInt: parsing \"1.0\": invalid syntax") - _, err = FromString("1.0", "foobar") + _, err = fromString("1.0", "foobar") assert.EqualError(t, err, "unknown json schema type: \"foobar\"") } func TestTemplateToStringSlice(t *testing.T) { - s, err := ToStringSlice([]any{"a", "b", "c"}, StringType) + s, err := toStringSlice([]any{"a", "b", "c"}, StringType) assert.NoError(t, err) assert.Equal(t, []string{"a", "b", "c"}, s) - s, err = ToStringSlice([]any{1.1, 2.2, 3.3}, NumberType) + s, err = toStringSlice([]any{1.1, 2.2, 3.3}, NumberType) assert.NoError(t, err) assert.Equal(t, []string{"1.1", "2.2", "3.3"}, s) } @@ -133,23 +133,23 @@ func TestValidatePropertyPatternMatch(t *testing.T) { var err error // Expect no error if no pattern is specified. - err = ValidatePatternMatch("foo", 1, &Schema{Type: "integer"}) + err = validatePatternMatch("foo", 1, &Schema{Type: "integer"}) assert.NoError(t, err) // Expect error because value is not a string. - err = ValidatePatternMatch("bar", 1, &Schema{Type: "integer", Pattern: "abc"}) + err = validatePatternMatch("bar", 1, &Schema{Type: "integer", Pattern: "abc"}) assert.EqualError(t, err, "invalid value for bar: 1. Expected a value of type string") // Expect error because the pattern is invalid. 
- err = ValidatePatternMatch("bar", "xyz", &Schema{Type: "string", Pattern: "(abc"}) + err = validatePatternMatch("bar", "xyz", &Schema{Type: "string", Pattern: "(abc"}) assert.EqualError(t, err, "error parsing regexp: missing closing ): `(abc`") // Expect no error because the pattern matches. - err = ValidatePatternMatch("bar", "axyzd", &Schema{Type: "string", Pattern: "(a*.d)"}) + err = validatePatternMatch("bar", "axyzd", &Schema{Type: "string", Pattern: "(a*.d)"}) assert.NoError(t, err) // Expect custom error message on match fail - err = ValidatePatternMatch("bar", "axyze", &Schema{ + err = validatePatternMatch("bar", "axyze", &Schema{ Type: "string", Pattern: "(a*.d)", Extension: Extension{ @@ -159,7 +159,7 @@ func TestValidatePropertyPatternMatch(t *testing.T) { assert.EqualError(t, err, "invalid value for bar: \"axyze\". my custom msg") // Expect generic message on match fail - err = ValidatePatternMatch("bar", "axyze", &Schema{ + err = validatePatternMatch("bar", "axyze", &Schema{ Type: "string", Pattern: "(a*.d)", }) diff --git a/libs/template/config.go b/libs/template/config.go index 8ace307b..58b671fb 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -75,7 +75,7 @@ func (c *config) assignDefaultValues(r *renderer) error { if property.Default == nil { continue } - defaultVal, err := jsonschema.ToString(property.Default, property.Type) + defaultVal, err := property.DefaultString() if err != nil { return err } @@ -83,7 +83,7 @@ func (c *config) assignDefaultValues(r *renderer) error { if err != nil { return err } - defaultValTyped, err := jsonschema.FromString(defaultVal, property.Type) + defaultValTyped, err := property.ParseString(defaultVal) if err != nil { return err } @@ -107,7 +107,7 @@ func (c *config) promptForValues(r *renderer) error { var defaultVal string var err error if property.Default != nil { - defaultValRaw, err := jsonschema.ToString(property.Default, property.Type) + defaultValRaw, err := property.DefaultString() if err != nil { return err } @@ -126,7 +126,7 @@ func (c *config) promptForValues(r *renderer) error { var userInput string if property.Enum != nil { // convert list of enums to string slice - enums, err := jsonschema.ToStringSlice(property.Enum, property.Type) + enums, err := property.EnumStringSlice() if err != nil { return err } @@ -142,7 +142,7 @@ func (c *config) promptForValues(r *renderer) error { } // Convert user input string back to a value - c.values[name], err = jsonschema.FromString(userInput, property.Type) + c.values[name], err = property.ParseString(userInput) if err != nil { return err } From 9eec6da29bae122e763233e88fe0917d78519aca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:20:01 +0100 Subject: [PATCH 206/310] Bump golang.org/x/mod from 0.13.0 to 0.14.0 (#954) Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.13.0 to 0.14.0.
Commits
  • 6e58e47 modfile: improve directory path detection and error text consistency
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/mod&package-manager=go_modules&previous-version=0.13.0&new-version=0.14.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 03a728da..fd433c9b 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/stretchr/testify v1.8.4 // MIT github.com/whilp/git-urls v1.0.0 // MIT golang.org/x/exp v0.0.0-20231006140011-7918f672742d - golang.org/x/mod v0.13.0 + golang.org/x/mod v0.14.0 golang.org/x/oauth2 v0.13.0 golang.org/x/sync v0.4.0 golang.org/x/term v0.13.0 diff --git a/go.sum b/go.sum index 19973e5c..eb0a0242 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From b5a438ea6269fb8707b5e91e761602350705f8cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:20:12 +0100 Subject: [PATCH 207/310] Bump golang.org/x/text from 0.13.0 to 0.14.0 (#953) Bumps [golang.org/x/text](https://github.com/golang/text) from 0.13.0 to 0.14.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/text&package-manager=go_modules&previous-version=0.13.0&new-version=0.14.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fd433c9b..f7a331ec 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( golang.org/x/oauth2 v0.13.0 golang.org/x/sync v0.4.0 golang.org/x/term v0.13.0 - golang.org/x/text v0.13.0 + golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) diff --git a/go.sum b/go.sum index eb0a0242..e2477d5e 100644 --- a/go.sum +++ b/go.sum @@ -232,8 +232,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 83820a3a67384bbe93f983002845b64ab490db77 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:20:32 +0100 Subject: [PATCH 208/310] Bump golang.org/x/sync from 0.4.0 to 0.5.0 (#951) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.4.0 to 0.5.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/sync&package-manager=go_modules&previous-version=0.4.0&new-version=0.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f7a331ec..967234a8 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/mod v0.14.0 golang.org/x/oauth2 v0.13.0 - golang.org/x/sync v0.4.0 + golang.org/x/sync v0.5.0 golang.org/x/term v0.13.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 diff --git a/go.sum b/go.sum index e2477d5e..1c9466bb 100644 --- a/go.sum +++ b/go.sum @@ -197,8 +197,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 4b18f117d308caebbfc1c9e1879fce8a2d741e8c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:20:51 +0100 Subject: [PATCH 209/310] Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 (#950) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.7.0 to 1.8.0.
Release notes

Sourced from github.com/spf13/cobra's releases.

v1.8.0

Thank you everyone who contributed to this release and all your hard work! Cobra and this community would never be possible without all of you!!!! 🐍

Full Changelog: https://github.com/spf13/cobra/compare/v1.7.0...v1.8.0

Commits
  • a0a6ae0 Improve API to get flag completion function (#2063)
  • 890302a Support usage as plugin for tools like kubectl (#2018)
  • 48cea5c build(deps): bump actions/checkout from 3 to 4 (#2028)
  • 22953d8 Replace all non-alphanumerics in active help env var program prefix (#1940)
  • 00b68a1 Add tests for flag completion registration (#2053)
  • b711e87 Don't complete --help flag when flag parsing disabled (#2061)
  • 8b1eba4 Fix linter errors (#2052)
  • 4cafa37 Allow running persistent run hooks of all parents (#2044)
  • 5c962a2 build(deps): bump github.com/cpuguy83/go-md2man/v2 from 2.0.2 to 2.0.3 (#2047)
  • efe8fa3 build(deps): bump actions/setup-go from 3 to 4 (#1934)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/spf13/cobra&package-manager=go_modules&previous-version=1.7.0&new-version=1.8.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 967234a8..4b9a6614 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/nwidger/jsoncolor v0.3.2 // MIT github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // BSD-2-Clause github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // MIT - github.com/spf13/cobra v1.7.0 // Apache 2.0 + github.com/spf13/cobra v1.8.0 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.8.4 // MIT github.com/whilp/git-urls v1.0.0 // MIT diff --git a/go.sum b/go.sum index 1c9466bb..a230ea05 100644 --- a/go.sum +++ b/go.sum @@ -28,7 +28,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/databricks/databricks-sdk-go v0.24.0 h1:fx34MOGYXVc72QBSFnKuDa/H3ekDMqZYH4jKZF8mrXk= @@ -136,8 +136,8 @@ github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= From 677f28e2fb5ef67321bb2605e581770d435f0761 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:02:55 +0100 Subject: [PATCH 210/310] Bump github.com/fatih/color from 1.15.0 to 1.16.0 (#952) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/fatih/color](https://github.com/fatih/color) from 1.15.0 to 1.16.0.
Release notes

Sourced from github.com/fatih/color's releases.

v1.16.0


Full Changelog: https://github.com/fatih/color/compare/v1.15.0...v1.16.0

Commits
  • 0f9779e Merge pull request #213 from fatih/dependabot/go_modules/golang.org/x/sys-0.14.0
  • 0c78604 Bump golang.org/x/sys from 0.13.0 to 0.14.0
  • 96e0f73 Merge pull request #208 from fatih/dependabot/github_actions/actions/checkout-4
  • 4c66e32 Bump actions/checkout from 3 to 4
  • 2fb03d6 Merge pull request #202 from fatih/dependabot/github_actions/actions/setup-go-4
  • 8ba7bbd Bump actions/setup-go from 3 to 4
  • e3f97f3 Merge pull request #209 from fatih/dependabot/go_modules/golang.org/x/sys-0.13.0
  • e146575 Bump golang.org/x/sys from 0.10.0 to 0.13.0
  • d2d7a5e Merge pull request #212 from fatih/dependabot/go_modules/github.com/mattn/go-...
  • f3e45da Merge branch 'main' into dependabot/go_modules/github.com/mattn/go-isatty-0.0.20
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/fatih/color&package-manager=go_modules&previous-version=1.15.0&new-version=1.16.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 4b9a6614..e52bb383 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 github.com/databricks/databricks-sdk-go v0.24.0 // Apache 2.0 - github.com/fatih/color v1.15.0 // MIT + github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.4.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 @@ -54,7 +54,7 @@ require ( go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect + golang.org/x/sys v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.148.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index a230ea05..665b0dea 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -216,8 +216,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= From 283f24179dc7bf7053d45541c1bfdde75b3dad4f Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:35:59 +0100 Subject: [PATCH 211/310] Remove validation for default value against pattern (#959) ## Changes This PR removes validation for default value against the regex pattern specified in a JSON schema at schema load time. 
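A minimal standalone Go sketch (with a hypothetical pattern and template value; this is not the `libs/jsonschema` API itself) of why load-time matching breaks down once defaults can be templates:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A property pattern, as it might appear in a template schema.
	pattern := regexp.MustCompile(`^[a-z][a-z0-9_]*$`)

	// A templated default is a Go text template, not a final value.
	// Matching it at schema load time would reject configurations
	// that are perfectly valid once the template is rendered.
	rawDefault := "{{.project_name}}_job" // hypothetical templated default
	rendered := "my_project_job"          // what it might render to

	fmt.Println(pattern.MatchString(rawDefault)) // false
	fmt.Println(pattern.MatchString(rendered))   // true
}
```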
This is required because https://github.com/databricks/cli/pull/795 introduces parameterising the default value as a Go text template, implying that the default value now does not necessarily have to match the pattern at schema load time. This will also unblock: https://github.com/databricks/mlops-stacks/pull/108 Note that this does not remove runtime validation for input parameters right before template initialization, which happens here: https://github.com/databricks/cli/blob/fb32e78c9b9fb000ce898b8a60b0b47920f487d3/libs/template/materialize.go#L76 ## Tests Changes to existing test. --- libs/jsonschema/schema.go | 5 ----- libs/jsonschema/schema_test.go | 13 +------------ 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 57082dc8..83213791 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -168,11 +168,6 @@ func (schema *Schema) validateSchemaPattern() error { return fmt.Errorf("invalid regex pattern %q provided for property %q: %w", pattern, name, err) } - // validate default value against the pattern - if property.Default != nil && !r.MatchString(property.Default.(string)) { - return fmt.Errorf("default value %q for property %q does not match specified regex pattern: %q", property.Default, name, pattern) - } - // validate enum values against the pattern for i, enum := range property.Enum { if !r.MatchString(enum.(string)) { diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go index 8826a32b..a750f44a 100644 --- a/libs/jsonschema/schema_test.go +++ b/libs/jsonschema/schema_test.go @@ -175,7 +175,7 @@ func TestSchemaValidateIncorrectRegex(t *testing.T) { assert.EqualError(t, s.validate(), "invalid regex pattern \"(abc\" provided for property \"foo\": error parsing regexp: missing closing ): `(abc`") } -func TestSchemaValidatePatternDefault(t *testing.T) { +func TestSchemaDefaultValueIsNotValidatedAgainstPattern(t *testing.T) { s := &Schema{ Properties: map[string]*Schema{ "foo": { @@ -185,17 +185,6 @@ func TestSchemaValidatePatternDefault(t *testing.T) { }, }, } - assert.EqualError(t, s.validate(), "default value \"def\" for property \"foo\" does not match specified regex pattern: \"abc\"") - - s = &Schema{ - Properties: map[string]*Schema{ - "foo": { - Type: "string", - Pattern: "a.*d", - Default: "axyzd", - }, - }, - } assert.NoError(t, s.validate()) } From 56bcb6f8335d8c94bf788ed281ac2b783260374f Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Tue, 7 Nov 2023 20:06:27 +0100 Subject: [PATCH 212/310] Make Cobra runner compatible with testing interactive flows (#957) ## Changes This PR enables testing commands with stdin. ## Tests https://github.com/databricks/cli/pull/914 --- internal/helpers.go | 50 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/internal/helpers.go b/internal/helpers.go index 5a7e59e8..22e38e21 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "context" + "encoding/json" "fmt" "io" "math/rand" @@ -17,6 +18,7 @@ import ( "github.com/databricks/cli/cmd" _ "github.com/databricks/cli/cmd/version" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/compute" @@ -63,6 +65,8 @@ type cobraTestRunner struct { args []string stdout bytes.Buffer stderr bytes.Buffer + stdinR *io.PipeReader + stdinW
*io.PipeWriter ctx context.Context @@ -119,15 +123,46 @@ func (t *cobraTestRunner) registerFlagCleanup(c *cobra.Command) { }) } +// Like [cobraTestRunner.Eventually], but more specific +func (t *cobraTestRunner) WaitForTextPrinted(text string, timeout time.Duration) { + t.Eventually(func() bool { + currentStdout := t.stdout.String() + return strings.Contains(currentStdout, text) + }, timeout, 50*time.Millisecond) +} + +func (t *cobraTestRunner) WithStdin() { + reader, writer := io.Pipe() + t.stdinR = reader + t.stdinW = writer +} + +func (t *cobraTestRunner) CloseStdin() { + if t.stdinW == nil { + panic("no standard input configured") + } + t.stdinW.Close() +} + +func (t *cobraTestRunner) SendText(text string) { + if t.stdinW == nil { + panic("no standard input configured") + } + t.stdinW.Write([]byte(text + "\n")) +} + func (t *cobraTestRunner) RunBackground() { var stdoutR, stderrR io.Reader var stdoutW, stderrW io.WriteCloser stdoutR, stdoutW = io.Pipe() stderrR, stderrW = io.Pipe() - root := cmd.New(context.Background()) + root := cmd.New(t.ctx) root.SetOut(stdoutW) root.SetErr(stderrW) root.SetArgs(t.args) + if t.stdinW != nil { + root.SetIn(t.stdinR) + } // Register cleanup function to restore flags to their original values // once test has been executed. This is needed because flag values reside @@ -239,6 +274,19 @@ func (c *cobraTestRunner) Eventually(condition func() bool, waitFor time.Duratio } } +func (t *cobraTestRunner) RunAndExpectOutput(heredoc string) { + stdout, _, err := t.Run() + require.NoError(t, err) + require.Equal(t, cmdio.Heredoc(heredoc), strings.TrimSpace(stdout.String())) +} + +func (t *cobraTestRunner) RunAndParseJSON(v any) { + stdout, _, err := t.Run() + require.NoError(t, err) + err = json.Unmarshal(stdout.Bytes(), &v) + require.NoError(t, err) +} + func NewCobraTestRunner(t *testing.T, args ...string) *cobraTestRunner { return &cobraTestRunner{ T: t, From 10291b0e13fd8734b86f9a8668fc66f13ac066cc Mon Sep 17 00:00:00 2001 From: Michał Szafrański Date: Tue, 7 Nov 2023 21:00:09 +0100 Subject: [PATCH 213/310] Bundle path rewrites for dbt and SQL file tasks (#962) ## Changes Support path rewrites for dbt and SQL file job tasks.
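Concretely, a short Go sketch of what the new transformers do to task paths; the `/bundle/job` remote root and file names mirror the PR's unit test and should be read as example values:

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// Tasks authored with paths relative to the bundle file that defines them.
	dbtTask := jobs.Task{DbtTask: &jobs.DbtTask{ProjectDirectory: "./my_dbt_project"}}
	sqlTask := jobs.Task{SqlTask: &jobs.SqlTask{File: &jobs.SqlTaskFile{Path: "./my_sql_file.sql"}}}

	// After the translate-paths mutator runs with a remote root of /bundle/job,
	// the fields would read "/bundle/job/my_dbt_project" and
	// "/bundle/job/my_sql_file.sql". The dbt project must exist locally and be
	// a directory; the SQL file must exist locally and be a file.
	fmt.Println(dbtTask.DbtTask.ProjectDirectory, sqlTask.SqlTask.File.Path)
}
```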
## Tests * Added unit test --- bundle/config/mutator/translate_paths.go | 11 +++++++ bundle/config/mutator/translate_paths_jobs.go | 30 +++++++++++++++++++ bundle/config/mutator/translate_paths_test.go | 24 +++++++++++++++ 3 files changed, 65 insertions(+) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index acfd5525..8d3c8ce3 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -135,6 +135,17 @@ func translateFilePath(literal, localFullPath, localRelPath, remotePath string) return remotePath, nil } +func translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + info, err := os.Stat(localFullPath) + if err != nil { + return "", err + } + if !info.IsDir() { + return "", fmt.Errorf("%s is not a directory", localFullPath) + } + return remotePath, nil +} + func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) { return localRelPath, nil } diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index b94df5e2..564b8e02 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -50,6 +50,34 @@ func transformWhlLibrary(resource any, dir string) *transformer { } } +func transformDbtTask(resource any, dir string) *transformer { + task, ok := resource.(*jobs.Task) + if !ok || task.DbtTask == nil { + return nil + } + + return &transformer{ + dir, + &task.DbtTask.ProjectDirectory, + "tasks.dbt_task.project_directory", + translateDirectoryPath, + } +} + +func transformSqlFileTask(resource any, dir string) *transformer { + task, ok := resource.(*jobs.Task) + if !ok || task.SqlTask == nil || task.SqlTask.File == nil { + return nil + } + + return &transformer{ + dir, + &task.SqlTask.File.Path, + "tasks.sql_task.file.path", + translateFilePath, + } +} + func transformJarLibrary(resource any, dir string) *transformer { library, ok := resource.(*compute.Library) if !ok || library.Jar == "" { @@ -70,6 +98,8 @@ func applyJobTransformers(m *translatePaths, b *bundle.Bundle) error { transformSparkTask, transformWhlLibrary, transformJarLibrary, + transformDbtTask, + transformSqlFileTask, } for key, job := range b.Config.Resources.Jobs { diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index f7edee30..c24fd2e7 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -275,6 +275,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "job", "my_python_file.py")) touchEmptyFile(t, filepath.Join(dir, "job", "dist", "task.jar")) touchEmptyFile(t, filepath.Join(dir, "pipeline", "my_python_file.py")) + touchEmptyFile(t, filepath.Join(dir, "job", "my_sql_file.sql")) + touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml")) bundle := &bundle.Bundle{ Config: config.Root{ @@ -303,6 +305,18 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { {Jar: "./dist/task.jar"}, }, }, + { + SqlTask: &jobs.SqlTask{ + File: &jobs.SqlTaskFile{ + Path: "./my_sql_file.sql", + }, + }, + }, + { + DbtTask: &jobs.DbtTask{ + ProjectDirectory: "./my_dbt_project", + }, + }, }, }, }, @@ -341,6 +355,16 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { "/bundle/job/dist/task.jar", bundle.Config.Resources.Jobs["job"].Tasks[1].Libraries[0].Jar, ) + assert.Equal( + t, + 
"/bundle/job/my_sql_file.sql", + bundle.Config.Resources.Jobs["job"].Tasks[2].SqlTask.File.Path, + ) + assert.Equal( + t, + "/bundle/job/my_dbt_project", + bundle.Config.Resources.Jobs["job"].Tasks[3].DbtTask.ProjectDirectory, + ) assert.Equal( t, From f07832746b6d4803a3dd15df981911b7af3a717f Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 8 Nov 2023 08:33:40 +0100 Subject: [PATCH 214/310] Make configure command visible + fix bundle command description (#961) ## Changes Fixes #936 #937 --- cmd/bundle/bundle.go | 3 ++- cmd/configure/configure.go | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go index d8382d17..128c8302 100644 --- a/cmd/bundle/bundle.go +++ b/cmd/bundle/bundle.go @@ -7,7 +7,8 @@ import ( func New() *cobra.Command { cmd := &cobra.Command{ Use: "bundle", - Short: "Databricks Asset Bundles\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles", + Short: "Databricks Asset Bundles", + Long: "Databricks Asset Bundles\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles", } initVariableFlag(cmd) diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 0c1e4052..33ab918e 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -123,7 +123,6 @@ func newConfigureCommand() *cobra.Command { If this command is invoked in non-interactive mode, it will read the token from stdin. The host must be specified with the --host flag. `, - Hidden: true, } cmd.Flags().String("host", "", "Databricks workspace host.") From 7509e4d55afb64c11c6522a7280bdf268f78b3bd Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Wed, 8 Nov 2023 09:29:22 +0100 Subject: [PATCH 215/310] Hide `--progress-format` global flag (#965) ## Changes At the moment, these flags are mostly used for VSCode integration for bundles, but they're not effective for the majority of commands. ## Tests image --- cmd/root/progress_logger.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/root/progress_logger.go b/cmd/root/progress_logger.go index 328b9947..c05ecb04 100644 --- a/cmd/root/progress_logger.go +++ b/cmd/root/progress_logger.go @@ -56,7 +56,9 @@ func initProgressLoggerFlag(cmd *cobra.Command, logFlags *logFlags) *progressLog f.Set(v) } - cmd.PersistentFlags().Var(&f.ProgressLogFormat, "progress-format", "format for progress logs (append, inplace, json)") + flags := cmd.PersistentFlags() + flags.Var(&f.ProgressLogFormat, "progress-format", "format for progress logs (append, inplace, json)") + flags.MarkHidden("progress-format") cmd.RegisterFlagCompletionFunc("progress-format", f.ProgressLogFormat.Complete) return &f } From 65dd9c5c0f8fa04f1ad9f7676718eb44b74da3d3 Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Wed, 8 Nov 2023 09:51:01 +0100 Subject: [PATCH 216/310] Log process ID in each log entry (#949) ## Changes This will help differentiate multiple cli commands that write to the same log file. Noticed that the root module wasn't using the common log utilities, refactored it to avoid missing log arguments. 
Relevant PR on the databricks vscode extension side: https://github.com/databricks/databricks-vscode/pull/923 ## Tests Tested manually for sdk and cli loggers --- cmd/root/logger.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/root/logger.go b/cmd/root/logger.go index dca07ca4..0ad6756a 100644 --- a/cmd/root/logger.go +++ b/cmd/root/logger.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "log/slog" + "os" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/env" @@ -113,7 +114,7 @@ func (f *logFlags) initializeContext(ctx context.Context) (context.Context, erro return nil, err } - slog.SetDefault(slog.New(handler)) + slog.SetDefault(slog.New(handler).With(slog.Int("pid", os.Getpid()))) return log.NewContext(ctx, slog.Default()), nil } From 7847388f95b5bd6843ba13c8e6518fffd2e88c72 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 8 Nov 2023 12:01:14 +0100 Subject: [PATCH 217/310] Initialize variable definitions that are defined without properties (#966) ## Changes We can debate whether or not variable definitions without properties are valid, but in no case should this panic the CLI. Fixes #934. ## Tests Unit. --- bundle/config/mutator/initialize_variables.go | 30 +++++++++++++ .../mutator/initialize_variables_test.go | 42 +++++++++++++++++++ bundle/config/mutator/mutator.go | 1 + .../without_definition/databricks.yml | 3 ++ bundle/tests/variables_test.go | 12 ++++++ 5 files changed, 88 insertions(+) create mode 100644 bundle/config/mutator/initialize_variables.go create mode 100644 bundle/config/mutator/initialize_variables_test.go create mode 100644 bundle/tests/variables/without_definition/databricks.yml diff --git a/bundle/config/mutator/initialize_variables.go b/bundle/config/mutator/initialize_variables.go new file mode 100644 index 00000000..8e50b4d0 --- /dev/null +++ b/bundle/config/mutator/initialize_variables.go @@ -0,0 +1,30 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/variable" +) + +type initializeVariables struct{} + +// InitializeVariables initializes nil variables to their corresponding zero values. 
+func InitializeVariables() bundle.Mutator { + return &initializeVariables{} +} + +func (m *initializeVariables) Name() string { + return "InitializeVariables" +} + +func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) error { + vars := b.Config.Variables + for k, v := range vars { + if v == nil { + vars[k] = &variable.Variable{} + } + } + + return nil +} diff --git a/bundle/config/mutator/initialize_variables_test.go b/bundle/config/mutator/initialize_variables_test.go new file mode 100644 index 00000000..46445591 --- /dev/null +++ b/bundle/config/mutator/initialize_variables_test.go @@ -0,0 +1,42 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/variable" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInitializeVariables(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "foo": nil, + "bar": { + Description: "This is a description", + }, + }, + }, + } + err := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) + require.NoError(t, err) + assert.NotNil(t, b.Config.Variables["foo"]) + assert.NotNil(t, b.Config.Variables["bar"]) + assert.Equal(t, "This is a description", b.Config.Variables["bar"].Description) +} + +func TestInitializeVariablesWithoutVariables(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Variables: nil, + }, + } + err := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) + require.NoError(t, err) + assert.Nil(t, b.Config.Variables) +} diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index aa762e8e..b6327e85 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -10,6 +10,7 @@ func DefaultMutators() []bundle.Mutator { return []bundle.Mutator{ scripts.Execute(config.ScriptPreInit), ProcessRootIncludes(), + InitializeVariables(), DefineDefaultTarget(), LoadGitDetails(), } diff --git a/bundle/tests/variables/without_definition/databricks.yml b/bundle/tests/variables/without_definition/databricks.yml new file mode 100644 index 00000000..68227b68 --- /dev/null +++ b/bundle/tests/variables/without_definition/databricks.yml @@ -0,0 +1,3 @@ +variables: + a: + b: diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 93c82250..86706ebd 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -92,3 +92,15 @@ func TestVariablesTargetsBlockOverrideWithUndefinedVariables(t *testing.T) { ))) assert.ErrorContains(t, err, "variable c is not defined but is assigned a value") } + +func TestVariablesWithoutDefinition(t *testing.T) { + t.Setenv("BUNDLE_VAR_a", "foo") + t.Setenv("BUNDLE_VAR_b", "bar") + b := load(t, "./variables/without_definition") + err := bundle.Apply(context.Background(), b, mutator.SetVariables()) + require.NoError(t, err) + require.True(t, b.Config.Variables["a"].HasValue()) + require.True(t, b.Config.Variables["b"].HasValue()) + assert.Equal(t, "foo", *b.Config.Variables["a"].Value) + assert.Equal(t, "bar", *b.Config.Variables["b"].Value) +} From e68a88e14d7fdcafb6d240311b164155c2c5d3c9 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Wed, 8 Nov 2023 15:50:20 +0100 Subject: [PATCH 218/310] Added `env.UserHomeDir(ctx)` for parallel-friendly tests (#955) 
## Changes `os.Getenv(..)` is not friendly with `libs/env`. This PR makes the relevant changes to places where we need to read user home directory. ## Tests Mainly done in https://github.com/databricks/cli/pull/914 --- cmd/auth/env.go | 7 +-- cmd/auth/login.go | 2 +- cmd/auth/profiles.go | 2 +- cmd/root/auth.go | 30 ++++------- libs/databrickscfg/profiles.go | 43 +++++++++------- libs/databrickscfg/profiles_test.go | 51 ++++++++++++++----- .../testdata/sample-home/.databrickscfg | 7 +++ libs/env/context.go | 21 ++++++++ libs/env/context_test.go | 7 +++ libs/env/loader.go | 50 ++++++++++++++++++ libs/env/loader_test.go | 26 ++++++++++ 11 files changed, 190 insertions(+), 56 deletions(-) create mode 100644 libs/databrickscfg/testdata/sample-home/.databrickscfg create mode 100644 libs/env/loader.go create mode 100644 libs/env/loader_test.go diff --git a/cmd/auth/env.go b/cmd/auth/env.go index 241d5f88..04aef36a 100644 --- a/cmd/auth/env.go +++ b/cmd/auth/env.go @@ -1,6 +1,7 @@ package auth import ( + "context" "encoding/json" "errors" "fmt" @@ -68,8 +69,8 @@ func resolveSection(cfg *config.Config, iniFile *config.File) (*ini.Section, err return candidates[0], nil } -func loadFromDatabricksCfg(cfg *config.Config) error { - iniFile, err := databrickscfg.Get() +func loadFromDatabricksCfg(ctx context.Context, cfg *config.Config) error { + iniFile, err := databrickscfg.Get(ctx) if errors.Is(err, fs.ErrNotExist) { // it's fine not to have ~/.databrickscfg return nil @@ -110,7 +111,7 @@ func newEnvCommand() *cobra.Command { cfg.Profile = profile } else if cfg.Host == "" { cfg.Profile = "DEFAULT" - } else if err := loadFromDatabricksCfg(cfg); err != nil { + } else if err := loadFromDatabricksCfg(cmd.Context(), cfg); err != nil { return err } // Go SDK is lazy loaded because of Terraform semantics, diff --git a/cmd/auth/login.go b/cmd/auth/login.go index 3a3f3a6d..c2b821b6 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -128,7 +128,7 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { func setHost(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { // If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile. - _, profiles, err := databrickscfg.LoadProfiles(func(p databrickscfg.Profile) bool { + _, profiles, err := databrickscfg.LoadProfiles(ctx, func(p databrickscfg.Profile) bool { return p.Name == profileName }) if err != nil { diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 97d8eeab..51ae9b18 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -95,7 +95,7 @@ func newProfilesCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { var profiles []*profileMetadata - iniFile, err := databrickscfg.Get() + iniFile, err := databrickscfg.Get(cmd.Context()) if os.IsNotExist(err) { // return empty list for non-configured machines iniFile = &config.File{ diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 81c71479..350cbc65 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "net/http" - "os" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" @@ -55,7 +54,7 @@ func accountClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt } // Try picking a profile dynamically if the current configuration is not valid. 
- profile, err := askForAccountProfile(ctx) + profile, err := AskForAccountProfile(ctx) if err != nil { return nil, err } @@ -83,7 +82,7 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { // 1. only admins will have account configured // 2. 99% of admins will have access to just one account // hence, we don't need to create a special "DEFAULT_ACCOUNT" profile yet - _, profiles, err := databrickscfg.LoadProfiles(databrickscfg.MatchAccountProfiles) + _, profiles, err := databrickscfg.LoadProfiles(cmd.Context(), databrickscfg.MatchAccountProfiles) if err != nil { return err } @@ -123,7 +122,7 @@ func workspaceClientOrPrompt(ctx context.Context, cfg *config.Config, allowPromp } // Try picking a profile dynamically if the current configuration is not valid. - profile, err := askForWorkspaceProfile(ctx) + profile, err := AskForWorkspaceProfile(ctx) if err != nil { return nil, err } @@ -173,21 +172,14 @@ func SetWorkspaceClient(ctx context.Context, w *databricks.WorkspaceClient) cont return context.WithValue(ctx, &workspaceClient, w) } -func transformLoadError(path string, err error) error { - if os.IsNotExist(err) { - return fmt.Errorf("no configuration file found at %s; please create one first", path) - } - return err -} - -func askForWorkspaceProfile(ctx context.Context) (string, error) { - path, err := databrickscfg.GetPath() +func AskForWorkspaceProfile(ctx context.Context) (string, error) { + path, err := databrickscfg.GetPath(ctx) if err != nil { return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) } - file, profiles, err := databrickscfg.LoadProfiles(databrickscfg.MatchWorkspaceProfiles) + file, profiles, err := databrickscfg.LoadProfiles(ctx, databrickscfg.MatchWorkspaceProfiles) if err != nil { - return "", transformLoadError(path, err) + return "", err } switch len(profiles) { case 0: @@ -213,14 +205,14 @@ func askForWorkspaceProfile(ctx context.Context) (string, error) { return profiles[i].Name, nil } -func askForAccountProfile(ctx context.Context) (string, error) { - path, err := databrickscfg.GetPath() +func AskForAccountProfile(ctx context.Context) (string, error) { + path, err := databrickscfg.GetPath(ctx) if err != nil { return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) } - file, profiles, err := databrickscfg.LoadProfiles(databrickscfg.MatchAccountProfiles) + file, profiles, err := databrickscfg.LoadProfiles(ctx, databrickscfg.MatchAccountProfiles) if err != nil { - return "", transformLoadError(path, err) + return "", err } switch len(profiles) { case 0: diff --git a/libs/databrickscfg/profiles.go b/libs/databrickscfg/profiles.go index 864000d0..9f31eff6 100644 --- a/libs/databrickscfg/profiles.go +++ b/libs/databrickscfg/profiles.go @@ -1,11 +1,14 @@ package databrickscfg import ( + "context" + "errors" "fmt" - "os" + "io/fs" "path/filepath" "strings" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" ) @@ -67,43 +70,45 @@ func MatchAllProfiles(p Profile) bool { } // Get the path to the .databrickscfg file, falling back to the default in the current user's home directory. 
-func GetPath() (string, error) { - configFile := os.Getenv("DATABRICKS_CONFIG_FILE") +func GetPath(ctx context.Context) (string, error) { + configFile := env.Get(ctx, "DATABRICKS_CONFIG_FILE") if configFile == "" { configFile = "~/.databrickscfg" } if strings.HasPrefix(configFile, "~") { - homedir, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("cannot find homedir: %w", err) - } + homedir := env.UserHomeDir(ctx) configFile = filepath.Join(homedir, configFile[1:]) } return configFile, nil } -func Get() (*config.File, error) { - configFile, err := GetPath() +var ErrNoConfiguration = errors.New("no configuration file found") + +func Get(ctx context.Context) (*config.File, error) { + path, err := GetPath(ctx) if err != nil { return nil, fmt.Errorf("cannot determine Databricks config file path: %w", err) } - return config.LoadFile(configFile) + configFile, err := config.LoadFile(path) + if errors.Is(err, fs.ErrNotExist) { + // downstreams depend on ErrNoConfiguration. TODO: expose this error through SDK + return nil, fmt.Errorf("%w at %s; please create one first", ErrNoConfiguration, path) + } else if err != nil { + return nil, err + } + return configFile, nil } -func LoadProfiles(fn ProfileMatchFunction) (file string, profiles Profiles, err error) { - f, err := Get() +func LoadProfiles(ctx context.Context, fn ProfileMatchFunction) (file string, profiles Profiles, err error) { + f, err := Get(ctx) if err != nil { return "", nil, fmt.Errorf("cannot load Databricks config file: %w", err) } - homedir, err := os.UserHomeDir() - if err != nil { - return - } - // Replace homedir with ~ if applicable. // This is to make the output more readable. - file = f.Path() + file = filepath.Clean(f.Path()) + homedir := filepath.Clean(env.UserHomeDir(ctx)) if strings.HasPrefix(file, homedir) { file = "~" + file[len(homedir):] } @@ -130,7 +135,7 @@ func LoadProfiles(fn ProfileMatchFunction) (file string, profiles Profiles, err } func ProfileCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - _, profiles, err := LoadProfiles(MatchAllProfiles) + _, profiles, err := LoadProfiles(cmd.Context(), MatchAllProfiles) if err != nil { return nil, cobra.ShellCompDirectiveError } diff --git a/libs/databrickscfg/profiles_test.go b/libs/databrickscfg/profiles_test.go index b1acdce9..33a5c9df 100644 --- a/libs/databrickscfg/profiles_test.go +++ b/libs/databrickscfg/profiles_test.go @@ -1,9 +1,11 @@ package databrickscfg import ( - "runtime" + "context" + "path/filepath" "testing" + "github.com/databricks/cli/libs/env" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -27,27 +29,50 @@ func TestProfilesSearchCaseInsensitive(t *testing.T) { } func TestLoadProfilesReturnsHomedirAsTilde(t *testing.T) { - if runtime.GOOS == "windows" { - t.Setenv("USERPROFILE", "./testdata") - } else { - t.Setenv("HOME", "./testdata") - } - t.Setenv("DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - file, _, err := LoadProfiles(func(p Profile) bool { return true }) + ctx := context.Background() + ctx = env.WithUserHomeDir(ctx, "testdata") + ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") + file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) require.NoError(t, err) - assert.Equal(t, "~/databrickscfg", file) + require.Equal(t, filepath.Clean("~/databrickscfg"), file) +} + +func TestLoadProfilesReturnsHomedirAsTildeExoticFile(t *testing.T) { + ctx := context.Background() + ctx = 
env.WithUserHomeDir(ctx, "testdata") + ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "~/databrickscfg") + file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + require.NoError(t, err) + require.Equal(t, filepath.Clean("~/databrickscfg"), file) +} + +func TestLoadProfilesReturnsHomedirAsTildeDefaultFile(t *testing.T) { + ctx := context.Background() + ctx = env.WithUserHomeDir(ctx, "testdata/sample-home") + file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + require.NoError(t, err) + require.Equal(t, filepath.Clean("~/.databrickscfg"), file) +} + +func TestLoadProfilesNoConfiguration(t *testing.T) { + ctx := context.Background() + ctx = env.WithUserHomeDir(ctx, "testdata") + _, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + require.ErrorIs(t, err, ErrNoConfiguration) } func TestLoadProfilesMatchWorkspace(t *testing.T) { - t.Setenv("DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - _, profiles, err := LoadProfiles(MatchWorkspaceProfiles) + ctx := context.Background() + ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") + _, profiles, err := LoadProfiles(ctx, MatchWorkspaceProfiles) require.NoError(t, err) assert.Equal(t, []string{"DEFAULT", "query", "foo1", "foo2"}, profiles.Names()) } func TestLoadProfilesMatchAccount(t *testing.T) { - t.Setenv("DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - _, profiles, err := LoadProfiles(MatchAccountProfiles) + ctx := context.Background() + ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") + _, profiles, err := LoadProfiles(ctx, MatchAccountProfiles) require.NoError(t, err) assert.Equal(t, []string{"acc"}, profiles.Names()) } diff --git a/libs/databrickscfg/testdata/sample-home/.databrickscfg b/libs/databrickscfg/testdata/sample-home/.databrickscfg new file mode 100644 index 00000000..96c8b7ca --- /dev/null +++ b/libs/databrickscfg/testdata/sample-home/.databrickscfg @@ -0,0 +1,7 @@ +[DEFAULT] +host = https://default +token = default + +[acc] +host = https://accounts.cloud.databricks.com +account_id = abc diff --git a/libs/env/context.go b/libs/env/context.go index bbe294d7..84518ad7 100644 --- a/libs/env/context.go +++ b/libs/env/context.go @@ -2,7 +2,9 @@ package env import ( "context" + "fmt" "os" + "runtime" "strings" ) @@ -63,6 +65,25 @@ func Set(ctx context.Context, key, value string) context.Context { return setMap(ctx, m) } +func homeEnvVar() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +func WithUserHomeDir(ctx context.Context, value string) context.Context { + return Set(ctx, homeEnvVar(), value) +} + +func UserHomeDir(ctx context.Context) string { + home := Get(ctx, homeEnvVar()) + if home == "" { + panic(fmt.Errorf("$HOME is not set")) + } + return home +} + // All returns environment variables that are defined in both os.Environ // and this package. `env.Set(ctx, x, y)` will override x from os.Environ. 
func All(ctx context.Context) map[string]string { diff --git a/libs/env/context_test.go b/libs/env/context_test.go index 39553448..5befe4ac 100644 --- a/libs/env/context_test.go +++ b/libs/env/context_test.go @@ -47,3 +47,10 @@ func TestContext(t *testing.T) { assert.Equal(t, "x=y", all["BAR"]) assert.NotEmpty(t, all["PATH"]) } + +func TestHome(t *testing.T) { + ctx := context.Background() + ctx = WithUserHomeDir(ctx, "...") + home := UserHomeDir(ctx) + assert.Equal(t, "...", home) +} diff --git a/libs/env/loader.go b/libs/env/loader.go new file mode 100644 index 00000000..f441ffa1 --- /dev/null +++ b/libs/env/loader.go @@ -0,0 +1,50 @@ +package env + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/config" +) + +// NewConfigLoader creates Databricks SDK Config loader that is aware of env.Set variables: +// +// ctx = env.Set(ctx, "DATABRICKS_WAREHOUSE_ID", "...") +// +// Usage: +// +// &config.Config{ +// Loaders: []config.Loader{ +// env.NewConfigLoader(ctx), +// config.ConfigAttributes, +// config.ConfigFile, +// }, +// } +func NewConfigLoader(ctx context.Context) *configLoader { + return &configLoader{ + ctx: ctx, + } +} + +type configLoader struct { + ctx context.Context +} + +func (le *configLoader) Name() string { + return "cli-env" +} + +func (le *configLoader) Configure(cfg *config.Config) error { + for _, a := range config.ConfigAttributes { + if !a.IsZero(cfg) { + continue + } + for _, k := range a.EnvVars { + v := Get(le.ctx, k) + if v == "" { + continue + } + a.Set(cfg, v) + } + } + return nil +} diff --git a/libs/env/loader_test.go b/libs/env/loader_test.go new file mode 100644 index 00000000..2d1fa400 --- /dev/null +++ b/libs/env/loader_test.go @@ -0,0 +1,26 @@ +package env + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/config" + "github.com/stretchr/testify/assert" +) + +func TestLoader(t *testing.T) { + ctx := context.Background() + ctx = Set(ctx, "DATABRICKS_WAREHOUSE_ID", "...") + ctx = Set(ctx, "DATABRICKS_CONFIG_PROFILE", "...") + loader := NewConfigLoader(ctx) + + cfg := &config.Config{ + Profile: "abc", + } + err := loader.Configure(cfg) + assert.NoError(t, err) + + assert.Equal(t, "...", cfg.WarehouseID) + assert.Equal(t, "abc", cfg.Profile) + assert.Equal(t, "cli-env", loader.Name()) +} From 47e434db2f464582642cc0f4c4768508eddcfc06 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 8 Nov 2023 16:07:29 +0100 Subject: [PATCH 219/310] Improve error message when `--json` flag is specified (#933) ## Changes Improve error message when --json input is provided ## Tests ``` cli % databricks model-registry create-model mymodel --json @./input.json Error: when --json flag is specified, no positional arguments are required. 
Provide NAME in your JSON input ``` --- .codegen/service.go.tmpl | 12 +- cmd/account/log-delivery/log-delivery.go | 8 +- cmd/account/networks/networks.go | 8 +- cmd/account/private-access/private-access.go | 16 ++- cmd/account/vpc-endpoints/vpc-endpoints.go | 8 +- cmd/account/workspaces/workspaces.go | 8 +- cmd/workspace/catalogs/catalogs.go | 10 +- .../cluster-policies/cluster-policies.go | 16 ++- cmd/workspace/clusters/clusters.go | 24 +++- cmd/workspace/experiments/experiments.go | 104 ++++++++++---- .../external-locations/external-locations.go | 10 +- .../git-credentials/git-credentials.go | 8 +- .../global-init-scripts.go | 16 ++- .../instance-pools/instance-pools.go | 16 ++- .../instance-profiles/instance-profiles.go | 26 +++- cmd/workspace/metastores/metastores.go | 24 +++- .../model-registry/model-registry.go | 128 +++++++++++++----- cmd/workspace/providers/providers.go | 8 +- cmd/workspace/recipients/recipients.go | 16 ++- .../registered-models/registered-models.go | 16 ++- cmd/workspace/repos/repos.go | 8 +- cmd/workspace/schemas/schemas.go | 8 +- cmd/workspace/secrets/secrets.go | 40 ++++-- cmd/workspace/shares/shares.go | 10 +- .../storage-credentials.go | 8 +- .../token-management/token-management.go | 8 +- cmd/workspace/volumes/volumes.go | 8 +- cmd/workspace/workspace/workspace.go | 8 +- 28 files changed, 438 insertions(+), 142 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 27b9a754..5feb0c87 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -141,12 +141,20 @@ func new{{.PascalName}}() *cobra.Command { cmd.Annotations = make(map[string]string) {{if $hasRequiredArgs }} cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs({{len .Request.RequiredFields}}) {{- if and .CanUseJson .Request.HasRequiredRequestBodyFields }} if cmd.Flags().Changed("json") { - check = cobra.ExactArgs({{len .Request.RequiredPathFields}}) + err := cobra.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args) + if err != nil { + {{- if eq 0 (len .Request.RequiredPathFields) }} + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") + {{- else }} + return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := .Request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := .Request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") + {{- end }} + } + return nil } {{- end }} + check := cobra.ExactArgs({{len .Request.RequiredFields}}) return check(cmd, args) } {{end}} diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index 48ebe9e9..fdc5e386 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -356,10 +356,14 @@ func newPatchStatus() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(1) + err := cobra.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only LOG_DELIVERY_CONFIGURATION_ID as positional arguments. 
Provide 'status' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/account/networks/networks.go b/cmd/account/networks/networks.go index 74b3ffde..1aa2520f 100755 --- a/cmd/account/networks/networks.go +++ b/cmd/account/networks/networks.go @@ -71,10 +71,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'network_name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 094c030b..4aff4192 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -80,10 +80,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'private_access_settings_name', 'region' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -390,10 +394,14 @@ func newReplace() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(1) + err := cobra.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only PRIVATE_ACCESS_SETTINGS_ID as positional arguments. Provide 'private_access_settings_name', 'region' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/account/vpc-endpoints/vpc-endpoints.go b/cmd/account/vpc-endpoints/vpc-endpoints.go index 4cefe242..8c46ab82 100755 --- a/cmd/account/vpc-endpoints/vpc-endpoints.go +++ b/cmd/account/vpc-endpoints/vpc-endpoints.go @@ -78,10 +78,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'vpc_endpoint_name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 993e569f..1a6aa90d 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -99,10 +99,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'workspace_name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index d1b54452..7846c0e0 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -3,6 +3,8 @@ package catalogs import ( + "fmt" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -76,10 +78,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 0bd7b4a9..1412b460 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -91,10 +91,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -255,10 +259,14 @@ func newEdit() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'policy_id', 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index e4fb6e0a..bc45d14a 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -90,10 +90,14 @@ func newChangeOwner() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id', 'owner_username' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -207,10 +211,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'spark_version' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -439,10 +447,14 @@ func newEdit() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id', 'spark_version' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index ed807ae5..420593a2 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -78,10 +78,14 @@ func newCreateExperiment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -230,10 +234,14 @@ func newDeleteExperiment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -304,10 +312,14 @@ func newDeleteRun() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -381,10 +393,14 @@ func newDeleteRuns() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id', 'max_timestamp_millis' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -462,10 +478,14 @@ func newDeleteTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'run_id', 'key' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -1241,10 +1261,14 @@ func newLogMetric() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'key', 'value', 'timestamp' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } @@ -1405,10 +1429,14 @@ func newLogParam() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'key', 'value' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -1487,10 +1515,14 @@ func newRestoreExperiment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -1561,10 +1593,14 @@ func newRestoreRun() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -1638,10 +1674,14 @@ func newRestoreRuns() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id', 'min_timestamp_millis' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -1869,10 +1909,14 @@ func newSetExperimentTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'experiment_id', 'key', 'value' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } @@ -2025,10 +2069,14 @@ func newSetTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'key', 'value' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -2104,10 +2152,14 @@ func newUpdateExperiment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index d510d2a9..a5c69259 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -3,6 +3,8 @@ package external_locations import ( + "fmt" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -80,10 +82,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'url', 'credential_name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index 81348155..1d9e64a0 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -73,10 +73,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'git_provider' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index 513b9637..3674d405 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -73,10 +73,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'script' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -351,10 +355,14 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(1) + err := cobra.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only SCRIPT_ID as positional arguments. Provide 'name', 'script' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 1109b921..ae23eac0 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -91,10 +91,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_pool_name', 'node_type_id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -256,10 +260,14 @@ func newEdit() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'instance_pool_id', 'instance_pool_name', 'node_type_id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/instance-profiles/instance-profiles.go b/cmd/workspace/instance-profiles/instance-profiles.go index b3fdfc65..085707b7 100755 --- a/cmd/workspace/instance-profiles/instance-profiles.go +++ b/cmd/workspace/instance-profiles/instance-profiles.go @@ -3,6 +3,8 @@ package instance_profiles import ( + "fmt" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -70,10 +72,14 @@ func newAdd() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_profile_arn' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -161,10 +167,14 @@ func newEdit() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_profile_arn' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -288,10 +298,14 @@ func newRemove() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_profile_arn' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 92144ec7..d74c9bbc 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -76,10 +76,14 @@ func newAssign() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(1) + err := cobra.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only WORKSPACE_ID as positional arguments. Provide 'metastore_id', 'default_catalog_name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } @@ -159,10 +163,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'storage_root' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -362,10 +370,14 @@ func newEnableOptimization() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'metastore_id', 'enable' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index c0fe43c7..1ae5c8eb 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -71,10 +71,14 @@ func newApproveTransitionRequest() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(4) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage', 'archive_existing_versions' in your JSON input") + } + return nil } + check := cobra.ExactArgs(4) return check(cmd, args) } @@ -162,10 +166,14 @@ func newCreateComment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'comment' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } @@ -248,10 +256,14 @@ func newCreateModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -327,10 +339,14 @@ func newCreateModelVersion() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'source' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -406,10 +422,14 @@ func newCreateTransitionRequest() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'version', 'stage' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } @@ -1005,10 +1025,14 @@ func newGetLatestVersions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -1583,10 +1607,14 @@ func newRejectTransitionRequest() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } @@ -1668,10 +1696,14 @@ func newRenameModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -1870,10 +1902,14 @@ func newSetModelTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'key', 'value' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } @@ -1950,10 +1986,14 @@ func newSetModelVersionTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(4) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'key', 'value' in your JSON input") + } + return nil } + check := cobra.ExactArgs(4) return check(cmd, args) } @@ -2109,10 +2149,14 @@ func newTestRegistryWebhook() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -2189,10 +2233,14 @@ func newTransitionStage() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(4) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage', 'archive_existing_versions' in your JSON input") + } + return nil } + check := cobra.ExactArgs(4) return check(cmd, args) } @@ -2278,10 +2326,14 @@ func newUpdateComment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'id', 'comment' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -2357,10 +2409,14 @@ func newUpdateModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -2433,10 +2489,14 @@ func newUpdateModelVersion() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -2590,10 +2650,14 @@ func newUpdateWebhook() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'id' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 69a16725..1da8202d 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -68,10 +68,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'authentication_type' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 53576043..260729cb 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -87,10 +87,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'authentication_type' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -381,10 +385,14 @@ func newRotateToken() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(1) + err := cobra.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only NAME as positional arguments. Provide 'existing_token_expire_in_seconds' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index 64f40e17..e594f2eb 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -104,10 +104,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'catalog_name', 'schema_name', 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } @@ -488,10 +492,14 @@ func newSetAlias() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(2) + err := cobra.ExactArgs(2)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only FULL_NAME, ALIAS as positional arguments. Provide 'version_num' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 1a2a43b4..e8261c01 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -76,10 +76,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'url', 'provider' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index 70d8b633..8b42281a 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -72,10 +72,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'catalog_name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 9715d390..c124e7ef 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -79,10 +79,14 @@ func newCreateScope() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -158,10 +162,14 @@ func newDeleteAcl() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope', 'principal' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -239,10 +247,14 @@ func newDeleteScope() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -318,10 +330,14 @@ func newDeleteSecret() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope', 'key' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -745,10 +761,14 @@ func newPutAcl() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'scope', 'principal', 'permission' in your JSON input") + } + return nil } + check := cobra.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index c8cab3b7..de6cc5df 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -3,6 +3,8 @@ package shares import ( + "fmt" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -68,10 +70,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 00c0c215..b70d949a 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -82,10 +82,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index 5d34a2c7..b74b0483 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -66,10 +66,14 @@ func newCreateOboToken() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'application_id', 'lifetime_seconds' in your JSON input") + } + return nil } + check := cobra.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index d443cea9..ef90eec5 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -89,10 +89,14 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(4) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'catalog_name', 'schema_name', 'name', 'volume_type' in your JSON input") + } + return nil } + check := cobra.ExactArgs(4) return check(cmd, args) } diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 4af888ac..dcfb7147 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -436,10 +436,14 @@ func newImport() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'path' in your JSON input") + } + return nil } + check := cobra.ExactArgs(1) return check(cmd, args) } From b72f2a9604413d216c1ff511a6f953813b941a94 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 8 Nov 2023 17:30:48 +0100 Subject: [PATCH 220/310] Release v0.209.1 (#969) CLI: * Hide `--progress-format` global flag ([#965](https://github.com/databricks/cli/pull/965)). * Make configure command visible + fix bundle command description ([#961](https://github.com/databricks/cli/pull/961)). * Log process ID in each log entry ([#949](https://github.com/databricks/cli/pull/949)). * Improve error message when `--json` flag is specified ([#933](https://github.com/databricks/cli/pull/933)). Bundles: * Remove validation for default value against pattern ([#959](https://github.com/databricks/cli/pull/959)). * Bundle path rewrites for dbt and SQL file tasks ([#962](https://github.com/databricks/cli/pull/962)). * Initialize variable definitions that are defined without properties ([#966](https://github.com/databricks/cli/pull/966)). Internal: * Function to merge two instances of `config.Value` ([#938](https://github.com/databricks/cli/pull/938)). * Make to/from string methods private to the jsonschema package ([#942](https://github.com/databricks/cli/pull/942)). * Make Cobra runner compatible with testing interactive flows ([#957](https://github.com/databricks/cli/pull/957)). * Added `env.UserHomeDir(ctx)` for parallel-friendly tests ([#955](https://github.com/databricks/cli/pull/955)). Dependency updates: * Bump golang.org/x/mod from 0.13.0 to 0.14.0 ([#954](https://github.com/databricks/cli/pull/954)). * Bump golang.org/x/text from 0.13.0 to 0.14.0 ([#953](https://github.com/databricks/cli/pull/953)). * Bump golang.org/x/sync from 0.4.0 to 0.5.0 ([#951](https://github.com/databricks/cli/pull/951)). * Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 ([#950](https://github.com/databricks/cli/pull/950)). * Bump github.com/fatih/color from 1.15.0 to 1.16.0 ([#952](https://github.com/databricks/cli/pull/952)). --- CHANGELOG.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 424fef90..fba60a06 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Version changelog +## 0.209.1 + +CLI: + * Hide `--progress-format` global flag ([#965](https://github.com/databricks/cli/pull/965)). + * Make configure command visible + fix bundle command description ([#961](https://github.com/databricks/cli/pull/961)). + * Log process ID in each log entry ([#949](https://github.com/databricks/cli/pull/949)). + * Improve error message when `--json` flag is specified ([#933](https://github.com/databricks/cli/pull/933)). 
+ +Bundles: + * Remove validation for default value against pattern ([#959](https://github.com/databricks/cli/pull/959)). + * Bundle path rewrites for dbt and SQL file tasks ([#962](https://github.com/databricks/cli/pull/962)). + * Initialize variable definitions that are defined without properties ([#966](https://github.com/databricks/cli/pull/966)). + +Internal: + * Function to merge two instances of `config.Value` ([#938](https://github.com/databricks/cli/pull/938)). + * Make to/from string methods private to the jsonschema package ([#942](https://github.com/databricks/cli/pull/942)). + * Make Cobra runner compatible with testing interactive flows ([#957](https://github.com/databricks/cli/pull/957)). + * Added `env.UserHomeDir(ctx)` for parallel-friendly tests ([#955](https://github.com/databricks/cli/pull/955)). + + +Dependency updates: + * Bump golang.org/x/mod from 0.13.0 to 0.14.0 ([#954](https://github.com/databricks/cli/pull/954)). + * Bump golang.org/x/text from 0.13.0 to 0.14.0 ([#953](https://github.com/databricks/cli/pull/953)). + * Bump golang.org/x/sync from 0.4.0 to 0.5.0 ([#951](https://github.com/databricks/cli/pull/951)). + * Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 ([#950](https://github.com/databricks/cli/pull/950)). + * Bump github.com/fatih/color from 1.15.0 to 1.16.0 ([#952](https://github.com/databricks/cli/pull/952)). + ## 0.209.0 CLI: From d4d4b7480fd9e25c89989c94c05af81fe8f26a40 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 8 Nov 2023 17:48:37 +0100 Subject: [PATCH 221/310] Do not allow empty descriptions for bundle template inputs (#967) ## Changes We rely on the descriptions to render the prompts to a user. Thus we should not allow empty descriptions here. Note, both mlops stacks and the default-python template have descriptions for all their properties so this should not be an issue. ## Tests Unit test --- libs/template/config.go | 7 +++++++ libs/template/config_test.go | 5 +++++ .../testdata/config-test-schema/invalid-test-schema.json | 8 ++++++++ .../template/testdata/config-test-schema/test-schema.json | 8 ++++++-- 4 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 libs/template/testdata/config-test-schema/invalid-test-schema.json diff --git a/libs/template/config.go b/libs/template/config.go index 58b671fb..51283e03 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -25,6 +25,13 @@ func newConfig(ctx context.Context, schemaPath string) (*config, error) { return nil, err } + // Validate that all properties have a description + for name, p := range schema.Properties { + if p.Description == "" { + return nil, fmt.Errorf("template property %s is missing a description", name) + } + } + // Do not allow template input variables that are not defined in the schema. 
schema.AdditionalProperties = false diff --git a/libs/template/config_test.go b/libs/template/config_test.go index 9a0a9931..69e7054f 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -189,3 +189,8 @@ func TestAssignDefaultValuesWithTemplatedDefaults(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "my_file", c.values["string_val"]) } + +func TestTemplateSchemaErrorsWithEmptyDescription(t *testing.T) { + _, err := newConfig(context.Background(), "./testdata/config-test-schema/invalid-test-schema.json") + assert.EqualError(t, err, "template property property-without-description is missing a description") +} diff --git a/libs/template/testdata/config-test-schema/invalid-test-schema.json b/libs/template/testdata/config-test-schema/invalid-test-schema.json new file mode 100644 index 00000000..53514057 --- /dev/null +++ b/libs/template/testdata/config-test-schema/invalid-test-schema.json @@ -0,0 +1,8 @@ +{ + "properties": { + "property-without-description": { + "type": "integer", + "default": 123 + } + } +} diff --git a/libs/template/testdata/config-test-schema/test-schema.json b/libs/template/testdata/config-test-schema/test-schema.json index 6daf4959..10f8652f 100644 --- a/libs/template/testdata/config-test-schema/test-schema.json +++ b/libs/template/testdata/config-test-schema/test-schema.json @@ -2,16 +2,20 @@ "properties": { "int_val": { "type": "integer", + "description": "This is an integer value", "default": 123 }, "float_val": { - "type": "number" + "type": "number", + "description": "This is a float value" }, "bool_val": { - "type": "boolean" + "type": "boolean", + "description": "This is a boolean value" }, "string_val": { "type": "string", + "description": "This is a string value", "default": "{{template \"file_name\"}}" } } From d4c0027556c248079765847ff46c13b852481183 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:40:47 +0100 Subject: [PATCH 222/310] Add `--debug` as shortcut for `--log-level debug` (#964) ## Changes This PR exposes simpler interfaces to end users. 
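For example, after this change the following two invocations produce the same debug-level logging. The subcommand here is only illustrative: `--debug` is registered as a persistent flag on the root command, so it applies to any subcommand, while `--log-level` keeps working but is now hidden from the global `--help` output.

```
databricks bundle validate --debug
databricks bundle validate --log-level debug
```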
## Tests
(screenshot omitted)

---
 cmd/root/logger.go | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/cmd/root/logger.go b/cmd/root/logger.go
index 0ad6756a..be342a7a 100644
--- a/cmd/root/logger.go
+++ b/cmd/root/logger.go
@@ -73,6 +73,7 @@ type logFlags struct {
 	file   flags.LogFileFlag
 	level  flags.LogLevelFlag
 	output flags.Output
+	debug  bool
 }
 
 func (f *logFlags) makeLogHandler(opts slog.HandlerOptions) (slog.Handler, error) {
@@ -95,6 +96,10 @@ func (f *logFlags) makeLogHandler(opts slog.HandlerOptions) (slog.Handler, error
 }
 
 func (f *logFlags) initializeContext(ctx context.Context) (context.Context, error) {
+	if f.debug {
+		f.level.Set("debug")
+	}
+
 	opts := slog.HandlerOptions{}
 	opts.Level = f.level.Level()
 	opts.AddSource = true
@@ -137,9 +142,17 @@ func initLogFlags(cmd *cobra.Command) *logFlags {
 		f.output.Set(v)
 	}
 
-	cmd.PersistentFlags().Var(&f.file, "log-file", "file to write logs to")
-	cmd.PersistentFlags().Var(&f.level, "log-level", "log level")
-	cmd.PersistentFlags().Var(&f.output, "log-format", "log output format (text or json)")
+	flags := cmd.PersistentFlags()
+	flags.BoolVar(&f.debug, "debug", false, "enable debug logging")
+	flags.Var(&f.file, "log-file", "file to write logs to")
+	flags.Var(&f.level, "log-level", "log level")
+	flags.Var(&f.output, "log-format", "log output format (text or json)")
+
+	// mark fine-grained flags hidden from global --help
+	flags.MarkHidden("log-file")
+	flags.MarkHidden("log-level")
+	flags.MarkHidden("log-format")
+
 	cmd.RegisterFlagCompletionFunc("log-file", f.file.Complete)
 	cmd.RegisterFlagCompletionFunc("log-level", f.level.Complete)
 	cmd.RegisterFlagCompletionFunc("log-format", f.output.Complete)

From f111b0846e5b3fab4c3f67e62fa92a93a4eb400e Mon Sep 17 00:00:00 2001
From: Serge Smertin <259697+nfx@users.noreply.github.com>
Date: Thu, 9 Nov 2023 15:24:05 +0100
Subject: [PATCH 223/310] Added process stubbing for easier testing of launched subprocesses (#963)

## Changes
This PR makes unit testing with subprocesses fast.
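The snippet below shows the intended usage: a test installs a stub on the context, after which every subprocess launched through this package's helpers is intercepted and recorded instead of being executed: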
```
ctx := context.Background()
ctx, stub := process.WithStub(ctx)
stub.WithStdout("meeee")

ctx = env.Set(ctx, "FOO", "bar")

out, err := process.Background(ctx, []string{"/usr/local/bin/meeecho", "1", "--foo", "bar"})
require.NoError(t, err)
require.Equal(t, "meeee", out)

require.Equal(t, 1, stub.Len())
require.Equal(t, []string{"meeecho 1 --foo bar"}, stub.Commands())

allEnv := stub.CombinedEnvironment()
require.Equal(t, "bar", allEnv["FOO"])
require.Equal(t, "bar", stub.LookupEnv("FOO"))
```

This should make further iterations of https://github.com/databricks/cli/pull/914 easier

## Tests
`make test`

---
 libs/process/background.go |   2 +-
 libs/process/forwarded.go  |   7 +-
 libs/process/stub.go       | 154 +++++++++++++++++++++++++++++++++++++
 libs/process/stub_test.go  |  81 +++++++++++++++++++
 4 files changed, 237 insertions(+), 7 deletions(-)
 create mode 100644 libs/process/stub.go
 create mode 100644 libs/process/stub_test.go

diff --git a/libs/process/background.go b/libs/process/background.go
index 26178a1d..2649d0ef 100644
--- a/libs/process/background.go
+++ b/libs/process/background.go
@@ -47,7 +47,7 @@ func Background(ctx context.Context, args []string, opts ...execOption) (string,
 			return "", err
 		}
 	}
-	if err := cmd.Run(); err != nil {
+	if err := runCmd(ctx, cmd); err != nil {
 		return stdout.String(), &ProcessError{
 			Err:     err,
 			Command: commandStr,

diff --git a/libs/process/forwarded.go b/libs/process/forwarded.go
index df3c2dbd..1d7fdb71 100644
--- a/libs/process/forwarded.go
+++ b/libs/process/forwarded.go
@@ -34,10 +34,5 @@ func Forwarded(ctx context.Context, args []string, src io.Reader, outWriter, err
 		}
 	}
 
-	err := cmd.Start()
-	if err != nil {
-		return err
-	}
-
-	return cmd.Wait()
+	return runCmd(ctx, cmd)
 }

diff --git a/libs/process/stub.go b/libs/process/stub.go
new file mode 100644
index 00000000..280a9a8a
--- /dev/null
+++ b/libs/process/stub.go
@@ -0,0 +1,154 @@
+package process
+
+import (
+	"context"
+	"fmt"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+var stubKey int
+
+// WithStub creates a process stub for fast and flexible testing of subprocesses
+func WithStub(ctx context.Context) (context.Context, *processStub) {
+	stub := &processStub{responses: map[string]reponseStub{}}
+	ctx = context.WithValue(ctx, &stubKey, stub)
+	return ctx, stub
+}
+
+func runCmd(ctx context.Context, cmd *exec.Cmd) error {
+	stub, ok := ctx.Value(&stubKey).(*processStub)
+	if ok {
+		return stub.run(cmd)
+	}
+	return cmd.Run()
+}
+
+type reponseStub struct {
+	stdout string
+	stderr string
+	err    error
+}
+
+type processStub struct {
+	reponseStub
+	calls     []*exec.Cmd
+	callback  func(*exec.Cmd) error
+	responses map[string]reponseStub
+}
+
+func (s *processStub) WithStdout(output string) *processStub {
+	s.reponseStub.stdout = output
+	return s
+}
+
+func (s *processStub) WithFailure(err error) *processStub {
+	s.reponseStub.err = err
+	return s
+}
+
+func (s *processStub) WithCallback(cb func(cmd *exec.Cmd) error) *processStub {
+	s.callback = cb
+	return s
+}
+
+// WithStdoutFor predefines standard output response for a command. The first word
+// in the command string is the executable name, and NOT the executable path.
+// The following command would stub "2" output for the "/usr/local/bin/echo 1" command:
+//
+//	stub.WithStdoutFor("echo 1", "2")
+func (s *processStub) WithStdoutFor(command, out string) *processStub {
+	s.responses[command] = reponseStub{
+		stdout: out,
+		stderr: s.responses[command].stderr,
+		err:    s.responses[command].err,
+	}
+	return s
+}
+
+// WithStderrFor same as [WithStdoutFor], but for standard error
+func (s *processStub) WithStderrFor(command, out string) *processStub {
+	s.responses[command] = reponseStub{
+		stderr: out,
+		stdout: s.responses[command].stdout,
+		err:    s.responses[command].err,
+	}
+	return s
+}
+
+// WithFailureFor same as [WithStdoutFor], but for process failures
+func (s *processStub) WithFailureFor(command string, err error) *processStub {
+	s.responses[command] = reponseStub{
+		err:    err,
+		stderr: s.responses[command].stderr,
+		stdout: s.responses[command].stdout,
+	}
+	return s
+}
+
+func (s *processStub) String() string {
+	return fmt.Sprintf("process stub with %d calls", s.Len())
+}
+
+func (s *processStub) Len() int {
+	return len(s.calls)
+}
+
+func (s *processStub) Commands() (called []string) {
+	for _, v := range s.calls {
+		called = append(called, s.normCmd(v))
+	}
+	return
+}
+
+// CombinedEnvironment returns all environment variables used for all commands
+func (s *processStub) CombinedEnvironment() map[string]string {
+	environment := map[string]string{}
+	for _, cmd := range s.calls {
+		for _, line := range cmd.Env {
+			k, v, ok := strings.Cut(line, "=")
+			if !ok {
+				continue
+			}
+			environment[k] = v
+		}
+	}
+	return environment
+}
+
+// LookupEnv returns a value from any of the triggered process environments
+func (s *processStub) LookupEnv(key string) string {
+	environment := s.CombinedEnvironment()
+	return environment[key]
+}
+
+func (s *processStub) normCmd(v *exec.Cmd) string {
+	// to reduce testing noise, we collect here only the deterministic binary basenames, e.g.
+	// "/var/folders/bc/7qf8yghj6v14t40096pdcqy40000gp/T/tmp.03CAcYcbOI/python3" becomes "python3".
+	// Use [processStub.WithCallback] if you need to match against the full executable path.
+ binaryName := filepath.Base(v.Path) + args := strings.Join(v.Args[1:], " ") + return fmt.Sprintf("%s %s", binaryName, args) +} + +func (s *processStub) run(cmd *exec.Cmd) error { + s.calls = append(s.calls, cmd) + resp, ok := s.responses[s.normCmd(cmd)] + if ok { + if resp.stdout != "" { + cmd.Stdout.Write([]byte(resp.stdout)) + } + if resp.stderr != "" { + cmd.Stderr.Write([]byte(resp.stderr)) + } + return resp.err + } + if s.callback != nil { + return s.callback(cmd) + } + if s.reponseStub.stdout != "" { + cmd.Stdout.Write([]byte(s.reponseStub.stdout)) + } + return s.reponseStub.err +} diff --git a/libs/process/stub_test.go b/libs/process/stub_test.go new file mode 100644 index 00000000..65f59f81 --- /dev/null +++ b/libs/process/stub_test.go @@ -0,0 +1,81 @@ +package process_test + +import ( + "context" + "fmt" + "os/exec" + "testing" + + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/process" + "github.com/stretchr/testify/require" +) + +func TestStubOutput(t *testing.T) { + ctx := context.Background() + ctx, stub := process.WithStub(ctx) + stub.WithStdout("meeee") + + ctx = env.Set(ctx, "FOO", "bar") + + out, err := process.Background(ctx, []string{"/usr/local/bin/meeecho", "1", "--foo", "bar"}) + require.NoError(t, err) + require.Equal(t, "meeee", out) + require.Equal(t, 1, stub.Len()) + require.Equal(t, []string{"meeecho 1 --foo bar"}, stub.Commands()) + + allEnv := stub.CombinedEnvironment() + require.Equal(t, "bar", allEnv["FOO"]) + require.Equal(t, "bar", stub.LookupEnv("FOO")) +} + +func TestStubFailure(t *testing.T) { + ctx := context.Background() + ctx, stub := process.WithStub(ctx) + stub.WithFailure(fmt.Errorf("nope")) + + _, err := process.Background(ctx, []string{"/bin/meeecho", "1"}) + require.EqualError(t, err, "/bin/meeecho 1: nope") + require.Equal(t, 1, stub.Len()) +} + +func TestStubCallback(t *testing.T) { + ctx := context.Background() + ctx, stub := process.WithStub(ctx) + stub.WithCallback(func(cmd *exec.Cmd) error { + cmd.Stderr.Write([]byte("something...")) + cmd.Stdout.Write([]byte("else...")) + return fmt.Errorf("yep") + }) + + _, err := process.Background(ctx, []string{"/bin/meeecho", "1"}) + require.EqualError(t, err, "/bin/meeecho 1: yep") + require.Equal(t, 1, stub.Len()) + + var processError *process.ProcessError + require.ErrorAs(t, err, &processError) + require.Equal(t, "something...", processError.Stderr) + require.Equal(t, "else...", processError.Stdout) +} + +func TestStubResponses(t *testing.T) { + ctx := context.Background() + ctx, stub := process.WithStub(ctx) + stub. + WithStdoutFor("qux 1", "first"). + WithStdoutFor("qux 2", "second"). + WithFailureFor("qux 3", fmt.Errorf("nope")) + + first, err := process.Background(ctx, []string{"/path/is/irrelevant/qux", "1"}) + require.NoError(t, err) + require.Equal(t, "first", first) + + second, err := process.Background(ctx, []string{"/path/is/irrelevant/qux", "2"}) + require.NoError(t, err) + require.Equal(t, "second", second) + + _, err = process.Background(ctx, []string{"/path/is/irrelevant/qux", "3"}) + require.EqualError(t, err, "/path/is/irrelevant/qux 3: nope") + + require.Equal(t, "process stub with 3 calls", stub.String()) +} From 3284a8c56c65cfa53cd098874f99b2b76714353e Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Thu, 9 Nov 2023 17:38:45 +0100 Subject: [PATCH 224/310] Improved usability of `databricks auth login ... 
--configure-cluster` flow by displaying cluster type and runtime version (#956) This PR adds selectors for Databricks-connect compatible clusters and SQL warehouses Tested in https://github.com/databricks/cli/pull/914 --- cmd/auth/login.go | 17 +- libs/databrickscfg/cfgpickers/clusters.go | 192 ++++++++++++++++++ .../databrickscfg/cfgpickers/clusters_test.go | 146 +++++++++++++ libs/databrickscfg/cfgpickers/warehouses.go | 65 ++++++ .../cfgpickers/warehouses_test.go | 66 ++++++ 5 files changed, 475 insertions(+), 11 deletions(-) create mode 100644 libs/databrickscfg/cfgpickers/clusters.go create mode 100644 libs/databrickscfg/cfgpickers/clusters_test.go create mode 100644 libs/databrickscfg/cfgpickers/warehouses.go create mode 100644 libs/databrickscfg/cfgpickers/warehouses_test.go diff --git a/cmd/auth/login.go b/cmd/auth/login.go index c2b821b6..28e0025d 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -8,9 +8,9 @@ import ( "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/cfgpickers" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" - "github.com/databricks/databricks-sdk-go/service/compute" "github.com/spf13/cobra" ) @@ -28,6 +28,8 @@ func configureHost(ctx context.Context, persistentAuth *auth.PersistentAuth, arg return nil } +const minimalDbConnectVersion = "13.1" + func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { cmd := &cobra.Command{ Use: "login [HOST]", @@ -95,19 +97,12 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { return err } ctx := cmd.Context() - - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "Loading list of clusters to select from" - names, err := w.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load clusters list. 
Original error: %w", err) - } - clusterId, err := cmdio.Select(ctx, names, "Choose cluster") + clusterID, err := cfgpickers.AskForCluster(ctx, w, + cfgpickers.WithDatabricksConnect(minimalDbConnectVersion)) if err != nil { return err } - cfg.ClusterID = clusterId + cfg.ClusterID = clusterID } if profileName != "" { diff --git a/libs/databrickscfg/cfgpickers/clusters.go b/libs/databrickscfg/cfgpickers/clusters.go new file mode 100644 index 00000000..ac037698 --- /dev/null +++ b/libs/databrickscfg/cfgpickers/clusters.go @@ -0,0 +1,192 @@ +package cfgpickers + +import ( + "context" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/fatih/color" + "github.com/manifoldco/promptui" + "golang.org/x/mod/semver" +) + +var minUcRuntime = canonicalVersion("v12.0") + +var dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) +var dbrSnapshotVersionRegex = regexp.MustCompile(`^(\d+)\.x-snapshot.*`) + +func canonicalVersion(v string) string { + return semver.Canonical("v" + strings.TrimPrefix(v, "v")) +} + +func GetRuntimeVersion(cluster compute.ClusterDetails) (string, bool) { + match := dbrVersionRegex.FindStringSubmatch(cluster.SparkVersion) + if len(match) < 1 { + match = dbrSnapshotVersionRegex.FindStringSubmatch(cluster.SparkVersion) + if len(match) > 1 { + // we return 14.999 for 14.x-snapshot for semver.Compare() to work properly + return fmt.Sprintf("%s.999", match[1]), true + } + return "", false + } + return match[1], true +} + +func IsCompatibleWithUC(cluster compute.ClusterDetails, minVersion string) bool { + minVersion = canonicalVersion(minVersion) + if semver.Compare(minUcRuntime, minVersion) >= 0 { + return false + } + runtimeVersion, ok := GetRuntimeVersion(cluster) + if !ok { + return false + } + clusterRuntime := canonicalVersion(runtimeVersion) + if semver.Compare(minVersion, clusterRuntime) > 0 { + return false + } + switch cluster.DataSecurityMode { + case compute.DataSecurityModeUserIsolation, compute.DataSecurityModeSingleUser: + return true + default: + return false + } +} + +var ErrNoCompatibleClusters = errors.New("no compatible clusters found") + +type compatibleCluster struct { + compute.ClusterDetails + versionName string +} + +func (v compatibleCluster) Access() string { + switch v.DataSecurityMode { + case compute.DataSecurityModeUserIsolation: + return "Shared" + case compute.DataSecurityModeSingleUser: + return "Assigned" + default: + return "Unknown" + } +} + +func (v compatibleCluster) Runtime() string { + runtime, _, _ := strings.Cut(v.versionName, " (") + return runtime +} + +func (v compatibleCluster) State() string { + state := v.ClusterDetails.State + switch state { + case compute.StateRunning, compute.StateResizing: + return color.GreenString(state.String()) + case compute.StateError, compute.StateTerminated, compute.StateTerminating, compute.StateUnknown: + return color.RedString(state.String()) + default: + return color.BlueString(state.String()) + } +} + +type clusterFilter func(cluster *compute.ClusterDetails, me *iam.User) bool + +func WithDatabricksConnect(minVersion string) func(*compute.ClusterDetails, *iam.User) bool { + return func(cluster *compute.ClusterDetails, me *iam.User) bool { + if !IsCompatibleWithUC(*cluster, minVersion) { + return false + } + switch cluster.ClusterSource { + case compute.ClusterSourceJob, + 
compute.ClusterSourceModels, + compute.ClusterSourcePipeline, + compute.ClusterSourcePipelineMaintenance, + compute.ClusterSourceSql: + // only UI and API clusters are usable for DBConnect. + // `CanUseClient: "NOTEBOOKS"`` didn't seem to have an effect. + return false + } + if cluster.SingleUserName != "" && cluster.SingleUserName != me.UserName { + return false + } + return true + } +} + +func loadInteractiveClusters(ctx context.Context, w *databricks.WorkspaceClient, filters []clusterFilter) ([]compatibleCluster, error) { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "Loading list of clusters to select from" + defer close(promptSpinner) + all, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{ + CanUseClient: "NOTEBOOKS", + }) + if err != nil { + return nil, fmt.Errorf("list clusters: %w", err) + } + me, err := w.CurrentUser.Me(ctx) + if err != nil { + return nil, fmt.Errorf("current user: %w", err) + } + versions := map[string]string{} + sv, err := w.Clusters.SparkVersions(ctx) + if err != nil { + return nil, fmt.Errorf("list runtime versions: %w", err) + } + for _, v := range sv.Versions { + versions[v.Key] = v.Name + } + var compatible []compatibleCluster + for _, cluster := range all { + var skip bool + for _, filter := range filters { + if !filter(&cluster, me) { + skip = true + } + } + if skip { + continue + } + compatible = append(compatible, compatibleCluster{ + ClusterDetails: cluster, + versionName: versions[cluster.SparkVersion], + }) + } + return compatible, nil +} + +func AskForCluster(ctx context.Context, w *databricks.WorkspaceClient, filters ...clusterFilter) (string, error) { + compatible, err := loadInteractiveClusters(ctx, w, filters) + if err != nil { + return "", fmt.Errorf("load: %w", err) + } + if len(compatible) == 0 { + return "", ErrNoCompatibleClusters + } + if len(compatible) == 1 { + return compatible[0].ClusterId, nil + } + i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ + Label: "Choose compatible cluster", + Items: compatible, + Searcher: func(input string, idx int) bool { + lower := strings.ToLower(compatible[idx].ClusterName) + return strings.Contains(lower, input) + }, + StartInSearchMode: true, + Templates: &promptui.SelectTemplates{ + Label: "{{.ClusterName | faint}}", + Active: `{{.ClusterName | bold}} ({{.State}} {{.Access}} Runtime {{.Runtime}}) ({{.ClusterId | faint}})`, + Inactive: `{{.ClusterName}} ({{.State}} {{.Access}} Runtime {{.Runtime}})`, + Selected: `{{ "Configured cluster" | faint }}: {{ .ClusterName | bold }} ({{.ClusterId | faint}})`, + }, + }) + if err != nil { + return "", err + } + return compatible[i].ClusterId, nil +} diff --git a/libs/databrickscfg/cfgpickers/clusters_test.go b/libs/databrickscfg/cfgpickers/clusters_test.go new file mode 100644 index 00000000..362d6904 --- /dev/null +++ b/libs/databrickscfg/cfgpickers/clusters_test.go @@ -0,0 +1,146 @@ +package cfgpickers + +import ( + "bytes" + "context" + "testing" + + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/qa" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/require" +) + +func TestIsCompatible(t *testing.T) { + require.True(t, IsCompatibleWithUC(compute.ClusterDetails{ + SparkVersion: "13.2.x-aarch64-scala2.12", + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, "13.0")) + require.False(t, 
IsCompatibleWithUC(compute.ClusterDetails{ + SparkVersion: "13.2.x-aarch64-scala2.12", + DataSecurityMode: compute.DataSecurityModeNone, + }, "13.0")) + require.False(t, IsCompatibleWithUC(compute.ClusterDetails{ + SparkVersion: "9.1.x-photon-scala2.12", + DataSecurityMode: compute.DataSecurityModeNone, + }, "13.0")) + require.False(t, IsCompatibleWithUC(compute.ClusterDetails{ + SparkVersion: "9.1.x-photon-scala2.12", + DataSecurityMode: compute.DataSecurityModeNone, + }, "10.0")) + require.False(t, IsCompatibleWithUC(compute.ClusterDetails{ + SparkVersion: "custom-9.1.x-photon-scala2.12", + DataSecurityMode: compute.DataSecurityModeNone, + }, "14.0")) +} + +func TestIsCompatibleWithSnapshots(t *testing.T) { + require.True(t, IsCompatibleWithUC(compute.ClusterDetails{ + SparkVersion: "14.x-snapshot-cpu-ml-scala2.12", + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, "14.0")) +} + +func TestFirstCompatibleCluster(t *testing.T) { + cfg, server := qa.HTTPFixtures{ + { + Method: "GET", + Resource: "/api/2.0/clusters/list?can_use_client=NOTEBOOKS", + Response: compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{ + { + ClusterId: "abc-id", + ClusterName: "first shared", + DataSecurityMode: compute.DataSecurityModeUserIsolation, + SparkVersion: "12.2.x-whatever", + State: compute.StateRunning, + }, + { + ClusterId: "bcd-id", + ClusterName: "second personal", + DataSecurityMode: compute.DataSecurityModeSingleUser, + SparkVersion: "14.5.x-whatever", + State: compute.StateRunning, + SingleUserName: "serge", + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/Me", + Response: iam.User{ + UserName: "serge", + }, + }, + { + Method: "GET", + Resource: "/api/2.0/clusters/spark-versions", + Response: compute.GetSparkVersionsResponse{ + Versions: []compute.SparkVersion{ + { + Key: "14.5.x-whatever", + Name: "14.5 (Awesome)", + }, + }, + }, + }, + }.Config(t) + defer server.Close() + w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) + + ctx := context.Background() + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "...")) + clusterID, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) + require.NoError(t, err) + require.Equal(t, "bcd-id", clusterID) +} + +func TestNoCompatibleClusters(t *testing.T) { + cfg, server := qa.HTTPFixtures{ + { + Method: "GET", + Resource: "/api/2.0/clusters/list?can_use_client=NOTEBOOKS", + Response: compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{ + { + ClusterId: "abc-id", + ClusterName: "first shared", + DataSecurityMode: compute.DataSecurityModeUserIsolation, + SparkVersion: "12.2.x-whatever", + State: compute.StateRunning, + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/Me", + Response: iam.User{ + UserName: "serge", + }, + }, + { + Method: "GET", + Resource: "/api/2.0/clusters/spark-versions", + Response: compute.GetSparkVersionsResponse{ + Versions: []compute.SparkVersion{ + { + Key: "14.5.x-whatever", + Name: "14.5 (Awesome)", + }, + }, + }, + }, + }.Config(t) + defer server.Close() + w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) + + ctx := context.Background() + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "...")) + _, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) + require.Equal(t, ErrNoCompatibleClusters, err) +} diff --git 
a/libs/databrickscfg/cfgpickers/warehouses.go b/libs/databrickscfg/cfgpickers/warehouses.go new file mode 100644 index 00000000..65b5f8c8 --- /dev/null +++ b/libs/databrickscfg/cfgpickers/warehouses.go @@ -0,0 +1,65 @@ +package cfgpickers + +import ( + "context" + "errors" + "fmt" + + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/fatih/color" +) + +var ErrNoCompatibleWarehouses = errors.New("no compatible warehouses") + +type warehouseFilter func(sql.EndpointInfo) bool + +func WithWarehouseTypes(types ...sql.EndpointInfoWarehouseType) func(sql.EndpointInfo) bool { + allowed := map[sql.EndpointInfoWarehouseType]bool{} + for _, v := range types { + allowed[v] = true + } + return func(ei sql.EndpointInfo) bool { + return allowed[ei.WarehouseType] + } +} + +func AskForWarehouse(ctx context.Context, w *databricks.WorkspaceClient, filters ...warehouseFilter) (string, error) { + all, err := w.Warehouses.ListAll(ctx, sql.ListWarehousesRequest{}) + if err != nil { + return "", fmt.Errorf("list warehouses: %w", err) + } + var lastWarehouseID string + names := map[string]string{} + for _, warehouse := range all { + var skip bool + for _, filter := range filters { + if !filter(warehouse) { + skip = true + } + } + if skip { + continue + } + var state string + switch warehouse.State { + case sql.StateRunning: + state = color.GreenString(warehouse.State.String()) + case sql.StateStopped, sql.StateDeleted, sql.StateStopping, sql.StateDeleting: + state = color.RedString(warehouse.State.String()) + default: + state = color.BlueString(warehouse.State.String()) + } + visibleTouser := fmt.Sprintf("%s (%s %s)", warehouse.Name, state, warehouse.WarehouseType) + names[visibleTouser] = warehouse.Id + lastWarehouseID = warehouse.Id + } + if len(names) == 0 { + return "", ErrNoCompatibleWarehouses + } + if len(names) == 1 { + return lastWarehouseID, nil + } + return cmdio.Select(ctx, names, "Choose SQL Warehouse") +} diff --git a/libs/databrickscfg/cfgpickers/warehouses_test.go b/libs/databrickscfg/cfgpickers/warehouses_test.go new file mode 100644 index 00000000..d6030b49 --- /dev/null +++ b/libs/databrickscfg/cfgpickers/warehouses_test.go @@ -0,0 +1,66 @@ +package cfgpickers + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/qa" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFirstCompatibleWarehouse(t *testing.T) { + cfg, server := qa.HTTPFixtures{ + { + Method: "GET", + Resource: "/api/2.0/sql/warehouses?", + Response: sql.ListWarehousesResponse{ + Warehouses: []sql.EndpointInfo{ + { + Id: "efg-id", + Name: "First PRO Warehouse", + WarehouseType: sql.EndpointInfoWarehouseTypePro, + }, + { + Id: "ghe-id", + Name: "Second UNKNOWN Warehouse", + WarehouseType: sql.EndpointInfoWarehouseTypeTypeUnspecified, + }, + }, + }, + }, + }.Config(t) + defer server.Close() + w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) + + ctx := context.Background() + clusterID, err := AskForWarehouse(ctx, w, WithWarehouseTypes(sql.EndpointInfoWarehouseTypePro)) + require.NoError(t, err) + assert.Equal(t, "efg-id", clusterID) +} + +func TestNoCompatibleWarehouses(t *testing.T) { + cfg, server := qa.HTTPFixtures{ + { + Method: "GET", + Resource: "/api/2.0/sql/warehouses?", + Response: sql.ListWarehousesResponse{ + Warehouses: 
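+						// a single CLASSIC warehouse; the PRO-only filter below must reject it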
[]sql.EndpointInfo{ + { + Id: "efg-id", + Name: "...", + WarehouseType: sql.EndpointInfoWarehouseTypeClassic, + }, + }, + }, + }, + }.Config(t) + defer server.Close() + w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) + + ctx := context.Background() + _, err := AskForWarehouse(ctx, w, WithWarehouseTypes(sql.EndpointInfoWarehouseTypePro)) + assert.Equal(t, ErrNoCompatibleWarehouses, err) +} From f208853626a09a0947620efccdd51ba2a251fe30 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 10 Nov 2023 12:05:32 +0100 Subject: [PATCH 225/310] Fix integration test asserting errors on unknown template parameters (#977) ## Changes Recent descriptions were made mandatory for input parameters so this test started failing. ## Tests The test passes now. --- .../init/field-does-not-exist/databricks_template_schema.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/testdata/init/field-does-not-exist/databricks_template_schema.json b/internal/testdata/init/field-does-not-exist/databricks_template_schema.json index c37fc089..928e5039 100644 --- a/internal/testdata/init/field-does-not-exist/databricks_template_schema.json +++ b/internal/testdata/init/field-does-not-exist/databricks_template_schema.json @@ -2,7 +2,8 @@ "properties": { "foo": { "type": "string", - "default": "abc" + "default": "abc", + "description": "foo-bar" } } } From e82a49b4e900d3b78cbf43dcc5ef3ba148f9ce6f Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Fri, 10 Nov 2023 15:03:57 +0100 Subject: [PATCH 226/310] Make `databricks configure` save only explicit fields (#973) ## Changes Save only explicit fields to the config file This applies to two commands: `configure` and `auth login`. The latter only pulls env vars in the case of the `--configure-cluster` flag ## Tests Manual, plus additional unit test for the `configure` command --- cmd/auth/login.go | 9 ++++- cmd/configure/configure.go | 6 ++- cmd/configure/configure_test.go | 66 +++++++++++++++++++++++++++++++++ 3 files changed, 78 insertions(+), 3 deletions(-) diff --git a/cmd/auth/login.go b/cmd/auth/login.go index 28e0025d..8c6d52fc 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -106,8 +106,13 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { } if profileName != "" { - cfg.Profile = profileName - err = databrickscfg.SaveToProfile(ctx, &cfg) + err = databrickscfg.SaveToProfile(ctx, &config.Config{ + Profile: profileName, + Host: cfg.Host, + AuthType: cfg.AuthType, + AccountID: cfg.AccountID, + ClusterID: cfg.ClusterID, + }) if err != nil { return err } diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 33ab918e..55ede538 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -160,7 +160,11 @@ func newConfigureCommand() *cobra.Command { cfg.DatabricksCliPath = "" // Save profile to config file. 
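+	// Persist only the fields this command collected explicitly; writing the
+	// full cfg would also save values resolved from environment variables.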
- return databrickscfg.SaveToProfile(ctx, &cfg) + return databrickscfg.SaveToProfile(ctx, &config.Config{ + Profile: cfg.Profile, + Host: cfg.Host, + Token: cfg.Token, + }) } return cmd diff --git a/cmd/configure/configure_test.go b/cmd/configure/configure_test.go index cf0505ed..259c83ad 100644 --- a/cmd/configure/configure_test.go +++ b/cmd/configure/configure_test.go @@ -106,6 +106,72 @@ func TestConfigFileFromEnvNoInteractive(t *testing.T) { assertKeyValueInSection(t, defaultSection, "token", "token") } +func TestEnvVarsConfigureNoInteractive(t *testing.T) { + ctx := context.Background() + tempHomeDir := setup(t) + cfgPath := filepath.Join(tempHomeDir, ".databrickscfg") + inp := getTempFileWithContent(t, tempHomeDir, "token\n") + defer inp.Close() + oldStdin := os.Stdin + t.Cleanup(func() { os.Stdin = oldStdin }) + os.Stdin = inp + + t.Setenv("DATABRICKS_HOST", "https://host") + t.Setenv("DATABRICKS_AUTH_TYPE", "metadata-service") + t.Setenv("DATABRICKS_METADATA_SERVICE_URL", "https://metadata") + + cmd := cmd.New(ctx) + cmd.SetArgs([]string{"configure", "--token"}) + + err := cmd.ExecuteContext(ctx) + assert.NoError(t, err) + + _, err = os.Stat(cfgPath) + assert.NoError(t, err) + + cfg, err := ini.Load(cfgPath) + assert.NoError(t, err) + + defaultSection, err := cfg.GetSection("DEFAULT") + assert.NoError(t, err) + + assertKeyValueInSection(t, defaultSection, "host", "https://host") + assertKeyValueInSection(t, defaultSection, "token", "token") + + // We should only save host and token for a profile, other env variables should not be saved + _, err = defaultSection.GetKey("auth_type") + assert.NotNil(t, err) + _, err = defaultSection.GetKey("metadata_service_url") + assert.NotNil(t, err) +} + +func TestEnvVarsConfigureNoArgsNoInteractive(t *testing.T) { + ctx := context.Background() + tempHomeDir := setup(t) + cfgPath := filepath.Join(tempHomeDir, ".databrickscfg") + + t.Setenv("DATABRICKS_HOST", "https://host") + t.Setenv("DATABRICKS_TOKEN", "secret") + + cmd := cmd.New(ctx) + cmd.SetArgs([]string{"configure"}) + + err := cmd.ExecuteContext(ctx) + assert.NoError(t, err) + + _, err = os.Stat(cfgPath) + assert.NoError(t, err) + + cfg, err := ini.Load(cfgPath) + assert.NoError(t, err) + + defaultSection, err := cfg.GetSection("DEFAULT") + assert.NoError(t, err) + + assertKeyValueInSection(t, defaultSection, "host", "https://host") + assertKeyValueInSection(t, defaultSection, "token", "secret") +} + func TestCustomProfileConfigureNoInteractive(t *testing.T) { ctx := context.Background() tempHomeDir := setup(t) From ea4153e3237e8ba308fa0f722198a7a1d7b1e5e9 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 10 Nov 2023 15:09:02 +0100 Subject: [PATCH 227/310] Fixed flaky TestBackgroundCombinedOutputFailure (#978) ## Changes `TestBackgroundCombinedOutputFailure` was occasionally failing because combined output could have been in different order, see https://github.com/databricks/cli/actions/runs/6823883271/job/18558675165?pr=928 --- libs/process/background_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libs/process/background_test.go b/libs/process/background_test.go index 94f7e881..5bf2400b 100644 --- a/libs/process/background_test.go +++ b/libs/process/background_test.go @@ -66,7 +66,10 @@ func TestBackgroundCombinedOutputFailure(t *testing.T) { assert.Equal(t, "2", strings.TrimSpace(processErr.Stdout)) } assert.Equal(t, "2", strings.TrimSpace(res)) - assert.Equal(t, "1\n2\n", strings.ReplaceAll(buf.String(), "\r", "")) + + out := 
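+	// normalize line endings first so the assertions below also pass on Windows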
strings.ReplaceAll(buf.String(), "\r", "") + assert.Contains(t, out, "1\n") + assert.Contains(t, out, "2\n") } func TestBackgroundNoStdin(t *testing.T) { From ef76fe02f03e0ea288dc0b7ffd650819e54f75a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 11:14:08 +0000 Subject: [PATCH 228/310] Bump golang.org/x/term from 0.13.0 to 0.14.0 (#981) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.13.0 to 0.14.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.13.0&new-version=0.14.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e52bb383..86a94551 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/mod v0.14.0 golang.org/x/oauth2 v0.13.0 golang.org/x/sync v0.5.0 - golang.org/x/term v0.13.0 + golang.org/x/term v0.14.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) diff --git a/go.sum b/go.sum index 665b0dea..2ae6936d 100644 --- a/go.sum +++ b/go.sum @@ -223,8 +223,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From 14d2d0a2d59967e4b63c48e86767ead9f4f0f198 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 11:18:16 +0000 Subject: [PATCH 229/310] Bump github.com/hashicorp/terraform-json from 0.17.1 to 0.18.0 (#979) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/terraform-json](https://github.com/hashicorp/terraform-json) from 0.17.1 to 0.18.0.
Release notes

Sourced from github.com/hashicorp/terraform-json's releases.

v0.18.0

Commits
  • ac10835 Bump actions/checkout from 4.1.0 to 4.1.1 (#110)
  • 39a2ed7 Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 (#109)
  • d379256 Bump github.com/zclconf/go-cty from 1.14.0 to 1.14.1 (#107)
  • 33462c3 Bump actions/checkout from 4.0.0 to 4.1.0 (#105)
  • e58a208 Merge pull request #104 from hashicorp/alisdair/should-i-use-this-library
  • 25b978a Update README.md
  • a4dc39d Update README.md
  • 9aff8f0 Fix typo
  • 9901d28 Add PreviousAddress field to ResourceChange to support moved block (#95)
  • a7fb827 Update README to explain when not to use this library
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/hashicorp/terraform-json&package-manager=go_modules&previous-version=0.17.1&new-version=0.18.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 86a94551..db810251 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/hashicorp/go-version v1.6.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.1 // MPL 2.0 github.com/hashicorp/terraform-exec v0.19.0 // MPL 2.0 - github.com/hashicorp/terraform-json v0.17.1 // MPL 2.0 + github.com/hashicorp/terraform-json v0.18.0 // MPL 2.0 github.com/imdario/mergo v0.3.15 // BSD-3-Clause github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT @@ -50,7 +50,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/zclconf/go-cty v1.14.0 // indirect + github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/net v0.17.0 // indirect diff --git a/go.sum b/go.sum index 2ae6936d..6443c15e 100644 --- a/go.sum +++ b/go.sum @@ -100,8 +100,8 @@ github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAt github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE= github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= -github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= -github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= +github.com/hashicorp/terraform-json v0.18.0 h1:pCjgJEqqDESv4y0Tzdqfxr/edOIGkjs8keY42xfNBwU= +github.com/hashicorp/terraform-json v0.18.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -154,8 +154,8 @@ github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgw github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.14.0 h1:/Xrd39K7DXbHzlisFP9c4pHao4yyf+/Ug9LEz+Y/yhc= -github.com/zclconf/go-cty v1.14.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= +github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= From f3db42e622867843d86c0b2af7f80f98ab62ef8d Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 13 Nov 2023 12:29:40 +0100 Subject: [PATCH 230/310] Added support for top-level permissions (#928) ## Changes Now it's possible to define top level `permissions` section in bundle configuration and permissions defined there will be 
applied to all resources defined in the bundle. Supported top-level permission levels: CAN_MANAGE, CAN_VIEW, CAN_RUN. Permissions are applied to: Jobs, DLT Pipelines, ML Models, ML Experiments and Model Service Endpoints ``` bundle: name: permissions workspace: host: *** permissions: - level: CAN_VIEW group_name: test-group - level: CAN_MANAGE user_name: user@company.com - level: CAN_RUN service_principal_name: 123456-abcdef ``` ## Tests Added corresponding unit tests + ran `bundle validate` and `bundle deploy` manually --- bundle/config/root.go | 12 ++ bundle/config/target.go | 7 +- bundle/permissions/mutator.go | 136 +++++++++++++++++ bundle/permissions/mutator_test.go | 141 ++++++++++++++++++ bundle/permissions/utils.go | 81 ++++++++++ bundle/permissions/workspace_root.go | 78 ++++++++++ bundle/permissions/workspace_root_test.go | 129 ++++++++++++++++ bundle/phases/deploy.go | 2 + bundle/phases/initialize.go | 2 + .../tests/bundle_permissions/databricks.yml | 35 +++++ bundle/tests/bundle_permissions_test.go | 56 +++++++ 11 files changed, 678 insertions(+), 1 deletion(-) create mode 100644 bundle/permissions/mutator.go create mode 100644 bundle/permissions/mutator_test.go create mode 100644 bundle/permissions/utils.go create mode 100644 bundle/permissions/workspace_root.go create mode 100644 bundle/permissions/workspace_root_test.go create mode 100644 bundle/tests/bundle_permissions/databricks.yml create mode 100644 bundle/tests/bundle_permissions_test.go diff --git a/bundle/config/root.go b/bundle/config/root.go index 31867c6c..1fb5773b 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -6,6 +6,7 @@ import ( "path/filepath" "strings" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/ghodss/yaml" @@ -56,6 +57,10 @@ type Root struct { RunAs *jobs.JobRunAs `json:"run_as,omitempty"` Experimental *Experimental `json:"experimental,omitempty"` + + // Permissions section allows to define permissions which will be + // applied to all resources defined in bundle + Permissions []resources.Permission `json:"permissions,omitempty"` } // Load loads the bundle configuration file at the specified path. 
@@ -237,5 +242,12 @@ func (r *Root) MergeTargetOverrides(target *Target) error { } } + if target.Permissions != nil { + err = mergo.Merge(&r.Permissions, target.Permissions, mergo.WithAppendSlice) + if err != nil { + return err + } + } + return nil } diff --git a/bundle/config/target.go b/bundle/config/target.go index fc776c7b..1264430e 100644 --- a/bundle/config/target.go +++ b/bundle/config/target.go @@ -1,6 +1,9 @@ package config -import "github.com/databricks/databricks-sdk-go/service/jobs" +import ( + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" +) type Mode string @@ -37,6 +40,8 @@ type Target struct { RunAs *jobs.JobRunAs `json:"run_as,omitempty"` Sync *Sync `json:"sync,omitempty"` + + Permissions []resources.Permission `json:"permissions,omitempty"` } const ( diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go new file mode 100644 index 00000000..025556f3 --- /dev/null +++ b/bundle/permissions/mutator.go @@ -0,0 +1,136 @@ +package permissions + +import ( + "context" + "fmt" + "slices" + "strings" + + "github.com/databricks/cli/bundle" +) + +const CAN_MANAGE = "CAN_MANAGE" +const CAN_VIEW = "CAN_VIEW" +const CAN_RUN = "CAN_RUN" + +var allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} +var levelsMap = map[string](map[string]string){ + "jobs": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_MANAGE_RUN", + }, + "pipelines": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_RUN", + }, + "mlflow_experiments": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + "mlflow_models": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + "model_serving_endpoints": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_QUERY", + }, +} + +type bundlePermissions struct{} + +func ApplyBundlePermissions() bundle.Mutator { + return &bundlePermissions{} +} + +func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) error { + err := validate(b) + if err != nil { + return err + } + + applyForJobs(ctx, b) + applyForPipelines(ctx, b) + applyForMlModels(ctx, b) + applyForMlExperiments(ctx, b) + applyForModelServiceEndpoints(ctx, b) + + return nil +} + +func validate(b *bundle.Bundle) error { + for _, p := range b.Config.Permissions { + if !slices.Contains(allowedLevels, p.Level) { + return fmt.Errorf("invalid permission level: %s, allowed values: [%s]", p.Level, strings.Join(allowedLevels, ", ")) + } + } + + return nil +} + +func applyForJobs(ctx context.Context, b *bundle.Bundle) { + for _, job := range b.Config.Resources.Jobs { + job.Permissions = append(job.Permissions, convert( + ctx, + b.Config.Permissions, + job.Permissions, + job.Name, + levelsMap["jobs"], + )...) + } +} + +func applyForPipelines(ctx context.Context, b *bundle.Bundle) { + for _, pipeline := range b.Config.Resources.Pipelines { + pipeline.Permissions = append(pipeline.Permissions, convert( + ctx, + b.Config.Permissions, + pipeline.Permissions, + pipeline.Name, + levelsMap["pipelines"], + )...) + } +} + +func applyForMlExperiments(ctx context.Context, b *bundle.Bundle) { + for _, experiment := range b.Config.Resources.Experiments { + experiment.Permissions = append(experiment.Permissions, convert( + ctx, + b.Config.Permissions, + experiment.Permissions, + experiment.Name, + levelsMap["mlflow_experiments"], + )...) 
+ } +} + +func applyForMlModels(ctx context.Context, b *bundle.Bundle) { + for _, model := range b.Config.Resources.Models { + model.Permissions = append(model.Permissions, convert( + ctx, + b.Config.Permissions, + model.Permissions, + model.Name, + levelsMap["mlflow_models"], + )...) + } +} + +func applyForModelServiceEndpoints(ctx context.Context, b *bundle.Bundle) { + for _, model := range b.Config.Resources.ModelServingEndpoints { + model.Permissions = append(model.Permissions, convert( + ctx, + b.Config.Permissions, + model.Permissions, + model.Name, + levelsMap["model_serving_endpoints"], + )...) + } +} + +func (m *bundlePermissions) Name() string { + return "ApplyBundlePermissions" +} diff --git a/bundle/permissions/mutator_test.go b/bundle/permissions/mutator_test.go new file mode 100644 index 00000000..d9bf3efe --- /dev/null +++ b/bundle/permissions/mutator_test.go @@ -0,0 +1,141 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/stretchr/testify/require" +) + +func TestApplyBundlePermissions(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Users/foo@bar.com", + }, + Permissions: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "TestUser"}, + {Level: CAN_VIEW, GroupName: "TestGroup"}, + {Level: CAN_RUN, ServicePrincipalName: "TestServicePrincipal"}, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job_1": {JobSettings: &jobs.JobSettings{}}, + "job_2": {JobSettings: &jobs.JobSettings{}}, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}}, + "pipeline_2": {PipelineSpec: &pipelines.PipelineSpec{}}, + }, + Models: map[string]*resources.MlflowModel{ + "model_1": {Model: &ml.Model{}}, + "model_2": {Model: &ml.Model{}}, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment_1": {Experiment: &ml.Experiment{}}, + "experiment_2": {Experiment: &ml.Experiment{}}, + }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "endpoint_1": {CreateServingEndpoint: &serving.CreateServingEndpoint{}}, + "endpoint_2": {CreateServingEndpoint: &serving.CreateServingEndpoint{}}, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) + require.NoError(t, err) + + require.Len(t, b.Config.Resources.Jobs["job_1"].Permissions, 3) + require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) + require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_MANAGE_RUN", ServicePrincipalName: "TestServicePrincipal"}) + + require.Len(t, b.Config.Resources.Jobs["job_2"].Permissions, 3) + require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) + require.Contains(t, 
b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_MANAGE_RUN", ServicePrincipalName: "TestServicePrincipal"}) + + require.Len(t, b.Config.Resources.Pipelines["pipeline_1"].Permissions, 3) + require.Contains(t, b.Config.Resources.Pipelines["pipeline_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Pipelines["pipeline_1"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) + require.Contains(t, b.Config.Resources.Pipelines["pipeline_1"].Permissions, resources.Permission{Level: "CAN_RUN", ServicePrincipalName: "TestServicePrincipal"}) + + require.Len(t, b.Config.Resources.Pipelines["pipeline_2"].Permissions, 3) + require.Contains(t, b.Config.Resources.Pipelines["pipeline_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Pipelines["pipeline_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) + require.Contains(t, b.Config.Resources.Pipelines["pipeline_2"].Permissions, resources.Permission{Level: "CAN_RUN", ServicePrincipalName: "TestServicePrincipal"}) + + require.Len(t, b.Config.Resources.Models["model_1"].Permissions, 2) + require.Contains(t, b.Config.Resources.Models["model_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Models["model_1"].Permissions, resources.Permission{Level: "CAN_READ", GroupName: "TestGroup"}) + + require.Len(t, b.Config.Resources.Models["model_2"].Permissions, 2) + require.Contains(t, b.Config.Resources.Models["model_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Models["model_2"].Permissions, resources.Permission{Level: "CAN_READ", GroupName: "TestGroup"}) + + require.Len(t, b.Config.Resources.Experiments["experiment_1"].Permissions, 2) + require.Contains(t, b.Config.Resources.Experiments["experiment_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Experiments["experiment_1"].Permissions, resources.Permission{Level: "CAN_READ", GroupName: "TestGroup"}) + + require.Len(t, b.Config.Resources.Experiments["experiment_2"].Permissions, 2) + require.Contains(t, b.Config.Resources.Experiments["experiment_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Experiments["experiment_2"].Permissions, resources.Permission{Level: "CAN_READ", GroupName: "TestGroup"}) + + require.Len(t, b.Config.Resources.ModelServingEndpoints["endpoint_1"].Permissions, 3) + require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_1"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) + require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_1"].Permissions, resources.Permission{Level: "CAN_QUERY", ServicePrincipalName: "TestServicePrincipal"}) + + require.Len(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, 3) + require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, 
resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) + require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_QUERY", ServicePrincipalName: "TestServicePrincipal"}) +} + +func TestWarningOnOverlapPermission(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Users/foo@bar.com", + }, + Permissions: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "TestUser"}, + {Level: CAN_VIEW, GroupName: "TestGroup"}, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job_1": { + Permissions: []resources.Permission{ + {Level: CAN_VIEW, UserName: "TestUser"}, + }, + JobSettings: &jobs.JobSettings{}, + }, + "job_2": { + Permissions: []resources.Permission{ + {Level: CAN_VIEW, UserName: "TestUser2"}, + }, + JobSettings: &jobs.JobSettings{}, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) + require.NoError(t, err) + + require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_VIEW", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) + require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", UserName: "TestUser2"}) + require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) + +} diff --git a/bundle/permissions/utils.go b/bundle/permissions/utils.go new file mode 100644 index 00000000..9072cd25 --- /dev/null +++ b/bundle/permissions/utils.go @@ -0,0 +1,81 @@ +package permissions + +import ( + "context" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" +) + +func convert( + ctx context.Context, + bundlePermissions []resources.Permission, + resourcePermissions []resources.Permission, + resourceName string, + lm map[string]string, +) []resources.Permission { + permissions := make([]resources.Permission, 0) + for _, p := range bundlePermissions { + level, ok := lm[p.Level] + // If there is no bundle permission level defined in the map, it means + // it's not applicable for the resource, therefore skipping + if !ok { + continue + } + + if notifyForPermissionOverlap(ctx, p, resourcePermissions, resourceName) { + continue + } + + permissions = append(permissions, resources.Permission{ + Level: level, + UserName: p.UserName, + GroupName: p.GroupName, + ServicePrincipalName: p.ServicePrincipalName, + }) + } + + return permissions +} + +func isPermissionOverlap( + permission resources.Permission, + resourcePermissions []resources.Permission, + resourceName string, +) (bool, diag.Diagnostics) { + var diagnostics diag.Diagnostics + for _, rp := range resourcePermissions { + if rp.GroupName != "" && rp.GroupName == permission.GroupName { + diagnostics = diagnostics.Extend( + diag.Warningf("'%s' already has permissions set for '%s' group", resourceName, rp.GroupName), + ) + } + + if rp.UserName != "" && rp.UserName == permission.UserName { + diagnostics = diagnostics.Extend( + diag.Warningf("'%s' already has permissions set for '%s' user name", resourceName, rp.UserName), + ) + } + + if rp.ServicePrincipalName != "" && rp.ServicePrincipalName == permission.ServicePrincipalName 
{ + diagnostics = diagnostics.Extend( + diag.Warningf("'%s' already has permissions set for '%s' service principal name", resourceName, rp.ServicePrincipalName), + ) + } + } + + return len(diagnostics) > 0, diagnostics +} + +func notifyForPermissionOverlap( + ctx context.Context, + permission resources.Permission, + resourcePermissions []resources.Permission, + resourceName string, +) bool { + isOverlap, _ := isPermissionOverlap(permission, resourcePermissions, resourceName) + // TODO: When we start to collect all diagnostics at the top level and visualize jointly, + // use diagnostics returned from isPermissionOverlap to display warnings + + return isOverlap +} diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go new file mode 100644 index 00000000..a8eb9e27 --- /dev/null +++ b/bundle/permissions/workspace_root.go @@ -0,0 +1,78 @@ +package permissions + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/databricks-sdk-go/service/workspace" +) + +type workspaceRootPermissions struct { +} + +func ApplyWorkspaceRootPermissions() bundle.Mutator { + return &workspaceRootPermissions{} +} + +// Apply implements bundle.Mutator. +func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) error { + err := giveAccessForWorkspaceRoot(ctx, b) + if err != nil { + return err + } + + return nil +} + +func (*workspaceRootPermissions) Name() string { + return "ApplyWorkspaceRootPermissions" +} + +func giveAccessForWorkspaceRoot(ctx context.Context, b *bundle.Bundle) error { + permissions := make([]workspace.WorkspaceObjectAccessControlRequest, 0) + + for _, p := range b.Config.Permissions { + level, err := getWorkspaceObjectPermissionLevel(p.Level) + if err != nil { + return err + } + + permissions = append(permissions, workspace.WorkspaceObjectAccessControlRequest{ + GroupName: p.GroupName, + UserName: p.UserName, + ServicePrincipalName: p.ServicePrincipalName, + PermissionLevel: level, + }) + } + + if len(permissions) == 0 { + return nil + } + + w := b.WorkspaceClient().Workspace + obj, err := w.GetStatusByPath(ctx, b.Config.Workspace.RootPath) + if err != nil { + return err + } + + _, err = w.UpdatePermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{ + WorkspaceObjectId: fmt.Sprint(obj.ObjectId), + WorkspaceObjectType: "directories", + AccessControlList: permissions, + }) + return err +} + +func getWorkspaceObjectPermissionLevel(bundlePermission string) (workspace.WorkspaceObjectPermissionLevel, error) { + switch bundlePermission { + case CAN_MANAGE: + return workspace.WorkspaceObjectPermissionLevelCanManage, nil + case CAN_RUN: + return workspace.WorkspaceObjectPermissionLevelCanRun, nil + case CAN_VIEW: + return workspace.WorkspaceObjectPermissionLevelCanRead, nil + default: + return "", fmt.Errorf("unsupported bundle permission level %s", bundlePermission) + } +} diff --git a/bundle/permissions/workspace_root_test.go b/bundle/permissions/workspace_root_test.go new file mode 100644 index 00000000..21cc4176 --- /dev/null +++ b/bundle/permissions/workspace_root_test.go @@ -0,0 +1,129 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/databricks/databricks-sdk-go/service/pipelines" + 
"github.com/databricks/databricks-sdk-go/service/serving" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/require" +) + +type MockWorkspaceClient struct { + t *testing.T +} + +// Delete implements workspace.WorkspaceService. +func (MockWorkspaceClient) Delete(ctx context.Context, request workspace.Delete) error { + panic("unimplemented") +} + +// Export implements workspace.WorkspaceService. +func (MockWorkspaceClient) Export(ctx context.Context, request workspace.ExportRequest) (*workspace.ExportResponse, error) { + panic("unimplemented") +} + +// GetPermissionLevels implements workspace.WorkspaceService. +func (MockWorkspaceClient) GetPermissionLevels(ctx context.Context, request workspace.GetWorkspaceObjectPermissionLevelsRequest) (*workspace.GetWorkspaceObjectPermissionLevelsResponse, error) { + panic("unimplemented") +} + +// GetPermissions implements workspace.WorkspaceService. +func (MockWorkspaceClient) GetPermissions(ctx context.Context, request workspace.GetWorkspaceObjectPermissionsRequest) (*workspace.WorkspaceObjectPermissions, error) { + panic("unimplemented") +} + +// GetStatus implements workspace.WorkspaceService. +func (MockWorkspaceClient) GetStatus(ctx context.Context, request workspace.GetStatusRequest) (*workspace.ObjectInfo, error) { + return &workspace.ObjectInfo{ + ObjectId: 1234, ObjectType: "directories", Path: "/Users/foo@bar.com", + }, nil +} + +// Import implements workspace.WorkspaceService. +func (MockWorkspaceClient) Import(ctx context.Context, request workspace.Import) error { + panic("unimplemented") +} + +// List implements workspace.WorkspaceService. +func (MockWorkspaceClient) List(ctx context.Context, request workspace.ListWorkspaceRequest) (*workspace.ListResponse, error) { + panic("unimplemented") +} + +// Mkdirs implements workspace.WorkspaceService. +func (MockWorkspaceClient) Mkdirs(ctx context.Context, request workspace.Mkdirs) error { + panic("unimplemented") +} + +// SetPermissions implements workspace.WorkspaceService. +func (MockWorkspaceClient) SetPermissions(ctx context.Context, request workspace.WorkspaceObjectPermissionsRequest) (*workspace.WorkspaceObjectPermissions, error) { + panic("unimplemented") +} + +// UpdatePermissions implements workspace.WorkspaceService. 
+func (m MockWorkspaceClient) UpdatePermissions(ctx context.Context, request workspace.WorkspaceObjectPermissionsRequest) (*workspace.WorkspaceObjectPermissions, error) { + require.Equal(m.t, "1234", request.WorkspaceObjectId) + require.Equal(m.t, "directories", request.WorkspaceObjectType) + require.Contains(m.t, request.AccessControlList, workspace.WorkspaceObjectAccessControlRequest{ + UserName: "TestUser", + PermissionLevel: "CAN_MANAGE", + }) + require.Contains(m.t, request.AccessControlList, workspace.WorkspaceObjectAccessControlRequest{ + GroupName: "TestGroup", + PermissionLevel: "CAN_READ", + }) + require.Contains(m.t, request.AccessControlList, workspace.WorkspaceObjectAccessControlRequest{ + ServicePrincipalName: "TestServicePrincipal", + PermissionLevel: "CAN_RUN", + }) + + return nil, nil +} + +func TestApplyWorkspaceRootPermissions(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + RootPath: "/Users/foo@bar.com", + }, + Permissions: []resources.Permission{ + {Level: CAN_MANAGE, UserName: "TestUser"}, + {Level: CAN_VIEW, GroupName: "TestGroup"}, + {Level: CAN_RUN, ServicePrincipalName: "TestServicePrincipal"}, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job_1": {JobSettings: &jobs.JobSettings{}}, + "job_2": {JobSettings: &jobs.JobSettings{}}, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}}, + "pipeline_2": {PipelineSpec: &pipelines.PipelineSpec{}}, + }, + Models: map[string]*resources.MlflowModel{ + "model_1": {Model: &ml.Model{}}, + "model_2": {Model: &ml.Model{}}, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment_1": {Experiment: &ml.Experiment{}}, + "experiment_2": {Experiment: &ml.Experiment{}}, + }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "endpoint_1": {CreateServingEndpoint: &serving.CreateServingEndpoint{}}, + "endpoint_2": {CreateServingEndpoint: &serving.CreateServingEndpoint{}}, + }, + }, + }, + } + + b.WorkspaceClient().Workspace.WithImpl(MockWorkspaceClient{t}) + + err := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions()) + require.NoError(t, err) +} diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 805bae80..6f0d3a6c 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/bundle/deploy/metadata" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/bundle/permissions" "github.com/databricks/cli/bundle/python" "github.com/databricks/cli/bundle/scripts" ) @@ -27,6 +28,7 @@ func Deploy() bundle.Mutator { artifacts.UploadAll(), python.TransformWheelTask(), files.Upload(), + permissions.ApplyWorkspaceRootPermissions(), terraform.Interpolate(), terraform.Write(), terraform.StatePull(), diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index e03a6336..fb9e7b24 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/bundle/permissions" "github.com/databricks/cli/bundle/python" "github.com/databricks/cli/bundle/scripts" ) @@ -34,6 +35,7 @@ func Initialize() bundle.Mutator { mutator.ExpandPipelineGlobPaths(), mutator.TranslatePaths(), python.WrapperWarning(), + 
permissions.ApplyBundlePermissions(), terraform.Initialize(), scripts.Execute(config.ScriptPostInit), }, diff --git a/bundle/tests/bundle_permissions/databricks.yml b/bundle/tests/bundle_permissions/databricks.yml new file mode 100644 index 00000000..78f3d3d7 --- /dev/null +++ b/bundle/tests/bundle_permissions/databricks.yml @@ -0,0 +1,35 @@ +bundle: + name: bundle_permissions + +permissions: + - level: CAN_RUN + user_name: test@company.com + +targets: + development: + permissions: + - level: CAN_MANAGE + group_name: devs + - level: CAN_VIEW + service_principal_name: 1234-abcd + - level: CAN_RUN + user_name: bot@company.com + +resources: + pipelines: + nyc_taxi_pipeline: + target: nyc_taxi_production + development: false + photon: true + + jobs: + pipeline_schedule: + name: Daily refresh of production pipeline + + schedule: + quartz_cron_expression: 6 6 11 * * ? + timezone_id: UTC + + tasks: + - pipeline_task: + pipeline_id: "to be interpolated" diff --git a/bundle/tests/bundle_permissions_test.go b/bundle/tests/bundle_permissions_test.go new file mode 100644 index 00000000..3ea9dc2e --- /dev/null +++ b/bundle/tests/bundle_permissions_test.go @@ -0,0 +1,56 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/permissions" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBundlePermissions(t *testing.T) { + b := load(t, "./bundle_permissions") + assert.Contains(t, b.Config.Permissions, resources.Permission{Level: "CAN_RUN", UserName: "test@company.com"}) + assert.NotContains(t, b.Config.Permissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) + assert.NotContains(t, b.Config.Permissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) + assert.NotContains(t, b.Config.Permissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) + + err := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) + require.NoError(t, err) + pipelinePermissions := b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Permissions + assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_RUN", UserName: "test@company.com"}) + assert.NotContains(t, pipelinePermissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) + assert.NotContains(t, pipelinePermissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) + assert.NotContains(t, pipelinePermissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) + + jobsPermissions := b.Config.Resources.Jobs["pipeline_schedule"].Permissions + assert.Contains(t, jobsPermissions, resources.Permission{Level: "CAN_MANAGE_RUN", UserName: "test@company.com"}) + assert.NotContains(t, jobsPermissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) + assert.NotContains(t, jobsPermissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) + assert.NotContains(t, jobsPermissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) +} + +func TestBundlePermissionsDevTarget(t *testing.T) { + b := loadTarget(t, "./bundle_permissions", "development") + assert.Contains(t, b.Config.Permissions, resources.Permission{Level: "CAN_RUN", UserName: "test@company.com"}) + assert.Contains(t, b.Config.Permissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) + assert.Contains(t, 
b.Config.Permissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) + assert.Contains(t, b.Config.Permissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) + + err := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) + require.NoError(t, err) + pipelinePermissions := b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Permissions + assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_RUN", UserName: "test@company.com"}) + assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) + assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) + assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) + + jobsPermissions := b.Config.Resources.Jobs["pipeline_schedule"].Permissions + assert.Contains(t, jobsPermissions, resources.Permission{Level: "CAN_MANAGE_RUN", UserName: "test@company.com"}) + assert.Contains(t, jobsPermissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) + assert.Contains(t, jobsPermissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) + assert.Contains(t, jobsPermissions, resources.Permission{Level: "CAN_MANAGE_RUN", UserName: "bot@company.com"}) +} From b2055ed49e05fa3b87c9a2a576d0b4782c3d6b66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 13:10:46 +0100 Subject: [PATCH 231/310] Bump golang.org/x/oauth2 from 0.13.0 to 0.14.0 (#982) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.13.0 to 0.14.0.
Commits
  • e067960 go.mod: update golang.org/x dependencies
  • 4c91c17 google: adds header to security considerations section
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/oauth2&package-manager=go_modules&previous-version=0.13.0&new-version=0.14.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index db810251..5b108a52 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/whilp/git-urls v1.0.0 // MIT golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/mod v0.14.0 - golang.org/x/oauth2 v0.13.0 + golang.org/x/oauth2 v0.14.0 golang.org/x/sync v0.5.0 golang.org/x/term v0.14.0 golang.org/x/text v0.14.0 @@ -52,8 +52,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/net v0.17.0 // indirect + golang.org/x/crypto v0.15.0 // indirect + golang.org/x/net v0.18.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/api v0.148.0 // indirect diff --git a/go.sum b/go.sum index 6443c15e..3959456d 100644 --- a/go.sum +++ b/go.sum @@ -163,8 +163,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= @@ -187,11 +187,11 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= +golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
From c3ced68c607e7376bc38a291e9706c8073ac40e5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 16:38:35 +0100 Subject: [PATCH 232/310] Bump github.com/databricks/databricks-sdk-go from 0.24.0 to 0.25.0 (#980) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.24.0 to 0.25.0.
Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog.

0.25.0

  • Make sure path parameters are first in order in RequiredFields (#669).
  • Added Field.IsRequestBodyField method for code generation (#670).
  • Added regressions question to the issue template (#676).
  • Added telemetry for CI/CD platform to useragent (#665).
  • Skipped GCP Integration Tests using Statement Execution API (#678).
  • Added more detailed error message on default credentials not found error (#679).
  • Updated SDK to latest OpenAPI Spec (#685).

API Changes:

OpenAPI SHA: e7b127cb07af8dd4d8c61c7cc045c8910cdbb02a, Date: 2023-11-08

Dependency updates:

  • Bump google.golang.org/api from 0.146.0 to 0.150.0 (#683).
  • Bump golang.org/x/mod from 0.13.0 to 0.14.0 (#681).
  • Bump google.golang.org/grpc from 1.58.2 to 1.58.3 in /examples/slog (#672).
  • Bump google.golang.org/grpc to 1.58.3 in /examples/zerolog (#684).
  • Bump golang.org/x/time from 0.3.0 to 0.4.0 (#680).
Commits
  • 03b614c Release v0.25.0 (#687)
  • 16970df Update SDK to latest OpenAPI Spec (#685)
  • 712fcfe Bump golang.org/x/time from 0.3.0 to 0.4.0 (#680)
  • 3215e79 Bump google.golang.org/grpc from 1.58.2 to 1.58.3 in /examples/zerolog (#684)
  • f06dd73 Bump google.golang.org/grpc from 1.58.2 to 1.58.3 in /examples/slog (#672)
  • 2bb6c8d Bump google.golang.org/grpc from 1.57.0 to 1.57.1 in /examples/zerolog (#674)
  • e045c01 Bump golang.org/x/mod from 0.13.0 to 0.14.0 (#681)
  • 7e28d29 Bump google.golang.org/api from 0.146.0 to 0.150.0 (#683)
  • 6e2bb1d Add more detailed error message on default credentials not found error (#679)
  • 33c1c87 Skip GCP Integration Tests using Statement Execution API (#678)

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.24.0&new-version=0.25.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
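One changelog item above — the more detailed error message when default credentials are not found (#679) — is easiest to see from plain SDK usage. A minimal sketch, assuming credentials come from the environment (e.g. `DATABRICKS_HOST`/`DATABRICKS_TOKEN`); this is an illustration, not code from this PR:

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
)

func main() {
	// NewWorkspaceClient walks the SDK's default credentials chain
	// (environment variables, config profiles, and so on).
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}

	me, err := w.CurrentUser.Me(context.Background())
	if err != nil {
		// If no credentials could be resolved, v0.25.0 reports which
		// credential sources were tried instead of a bare failure (#679).
		log.Fatal(err)
	}
	log.Printf("authenticated as %s", me.UserName)
}
```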
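The regenerated code below also adds account-level `network-connectivity` commands backed by the SDK's new NetworkConnectivity service. As a rough sketch of the call the generated `create-network-connectivity-configuration` command issues under the hood (request fields mirror the generated code; the name and region values are placeholders):

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()

	// Account-level client; expects an accounts host, account ID, and
	// credentials to be available in the environment or a profile.
	a, err := databricks.NewAccountClient()
	if err != nil {
		log.Fatal(err)
	}

	// Same call the generated command makes after parsing NAME and REGION.
	ncc, err := a.NetworkConnectivity.CreateNetworkConnectivityConfiguration(ctx,
		settings.CreateNetworkConnectivityConfigRequest{
			Name:   "my-ncc", // placeholder
			Region: "westus", // placeholder
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created network connectivity configuration: %v", ncc)
}
```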
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .gitattributes | 2 +- cmd/account/cmd.go | 4 +- .../network-connectivity.go | 483 ++++++++++++++++++ .../o-auth-enrollment/o-auth-enrollment.go | 169 ------ cmd/account/o-auth-enrollment/overrides.go | 107 ---- cmd/account/users/users.go | 4 +- cmd/account/workspaces/workspaces.go | 1 + cmd/workspace/apps/apps.go | 216 +++++++- .../cluster-policies/cluster-policies.go | 38 +- .../external-locations/external-locations.go | 1 + cmd/workspace/functions/functions.go | 10 +- cmd/workspace/jobs/jobs.go | 1 + cmd/workspace/metastores/metastores.go | 19 +- cmd/workspace/users/users.go | 4 +- go.mod | 10 +- go.sum | 20 +- 17 files changed, 735 insertions(+), 356 deletions(-) create mode 100755 cmd/account/network-connectivity/network-connectivity.go delete mode 100755 cmd/account/o-auth-enrollment/o-auth-enrollment.go delete mode 100644 cmd/account/o-auth-enrollment/overrides.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 23aa4473..7c42f6dc 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -5903bb39137fd76ac384b2044e425f9c56840e00 \ No newline at end of file +e7b127cb07af8dd4d8c61c7cc045c8910cdbb02a \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index e94cfcd7..0a8a7191 100755 --- a/.gitattributes +++ b/.gitattributes @@ -10,9 +10,9 @@ cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true cmd/account/metastores/metastores.go linguist-generated=true +cmd/account/network-connectivity/network-connectivity.go linguist-generated=true cmd/account/network-policy/network-policy.go linguist-generated=true cmd/account/networks/networks.go linguist-generated=true -cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true cmd/account/private-access/private-access.go linguist-generated=true cmd/account/published-app-integration/published-app-integration.go linguist-generated=true diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 38be7314..72bf9107 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -16,9 +16,9 @@ import ( log_delivery "github.com/databricks/cli/cmd/account/log-delivery" account_metastore_assignments "github.com/databricks/cli/cmd/account/metastore-assignments" account_metastores "github.com/databricks/cli/cmd/account/metastores" + network_connectivity "github.com/databricks/cli/cmd/account/network-connectivity" account_network_policy "github.com/databricks/cli/cmd/account/network-policy" networks "github.com/databricks/cli/cmd/account/networks" - o_auth_enrollment "github.com/databricks/cli/cmd/account/o-auth-enrollment" o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps" private_access "github.com/databricks/cli/cmd/account/private-access" published_app_integration "github.com/databricks/cli/cmd/account/published-app-integration" @@ -50,9 +50,9 @@ func New() *cobra.Command { cmd.AddCommand(log_delivery.New()) cmd.AddCommand(account_metastore_assignments.New()) cmd.AddCommand(account_metastores.New()) + cmd.AddCommand(network_connectivity.New()) cmd.AddCommand(account_network_policy.New()) cmd.AddCommand(networks.New()) - 
cmd.AddCommand(o_auth_enrollment.New()) cmd.AddCommand(o_auth_published_apps.New()) cmd.AddCommand(private_access.New()) cmd.AddCommand(published_app_integration.New()) diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go new file mode 100755 index 00000000..3bc1e74e --- /dev/null +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -0,0 +1,483 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package network_connectivity + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "network-connectivity", + Short: `These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources.`, + Long: `These APIs provide configurations for the network connectivity of your + workspaces for serverless compute resources. This API provides stable subnets + for your workspace so that you can configure your firewalls on your Azure + Storage accounts to allow access from Databricks. You can also use the API to + provision private endpoints for Databricks to privately connect serverless + compute resources to your Azure resources using Azure Private Link. See + [configure serverless secure connectivity]. + + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security`, + GroupID: "settings", + Annotations: map[string]string{ + "package": "settings", + }, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-network-connectivity-configuration command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createNetworkConnectivityConfigurationOverrides []func( + *cobra.Command, + *settings.CreateNetworkConnectivityConfigRequest, +) + +func newCreateNetworkConnectivityConfiguration() *cobra.Command { + cmd := &cobra.Command{} + + var createNetworkConnectivityConfigurationReq settings.CreateNetworkConnectivityConfigRequest + var createNetworkConnectivityConfigurationJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createNetworkConnectivityConfigurationJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-network-connectivity-configuration NAME REGION" + cmd.Short = `Create a network connectivity configuration.` + cmd.Long = `Create a network connectivity configuration. + + Creates a network connectivity configuration (NCC), which provides stable + Azure service subnets when accessing your Azure Storage accounts. You can also + use a network connectivity configuration to create Databricks-managed private + endpoints so that Databricks serverless compute resources privately access + your resources. + + **IMPORTANT**: After you create the network connectivity configuration, you + must assign one or more workspaces to the new network connectivity + configuration. 
You can share one network connectivity configuration with + multiple workspaces from the same Azure region within the same Databricks + account. See [configure serverless secure connectivity]. + + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'region' in your JSON input") + } + return nil + } + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = createNetworkConnectivityConfigurationJson.Unmarshal(&createNetworkConnectivityConfigurationReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + createNetworkConnectivityConfigurationReq.Name = args[0] + } + if !cmd.Flags().Changed("json") { + createNetworkConnectivityConfigurationReq.Region = args[1] + } + + response, err := a.NetworkConnectivity.CreateNetworkConnectivityConfiguration(ctx, createNetworkConnectivityConfigurationReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createNetworkConnectivityConfigurationOverrides { + fn(cmd, &createNetworkConnectivityConfigurationReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newCreateNetworkConnectivityConfiguration()) + }) +} + +// start create-private-endpoint-rule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createPrivateEndpointRuleOverrides []func( + *cobra.Command, + *settings.CreatePrivateEndpointRuleRequest, +) + +func newCreatePrivateEndpointRule() *cobra.Command { + cmd := &cobra.Command{} + + var createPrivateEndpointRuleReq settings.CreatePrivateEndpointRuleRequest + var createPrivateEndpointRuleJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createPrivateEndpointRuleJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID RESOURCE_ID GROUP_ID" + cmd.Short = `Create a private endpoint rule.` + cmd.Long = `Create a private endpoint rule. + + Create a private endpoint rule for the specified network connectivity config + object. Once the object is created, Databricks asynchronously provisions a new + Azure private endpoint to your specified Azure resource. + + **IMPORTANT**: You must use Azure portal or other Azure tools to approve the + private endpoint to complete the connection. To get the information of the + private endpoint created, make a GET request on the new private endpoint + rule. See [serverless private link]. 
+ + [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := cobra.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only NETWORK_CONNECTIVITY_CONFIG_ID as positional arguments. Provide 'resource_id', 'group_id' in your JSON input") + } + return nil + } + check := cobra.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = createPrivateEndpointRuleJson.Unmarshal(&createPrivateEndpointRuleReq) + if err != nil { + return err + } + } + createPrivateEndpointRuleReq.NetworkConnectivityConfigId = args[0] + if !cmd.Flags().Changed("json") { + createPrivateEndpointRuleReq.ResourceId = args[1] + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &createPrivateEndpointRuleReq.GroupId) + if err != nil { + return fmt.Errorf("invalid GROUP_ID: %s", args[2]) + } + } + + response, err := a.NetworkConnectivity.CreatePrivateEndpointRule(ctx, createPrivateEndpointRuleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createPrivateEndpointRuleOverrides { + fn(cmd, &createPrivateEndpointRuleReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newCreatePrivateEndpointRule()) + }) +} + +// start delete-network-connectivity-configuration command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteNetworkConnectivityConfigurationOverrides []func( + *cobra.Command, + *settings.DeleteNetworkConnectivityConfigurationRequest, +) + +func newDeleteNetworkConnectivityConfiguration() *cobra.Command { + cmd := &cobra.Command{} + + var deleteNetworkConnectivityConfigurationReq settings.DeleteNetworkConnectivityConfigurationRequest + + // TODO: short flags + + cmd.Use = "delete-network-connectivity-configuration NETWORK_CONNECTIVITY_CONFIG_ID" + cmd.Short = `Delete a network connectivity configuration.` + cmd.Long = `Delete a network connectivity configuration. + + Deletes a network connectivity configuration.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + deleteNetworkConnectivityConfigurationReq.NetworkConnectivityConfigId = args[0] + + err = a.NetworkConnectivity.DeleteNetworkConnectivityConfiguration(ctx, deleteNetworkConnectivityConfigurationReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteNetworkConnectivityConfigurationOverrides { + fn(cmd, &deleteNetworkConnectivityConfigurationReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDeleteNetworkConnectivityConfiguration()) + }) +} + +// start delete-private-endpoint-rule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deletePrivateEndpointRuleOverrides []func( + *cobra.Command, + *settings.DeletePrivateEndpointRuleRequest, +) + +func newDeletePrivateEndpointRule() *cobra.Command { + cmd := &cobra.Command{} + + var deletePrivateEndpointRuleReq settings.DeletePrivateEndpointRuleRequest + + // TODO: short flags + + cmd.Use = "delete-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID" + cmd.Short = `Delete a private endpoint rule.` + cmd.Long = `Delete a private endpoint rule. + + Initiates deleting a private endpoint rule. The private endpoint will be + deactivated and will be purged after seven days of deactivation. When a + private endpoint is in deactivated state, deactivated field is set to true + and the private endpoint is not available to your serverless compute + resources.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + deletePrivateEndpointRuleReq.NetworkConnectivityConfigId = args[0] + deletePrivateEndpointRuleReq.PrivateEndpointRuleId = args[1] + + response, err := a.NetworkConnectivity.DeletePrivateEndpointRule(ctx, deletePrivateEndpointRuleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deletePrivateEndpointRuleOverrides { + fn(cmd, &deletePrivateEndpointRuleReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDeletePrivateEndpointRule()) + }) +} + +// start get-network-connectivity-configuration command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getNetworkConnectivityConfigurationOverrides []func( + *cobra.Command, + *settings.GetNetworkConnectivityConfigurationRequest, +) + +func newGetNetworkConnectivityConfiguration() *cobra.Command { + cmd := &cobra.Command{} + + var getNetworkConnectivityConfigurationReq settings.GetNetworkConnectivityConfigurationRequest + + // TODO: short flags + + cmd.Use = "get-network-connectivity-configuration NETWORK_CONNECTIVITY_CONFIG_ID" + cmd.Short = `Get a network connectivity configuration.` + cmd.Long = `Get a network connectivity configuration. 
+ + Gets a network connectivity configuration.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + getNetworkConnectivityConfigurationReq.NetworkConnectivityConfigId = args[0] + + response, err := a.NetworkConnectivity.GetNetworkConnectivityConfiguration(ctx, getNetworkConnectivityConfigurationReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getNetworkConnectivityConfigurationOverrides { + fn(cmd, &getNetworkConnectivityConfigurationReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetNetworkConnectivityConfiguration()) + }) +} + +// start get-private-endpoint-rule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPrivateEndpointRuleOverrides []func( + *cobra.Command, + *settings.GetPrivateEndpointRuleRequest, +) + +func newGetPrivateEndpointRule() *cobra.Command { + cmd := &cobra.Command{} + + var getPrivateEndpointRuleReq settings.GetPrivateEndpointRuleRequest + + // TODO: short flags + + cmd.Use = "get-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID" + cmd.Short = `Get a private endpoint rule.` + cmd.Long = `Get a private endpoint rule. + + Gets the private endpoint rule.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + getPrivateEndpointRuleReq.NetworkConnectivityConfigId = args[0] + getPrivateEndpointRuleReq.PrivateEndpointRuleId = args[1] + + response, err := a.NetworkConnectivity.GetPrivateEndpointRule(ctx, getPrivateEndpointRuleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPrivateEndpointRuleOverrides { + fn(cmd, &getPrivateEndpointRuleReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetPrivateEndpointRule()) + }) +} + +// end service NetworkConnectivity diff --git a/cmd/account/o-auth-enrollment/o-auth-enrollment.go b/cmd/account/o-auth-enrollment/o-auth-enrollment.go deleted file mode 100755 index 7ba2e59a..00000000 --- a/cmd/account/o-auth-enrollment/o-auth-enrollment.go +++ /dev/null @@ -1,169 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
- -package o_auth_enrollment - -import ( - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/oauth2" - "github.com/spf13/cobra" -) - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cmdOverrides []func(*cobra.Command) - -func New() *cobra.Command { - cmd := &cobra.Command{ - Use: "o-auth-enrollment", - Short: `These APIs enable administrators to enroll OAuth for their accounts, which is required for adding/using any OAuth published/custom application integration.`, - Long: `These APIs enable administrators to enroll OAuth for their accounts, which is - required for adding/using any OAuth published/custom application integration. - - **Note:** Your account must be on the E2 version to use these APIs, this is - because OAuth is only supported on the E2 version.`, - GroupID: "oauth2", - Annotations: map[string]string{ - "package": "oauth2", - }, - } - - // Apply optional overrides to this command. - for _, fn := range cmdOverrides { - fn(cmd) - } - - return cmd -} - -// start create command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var createOverrides []func( - *cobra.Command, - *oauth2.CreateOAuthEnrollment, -) - -func newCreate() *cobra.Command { - cmd := &cobra.Command{} - - var createReq oauth2.CreateOAuthEnrollment - var createJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().BoolVar(&createReq.EnableAllPublishedApps, "enable-all-published-apps", createReq.EnableAllPublishedApps, `If true, enable OAuth for all the published applications in the account.`) - - cmd.Use = "create" - cmd.Short = `Create OAuth Enrollment request.` - cmd.Long = `Create OAuth Enrollment request. - - Create an OAuth Enrollment request to enroll OAuth for this account and - optionally enable the OAuth integration for all the partner applications in - the account. - - The parter applications are: - Power BI - Tableau Desktop - Databricks CLI - - The enrollment is executed asynchronously, so the API will return 204 - immediately. The actual enrollment take a few minutes, you can check the - status via API :method:OAuthEnrollment/get.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&createReq) - if err != nil { - return err - } - } - - err = a.OAuthEnrollment.Create(ctx, createReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. 
- for _, fn := range createOverrides { - fn(cmd, &createReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - -// start get command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getOverrides []func( - *cobra.Command, -) - -func newGet() *cobra.Command { - cmd := &cobra.Command{} - - cmd.Use = "get" - cmd.Short = `Get OAuth enrollment status.` - cmd.Long = `Get OAuth enrollment status. - - Gets the OAuth enrollment status for this Account. - - You can only add/use the OAuth published/custom application integrations when - OAuth enrollment status is enabled.` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - response, err := a.OAuthEnrollment.Get(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getOverrides { - fn(cmd) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - -// end service OAuthEnrollment diff --git a/cmd/account/o-auth-enrollment/overrides.go b/cmd/account/o-auth-enrollment/overrides.go deleted file mode 100644 index 1fc3aacc..00000000 --- a/cmd/account/o-auth-enrollment/overrides.go +++ /dev/null @@ -1,107 +0,0 @@ -package o_auth_enrollment - -import ( - "context" - "fmt" - "time" - - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/retries" - "github.com/databricks/databricks-sdk-go/service/oauth2" - "github.com/spf13/cobra" -) - -func promptForBasicAccountConfig(ctx context.Context) (*databricks.Config, error) { - if !cmdio.IsInTTY(ctx) { - return nil, fmt.Errorf("this command requires a TTY") - } - // OAuth Enrollment only works on AWS - host, err := cmdio.DefaultPrompt(ctx, "Host", "https://accounts.cloud.databricks.com") - if err != nil { - return nil, fmt.Errorf("host: %w", err) - } - accountID, err := cmdio.SimplePrompt(ctx, "Account ID") - if err != nil { - return nil, fmt.Errorf("account: %w", err) - } - username, err := cmdio.SimplePrompt(ctx, "Username") - if err != nil { - return nil, fmt.Errorf("username: %w", err) - } - password, err := cmdio.Secret(ctx, "Password") - if err != nil { - return nil, fmt.Errorf("password: %w", err) - } - return &databricks.Config{ - Host: host, - AccountID: accountID, - Username: username, - Password: password, - }, nil -} - -func enableOAuthForAccount(ctx context.Context, cfg *databricks.Config) error { - ac, err := databricks.NewAccountClient(cfg) - if err != nil { - return fmt.Errorf("failed to instantiate account client: %w", err) - } - // The enrollment is executed asynchronously, so the API returns HTTP 204 immediately - err = ac.OAuthEnrollment.Create(ctx, oauth2.CreateOAuthEnrollment{ - EnableAllPublishedApps: true, - }) - if err != nil { - return fmt.Errorf("failed to create oauth enrollment: %w", err) - } - enableSpinner := cmdio.Spinner(ctx) - // The actual enrollment take a few minutes - err = retries.Wait(ctx, 
10*time.Minute, func() *retries.Err { - status, err := ac.OAuthEnrollment.Get(ctx) - if err != nil { - return retries.Halt(err) - } - if !status.IsEnabled { - msg := "Enabling OAuth..." - enableSpinner <- msg - return retries.Continues(msg) - } - enableSpinner <- "OAuth is enabled" - close(enableSpinner) - return nil - }) - if err != nil { - return fmt.Errorf("wait for enrollment: %w", err) - } - // enable Databricks CLI, so that `databricks auth login` works - _, err = ac.PublishedAppIntegration.Create(ctx, oauth2.CreatePublishedAppIntegration{ - AppId: "databricks-cli", - }) - if err != nil { - return fmt.Errorf("failed to enable databricks CLI: %w", err) - } - return nil -} - -func newEnable() *cobra.Command { - return &cobra.Command{ - Use: "enable", - Short: "Enable Databricks CLI, Tableau Desktop, and PowerBI for this account.", - Long: `Before you can do 'databricks auth login', you have to enable OAuth for this account. - -This command prompts you for Account ID, username, and password and waits until OAuth is enabled.`, - RunE: func(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() - cfg, err := promptForBasicAccountConfig(ctx) - if err != nil { - return fmt.Errorf("account config: %w", err) - } - return enableOAuthForAccount(ctx, cfg) - }, - } -} - -func init() { - cmdOverrides = append(cmdOverrides, func(c *cobra.Command) { - c.AddCommand(newEnable()) - }) -} diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 20769699..730f3fc1 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -67,7 +67,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.DisplayName, "display-name", createReq.DisplayName, `String that represents a concatenation of given and family names.`) // TODO: array: emails // TODO: array: entitlements - cmd.Flags().StringVar(&createReq.ExternalId, "external-id", createReq.ExternalId, ``) + cmd.Flags().StringVar(&createReq.ExternalId, "external-id", createReq.ExternalId, `External ID is not currently supported.`) // TODO: array: groups cmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Databricks user ID.`) // TODO: complex arg: name @@ -455,7 +455,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `String that represents a concatenation of given and family names.`) // TODO: array: emails // TODO: array: entitlements - cmd.Flags().StringVar(&updateReq.ExternalId, "external-id", updateReq.ExternalId, ``) + cmd.Flags().StringVar(&updateReq.ExternalId, "external-id", updateReq.ExternalId, `External ID is not currently supported.`) // TODO: array: groups cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Databricks user ID.`) // TODO: complex arg: name diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 1a6aa90d..60eeb505 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -413,6 +413,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.CredentialsId, "credentials-id", updateReq.CredentialsId, `ID of the workspace's credential configuration object.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) + cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, 
"network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, `The ID of the network connectivity configuration object, which is the parent resource of this private endpoint rule object.`) cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`) cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`) diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 69222bfe..ff5433d1 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -107,23 +107,23 @@ func init() { }) } -// start delete command +// start delete-app command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var deleteOverrides []func( +var deleteAppOverrides []func( *cobra.Command, *serving.DeleteAppRequest, ) -func newDelete() *cobra.Command { +func newDeleteApp() *cobra.Command { cmd := &cobra.Command{} - var deleteReq serving.DeleteAppRequest + var deleteAppReq serving.DeleteAppRequest // TODO: short flags - cmd.Use = "delete NAME" + cmd.Use = "delete-app NAME" cmd.Short = `Delete an application.` cmd.Long = `Delete an application. @@ -141,13 +141,13 @@ func newDelete() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - deleteReq.Name = args[0] + deleteAppReq.Name = args[0] - err = w.Apps.Delete(ctx, deleteReq) + response, err := w.Apps.DeleteApp(ctx, deleteAppReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -155,8 +155,8 @@ func newDelete() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range deleteOverrides { - fn(cmd, &deleteReq) + for _, fn := range deleteAppOverrides { + fn(cmd, &deleteAppReq) } return cmd @@ -164,27 +164,27 @@ func newDelete() *cobra.Command { func init() { cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteApp()) }) } -// start get command +// start get-app command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var getOverrides []func( +var getAppOverrides []func( *cobra.Command, *serving.GetAppRequest, ) -func newGet() *cobra.Command { +func newGetApp() *cobra.Command { cmd := &cobra.Command{} - var getReq serving.GetAppRequest + var getAppReq serving.GetAppRequest // TODO: short flags - cmd.Use = "get NAME" + cmd.Use = "get-app NAME" cmd.Short = `Get definition for an application.` cmd.Long = `Get definition for an application. @@ -202,13 +202,13 @@ func newGet() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getReq.Name = args[0] + getAppReq.Name = args[0] - err = w.Apps.Get(ctx, getReq) + response, err := w.Apps.GetApp(ctx, getAppReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. 
@@ -216,8 +216,8 @@ func newGet() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range getOverrides { - fn(cmd, &getReq) + for _, fn := range getAppOverrides { + fn(cmd, &getAppReq) } return cmd @@ -225,7 +225,179 @@ func newGet() *cobra.Command { func init() { cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) + cmd.AddCommand(newGetApp()) + }) +} + +// start get-app-deployment-status command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getAppDeploymentStatusOverrides []func( + *cobra.Command, + *serving.GetAppDeploymentStatusRequest, +) + +func newGetAppDeploymentStatus() *cobra.Command { + cmd := &cobra.Command{} + + var getAppDeploymentStatusReq serving.GetAppDeploymentStatusRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getAppDeploymentStatusReq.IncludeAppLog, "include-app-log", getAppDeploymentStatusReq.IncludeAppLog, `Boolean flag to include application logs.`) + + cmd.Use = "get-app-deployment-status DEPLOYMENT_ID" + cmd.Short = `Get deployment status for an application.` + cmd.Long = `Get deployment status for an application. + + Get deployment status for an application` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getAppDeploymentStatusReq.DeploymentId = args[0] + + response, err := w.Apps.GetAppDeploymentStatus(ctx, getAppDeploymentStatusReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getAppDeploymentStatusOverrides { + fn(cmd, &getAppDeploymentStatusReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetAppDeploymentStatus()) + }) +} + +// start get-apps command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getAppsOverrides []func( + *cobra.Command, +) + +func newGetApps() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get-apps" + cmd.Short = `List all applications.` + cmd.Long = `List all applications. + + List all available applications` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.Apps.GetApps(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getAppsOverrides { + fn(cmd) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetApps()) + }) +} + +// start get-events command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getEventsOverrides []func( + *cobra.Command, + *serving.GetEventsRequest, +) + +func newGetEvents() *cobra.Command { + cmd := &cobra.Command{} + + var getEventsReq serving.GetEventsRequest + + // TODO: short flags + + cmd.Use = "get-events NAME" + cmd.Short = `Get deployment events for an application.` + cmd.Long = `Get deployment events for an application. + + Get deployment events for an application` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getEventsReq.Name = args[0] + + response, err := w.Apps.GetEvents(ctx, getEventsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getEventsOverrides { + fn(cmd, &getEventsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetEvents()) }) } diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 1412b460..80608718 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -19,29 +19,27 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "cluster-policies", - Short: `Cluster policy limits the ability to configure clusters based on a set of rules.`, - Long: `Cluster policy limits the ability to configure clusters based on a set of - rules. The policy rules limit the attributes or attribute values available for - cluster creation. Cluster policies have ACLs that limit their use to specific - users and groups. + Short: `You can use cluster policies to control users' ability to configure clusters based on a set of rules.`, + Long: `You can use cluster policies to control users' ability to configure clusters + based on a set of rules. These rules specify which attributes or attribute + values can be used during cluster creation. Cluster policies have ACLs that + limit their use to specific users and groups. - Cluster policies let you limit users to create clusters with prescribed - settings, simplify the user interface and enable more users to create their - own clusters (by fixing and hiding some values), control cost by limiting per - cluster maximum cost (by setting limits on attributes whose values contribute - to hourly price). + With cluster policies, you can: - Auto-install cluster libraries on the next + restart by listing them in the policy's "libraries" field. - Limit users to + creating clusters with the prescribed settings. - Simplify the user interface, + enabling more users to create clusters, by fixing and hiding some fields. 
- + Manage costs by setting limits on attributes that impact the hourly rate. Cluster policy permissions limit which policies a user can select in the - Policy drop-down when the user creates a cluster: - A user who has cluster - create permission can select the Unrestricted policy and create - fully-configurable clusters. - A user who has both cluster create permission - and access to cluster policies can select the Unrestricted policy and policies - they have access to. - A user that has access to only cluster policies, can - select the policies they have access to. - - If no policies have been created in the workspace, the Policy drop-down does - not display. + Policy drop-down when the user creates a cluster: - A user who has + unrestricted cluster create permission can select the Unrestricted policy and + create fully-configurable clusters. - A user who has both unrestricted cluster + create permission and access to cluster policies can select the Unrestricted + policy and policies they have access to. - A user that has access to only + cluster policies, can select the policies they have access to. + If no policies exist in the workspace, the Policy drop-down doesn't appear. Only admin users can create, edit, and delete policies. Admin users also have access to all policies.`, GroupID: "compute", @@ -78,6 +76,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Definition, "definition", createReq.Definition, `Policy definition document expressed in Databricks Cluster Policy Definition Language.`) cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `Additional human-readable description of the cluster policy.`) + // TODO: array: libraries cmd.Flags().Int64Var(&createReq.MaxClustersPerUser, "max-clusters-per-user", createReq.MaxClustersPerUser, `Max number of clusters per user that can be active using this policy.`) cmd.Flags().StringVar(&createReq.PolicyFamilyDefinitionOverrides, "policy-family-definition-overrides", createReq.PolicyFamilyDefinitionOverrides, `Policy definition JSON document expressed in Databricks Policy Definition Language.`) cmd.Flags().StringVar(&createReq.PolicyFamilyId, "policy-family-id", createReq.PolicyFamilyId, `ID of the policy family.`) @@ -245,6 +244,7 @@ func newEdit() *cobra.Command { cmd.Flags().StringVar(&editReq.Definition, "definition", editReq.Definition, `Policy definition document expressed in Databricks Cluster Policy Definition Language.`) cmd.Flags().StringVar(&editReq.Description, "description", editReq.Description, `Additional human-readable description of the cluster policy.`) + // TODO: array: libraries cmd.Flags().Int64Var(&editReq.MaxClustersPerUser, "max-clusters-per-user", editReq.MaxClustersPerUser, `Max number of clusters per user that can be active using this policy.`) cmd.Flags().StringVar(&editReq.PolicyFamilyDefinitionOverrides, "policy-family-definition-overrides", editReq.PolicyFamilyDefinitionOverrides, `Policy definition JSON document expressed in Databricks Policy Definition Language.`) cmd.Flags().StringVar(&editReq.PolicyFamilyId, "policy-family-id", editReq.PolicyFamilyId, `ID of the policy family.`) diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index a5c69259..2803f186 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -343,6 +343,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Name, "name", 
updateReq.Name, `Name of the external location.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`) + cmd.Flags().BoolVar(&updateReq.SkipValidation, "skip-validation", updateReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`) cmd.Flags().StringVar(&updateReq.Url, "url", updateReq.Url, `Path URL of the external location.`) cmd.Use = "update NAME" diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 911c6d14..6510fce6 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -46,24 +46,18 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createOverrides []func( *cobra.Command, - *catalog.CreateFunction, + *catalog.CreateFunctionRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq catalog.CreateFunction + var createReq catalog.CreateFunctionRequest var createJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) - cmd.Flags().StringVar(&createReq.ExternalLanguage, "external-language", createReq.ExternalLanguage, `External function language.`) - cmd.Flags().StringVar(&createReq.ExternalName, "external-name", createReq.ExternalName, `External function name.`) - // TODO: map via StringToStringVar: properties - cmd.Flags().StringVar(&createReq.SqlPath, "sql-path", createReq.SqlPath, `List of schemes whose objects can be referenced without qualification.`) - cmd.Use = "create" cmd.Short = `Create a function.` cmd.Long = `Create a function. diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 7759539e..218975ec 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -793,6 +793,7 @@ func newGetRun() *cobra.Command { // TODO: short flags cmd.Flags().BoolVar(&getRunReq.IncludeHistory, "include-history", getRunReq.IncludeHistory, `Whether to include the repair history in the response.`) + cmd.Flags().BoolVar(&getRunReq.IncludeResolvedValues, "include-resolved-values", getRunReq.IncludeResolvedValues, `Whether to include resolved parameter values in the response.`) cmd.Use = "get-run RUN_ID" cmd.Short = `Get a single job run.` diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index d74c9bbc..ef473df2 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -153,12 +153,17 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createReq.Region, "region", createReq.Region, `Cloud region which the metastore serves (e.g., us-west-2, westus).`) + cmd.Flags().StringVar(&createReq.StorageRoot, "storage-root", createReq.StorageRoot, `The storage root URL for metastore.`) - cmd.Use = "create NAME STORAGE_ROOT" + cmd.Use = "create NAME" cmd.Short = `Create a metastore.` cmd.Long = `Create a metastore. 
- Creates a new metastore based on a provided name and storage root path.` + Creates a new metastore based on a provided name and optional storage root + path. By default (if the __owner__ field is not set), the owner of the new + metastore is the user calling the __createMetastore__ API. If the __owner__ + field is set to the empty string (**""**), the ownership is assigned to the + System User instead.` cmd.Annotations = make(map[string]string) @@ -166,11 +171,11 @@ func newCreate() *cobra.Command { if cmd.Flags().Changed("json") { err := cobra.ExactArgs(0)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'storage_root' in your JSON input") + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -188,9 +193,6 @@ func newCreate() *cobra.Command { if !cmd.Flags().Changed("json") { createReq.Name = args[0] } - if !cmd.Flags().Changed("json") { - createReq.StorageRoot = args[1] - } response, err := w.Metastores.Create(ctx, createReq) if err != nil { @@ -696,7 +698,8 @@ func newUpdate() *cobra.Command { cmd.Long = `Update a metastore. Updates information for a specific metastore. The caller must be a metastore - admin.` + admin. If the __owner__ field is set to the empty string (**""**), the + ownership is updated to the System User.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 97b6dcde..daa95df3 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -67,7 +67,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.DisplayName, "display-name", createReq.DisplayName, `String that represents a concatenation of given and family names.`) // TODO: array: emails // TODO: array: entitlements - cmd.Flags().StringVar(&createReq.ExternalId, "external-id", createReq.ExternalId, ``) + cmd.Flags().StringVar(&createReq.ExternalId, "external-id", createReq.ExternalId, `External ID is not currently supported.`) // TODO: array: groups cmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Databricks user ID.`) // TODO: complex arg: name @@ -623,7 +623,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `String that represents a concatenation of given and family names.`) // TODO: array: emails // TODO: array: entitlements - cmd.Flags().StringVar(&updateReq.ExternalId, "external-id", updateReq.ExternalId, ``) + cmd.Flags().StringVar(&updateReq.ExternalId, "external-id", updateReq.ExternalId, `External ID is not currently supported.`) // TODO: array: groups cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Databricks user ID.`) // TODO: complex arg: name diff --git a/go.mod b/go.mod index 5b108a52..7cef4cd4 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.24.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.25.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.4.0 // BSD-3-Clause @@ -55,11 +55,11 @@ require ( golang.org/x/crypto v0.15.0 // indirect golang.org/x/net v0.18.0 // indirect golang.org/x/sys v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect - 
google.golang.org/api v0.148.0 // indirect + golang.org/x/time v0.4.0 // indirect + google.golang.org/api v0.150.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect - google.golang.org/grpc v1.58.3 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/grpc v1.59.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 3959456d..25409bd6 100644 --- a/go.sum +++ b/go.sum @@ -31,8 +31,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.24.0 h1:fx34MOGYXVc72QBSFnKuDa/H3ekDMqZYH4jKZF8mrXk= -github.com/databricks/databricks-sdk-go v0.24.0/go.mod h1:a6rErRNh5bz+IJbO07nwW70iGyvtWidy1p/S5thepXI= +github.com/databricks/databricks-sdk-go v0.25.0 h1:qEpYHQ18HHqLIsIXXHhixakTtt6Q0tT3m34xws6BuZ8= +github.com/databricks/databricks-sdk-go v0.25.0/go.mod h1:s3/f2T8UGyKkcMywIyporj/Kb/lsiWkiksT/C84Swrs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -234,8 +234,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY= +golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -248,8 +248,8 @@ golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.148.0 h1:HBq4TZlN4/1pNcu0geJZ/Q50vIwIXT532UIMYoo0vOs= -google.golang.org/api v0.148.0/go.mod h1:8/TBgwaKjfqTdacOJrOv2+2Q6fBDU1uHKK06oGSkxzU= +google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE= +google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
@@ -257,15 +257,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
-google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

From 0ab125c10931341c114124ffbf47da3180fa121f Mon Sep 17 00:00:00 2001
From: "Lennart Kats (databricks)"
Date: Mon, 13 Nov 2023 20:50:39 +0100
Subject: [PATCH 233/310] Allow jobs to be manually unpaused in development mode (#885)

Partly mitigates #859. It's still not clear to me whether there is an
actual use case or whether users are trying to use "development" mode jobs
for production, but making this overridable is reasonable.

Beyond this fix I think we could do something in the Jobs schedule UI, but
it would help to better understand the use case (or the actual source of
the confusion) first. I expect we should nudge customers to move away from
dev mode rather than unpause.

---
 bundle/config/mutator/process_target_mode.go | 10 +++++---
 .../mutator/process_target_mode_test.go      | 25 ++++++++++++++++++-
 2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go
index 39321069..bd161e95 100644
--- a/bundle/config/mutator/process_target_mode.go
+++ b/bundle/config/mutator/process_target_mode.go
@@ -47,13 +47,17 @@ func transformDevelopmentMode(b *bundle.Bundle) error {
 		if r.Jobs[i].MaxConcurrentRuns == 0 {
 			r.Jobs[i].MaxConcurrentRuns = developmentConcurrentRuns
 		}
-		if r.Jobs[i].Schedule != nil {
+
+		// Pause each job. As an exception, we don't pause jobs that are explicitly
+		// marked as "unpaused". This allows users to override the default behavior
+		// of the development mode.
+		if r.Jobs[i].Schedule != nil && r.Jobs[i].Schedule.PauseStatus != jobs.PauseStatusUnpaused {
 			r.Jobs[i].Schedule.PauseStatus = jobs.PauseStatusPaused
 		}
-		if r.Jobs[i].Continuous != nil {
+		if r.Jobs[i].Continuous != nil && r.Jobs[i].Continuous.PauseStatus != jobs.PauseStatusUnpaused {
 			r.Jobs[i].Continuous.PauseStatus = jobs.PauseStatusPaused
 		}
-		if r.Jobs[i].Trigger != nil {
+		if r.Jobs[i].Trigger != nil && r.Jobs[i].Trigger.PauseStatus != jobs.PauseStatusUnpaused {
 			r.Jobs[i].Trigger.PauseStatus = jobs.PauseStatusPaused
 		}
 	}
diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go
index a9da0b0f..f7e78da2 100644
--- a/bundle/config/mutator/process_target_mode_test.go
+++ b/bundle/config/mutator/process_target_mode_test.go
@@ -45,7 +45,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
 			},
 		},
 		Resources: config.Resources{
 			Jobs: map[string]*resources.Job{
-				"job1": {JobSettings: &jobs.JobSettings{Name: "job1"}},
+				"job1": {
+					JobSettings: &jobs.JobSettings{
+						Name: "job1",
+						Schedule: &jobs.CronSchedule{
+							QuartzCronExpression: "* * * * *",
+						},
+					},
+				},
+				"job2": {
+					JobSettings: &jobs.JobSettings{
+						Name: "job2",
+						Schedule: &jobs.CronSchedule{
+							QuartzCronExpression: "* * * * *",
+							PauseStatus:          jobs.PauseStatusUnpaused,
+						},
+					},
+				},
 			},
 			Pipelines: map[string]*resources.Pipeline{
 				"pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}},
@@ -82,6 +98,12 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
 	// Job 1
 	assert.Equal(t, "[dev lennart] job1", bundle.Config.Resources.Jobs["job1"].Name)
 	assert.Equal(t, bundle.Config.Resources.Jobs["job1"].Tags["dev"], "lennart")
+	assert.Equal(t, bundle.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused)
+
+	// Job 2
+	assert.Equal(t, "[dev lennart] job2", bundle.Config.Resources.Jobs["job2"].Name)
+	assert.Equal(t, bundle.Config.Resources.Jobs["job2"].Tags["dev"], "lennart")
+	assert.Equal(t, bundle.Config.Resources.Jobs["job2"].Schedule.PauseStatus, jobs.PauseStatusUnpaused)
 
 	// Pipeline 1
 	assert.Equal(t, "[dev lennart] pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
@@ -182,6 +204,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
 	}
 	bundle.Config.Resources.Jobs["job1"].Permissions = permissions
 	bundle.Config.Resources.Jobs["job1"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"}
+	bundle.Config.Resources.Jobs["job2"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"}
 	bundle.Config.Resources.Pipelines["pipeline1"].Permissions = permissions
 	bundle.Config.Resources.Experiments["experiment1"].Permissions = permissions
 	bundle.Config.Resources.Experiments["experiment2"].Permissions = permissions

From 0f58f6c875962042064e1b7afb39983754dbcc0f Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Tue, 14 Nov 2023 17:28:32 +0100
Subject: [PATCH 234/310] Serialise empty files_path and job.relative_path in
 the deployment metadata (#984)

## Changes
The Jobs service expects these fields to always be present in the
metadata in its validation logic, which is reasonable. This PR removes
the `omitempty` tags so these fields are always uploaded to the workspace
`metadata.json` file.
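For context, a minimal sketch of the serialization difference this makes
(the structs below are illustrative, not the actual metadata types; they
only demonstrate standard `encoding/json` behavior):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// With `omitempty`, an empty string is dropped from the output entirely,
// so the consuming service never sees the key.
type withOmitEmpty struct {
	FilesPath string `json:"file_path,omitempty"`
}

// Without `omitempty`, the key is always serialized, even when empty.
type withoutOmitEmpty struct {
	FilesPath string `json:"file_path"`
}

func main() {
	a, _ := json.Marshal(withOmitEmpty{})
	b, _ := json.Marshal(withoutOmitEmpty{})
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"file_path":""}
}
```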
---
 bundle/metadata/metadata.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/bundle/metadata/metadata.go b/bundle/metadata/metadata.go
index 27edd584..441f1463 100644
--- a/bundle/metadata/metadata.go
+++ b/bundle/metadata/metadata.go
@@ -11,7 +11,7 @@ type Bundle struct {
 }
 
 type Workspace struct {
-	FilesPath string `json:"file_path,omitempty"`
+	FilesPath string `json:"file_path"`
 }
 
 type Job struct {
@@ -19,7 +19,7 @@ type Job struct {
 
 	// Relative path from the bundle root to the configuration file that holds
 	// the definition of this resource.
-	RelativePath string `json:"relative_path,omitempty"`
+	RelativePath string `json:"relative_path"`
 }
 
 type Resources struct {

From b39750188068dca7d7ebb66f7e4a12476527e128 Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Tue, 14 Nov 2023 23:09:18 +0100
Subject: [PATCH 235/310] Fix template initialization from current working
 directory (#976)

## Changes
If `args[0] == "."` was provided to the bundle init command, it would try
to resolve it as a built-in template and error out.

## Tests
Manually before:

```
shreyas.goenka@THW32HFW6T mlops-stack % cli bundle init .
Error: open /var/folders/lg/njll3hjx7pjcgxs6n7b290bw0000gp/T/templates3934264356/templates/databricks_template_schema.json: no such file or directory
```

after:

```
shreyas.goenka@THW32HFW6T mlops-stack % cli bundle init .
Welcome to MLOps Stacks. For detailed information on project generation, see the README at https://github.com/databricks/mlops-stacks/blob/main/README.md.

Project Name [my-mlops-project]: ^C
```

---
 libs/template/materialize.go   |  6 ++++++
 libs/template/renderer_test.go | 12 ++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/libs/template/materialize.go b/libs/template/materialize.go
index 7c9105b7..da0bc45d 100644
--- a/libs/template/materialize.go
+++ b/libs/template/materialize.go
@@ -104,6 +104,12 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st
 // If the given templateRoot matches
 func prepareBuiltinTemplates(templateRoot string, tempDir string) (string, error) {
+	// Check that `templateRoot` is a clean basename, i.e. `some_path` and not `./some_path` or "."
+	// Return early if that's not the case.
+	if templateRoot == "." || path.Base(templateRoot) != templateRoot {
+		return templateRoot, nil
+	}
+
 	_, err := fs.Stat(builtinTemplates, path.Join("templates", templateRoot))
 	if err != nil {
 		// The given path doesn't appear to be using our built-in templates
diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go
index d513eac8..8d0c2101 100644
--- a/libs/template/renderer_test.go
+++ b/libs/template/renderer_test.go
@@ -86,6 +86,18 @@ func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target st
 	}
 }
 
+func TestPrepareBuiltInTemplatesWithRelativePaths(t *testing.T) {
+	// CWD should not be resolved as a built-in template
+	dir, err := prepareBuiltinTemplates(".", t.TempDir())
+	assert.NoError(t, err)
+	assert.Equal(t, ".", dir)
+
+	// A relative path should not be resolved as a built-in template
+	dir, err = prepareBuiltinTemplates("./default-python", t.TempDir())
+	assert.NoError(t, err)
+	assert.Equal(t, "./default-python", dir)
+}
+
 func TestBuiltinTemplateValid(t *testing.T) {
 	// Test option combinations
 	options := []string{"yes", "no"}

From a25f10f24765b3ce464cf28059239fa4a299dcde Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Tue, 14 Nov 2023 23:27:58 +0100
Subject: [PATCH 236/310] Add `--tag` and `--branch` options to bundle init
 command (#975)

## Tests
Tested manually. The specified branch / tag is indeed cloned and used by
bundle init.

---
 cmd/bundle/init.go | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go
index cd2af420..ccdc6859 100644
--- a/cmd/bundle/init.go
+++ b/cmd/bundle/init.go
@@ -60,14 +60,27 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf
 	var configFile string
 	var outputDir string
 	var templateDir string
+	var tag string
+	var branch string
 	cmd.Flags().StringVar(&configFile, "config-file", "", "File containing input parameters for template initialization.")
 	cmd.Flags().StringVar(&templateDir, "template-dir", "", "Directory path within a Git repository containing the template.")
 	cmd.Flags().StringVar(&outputDir, "output-dir", "", "Directory to write the initialized template to.")
+	cmd.Flags().StringVar(&tag, "tag", "", "Git tag to use for template initialization")
+	cmd.Flags().StringVar(&branch, "branch", "", "Git branch to use for template initialization")
 
 	cmd.PreRunE = root.MustWorkspaceClient
 	cmd.RunE = func(cmd *cobra.Command, args []string) error {
-		ctx := cmd.Context()
+		if tag != "" && branch != "" {
+			return errors.New("only one of --tag or --branch can be specified")
+		}
 
+		// Git ref to use for template initialization
+		ref := branch
+		if tag != "" {
+			ref = tag
+		}
+
+		ctx := cmd.Context()
 		var templatePath string
 		if len(args) > 0 {
 			templatePath = args[0]
@@ -104,7 +117,7 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf
 		}
 		// TODO: Add automated test that the downloaded git repo is cleaned up.
 		// Clone the repository in the temporary directory
-		err = git.Clone(ctx, templatePath, "", repoDir)
+		err = git.Clone(ctx, templatePath, ref, repoDir)
 		if err != nil {
 			return err
 		}

From 2c908f8fea97319dcfdedb5ea3b5b767912ba370 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Wed, 15 Nov 2023 10:19:51 +0100
Subject: [PATCH 237/310] Function to convert Go struct back to `config.Value`
 (#935)

## Changes
This PR is the counterpart to #904.
With this change, we are able to convert a `config.Value` into a Go struct, make modifications to the Go struct, and reflect those changes in a new `config.Value`. This functionality allows us to incrementally introduce this configuration representation to existing bundle mutators. Bundle mutators expect a `*bundle.Bundle` argument and mutate its configuration directly. These mutations are not reflected in the corresponding `config.Value` (once introduced), which means we cannot use the `config.Value` as source of truth until we update _all_ mutators. To address this, we can run `convert.ToTyped` and `convert.FromTyped` at the mutator boundary (from `bundle.Apply`) and capture changes made to the Go struct. Then we can incrementally make mutators aware of the `config.Value` configuration and have them mutate that structure directly. ## Tests New unit tests pass. Manual spot checks against the bundle configuration type. --- libs/config/convert/end_to_end_test.go | 61 ++++ libs/config/convert/from_typed.go | 214 +++++++++++++ libs/config/convert/from_typed_test.go | 394 ++++++++++++++++++++++++ libs/config/convert/struct_info.go | 28 ++ libs/config/convert/struct_info_test.go | 107 +++++++ libs/config/convert/to_typed.go | 6 + 6 files changed, 810 insertions(+) create mode 100644 libs/config/convert/end_to_end_test.go create mode 100644 libs/config/convert/from_typed.go create mode 100644 libs/config/convert/from_typed_test.go diff --git a/libs/config/convert/end_to_end_test.go b/libs/config/convert/end_to_end_test.go new file mode 100644 index 00000000..c06830e8 --- /dev/null +++ b/libs/config/convert/end_to_end_test.go @@ -0,0 +1,61 @@ +package convert + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func assertFromTypedToTypedEqual[T any](t *testing.T, src T) { + nv, err := FromTyped(src, config.NilValue) + require.NoError(t, err) + + var dst T + err = ToTyped(&dst, nv) + require.NoError(t, err) + assert.Equal(t, src, dst) +} + +func TestAdditional(t *testing.T) { + type StructType struct { + Str string `json:"str"` + } + + type Tmp struct { + MapToPointer map[string]*string `json:"map_to_pointer"` + SliceOfPointer []*string `json:"slice_of_pointer"` + NestedStruct StructType `json:"nested_struct"` + } + + t.Run("nil", func(t *testing.T) { + assertFromTypedToTypedEqual(t, Tmp{}) + }) + + t.Run("empty map", func(t *testing.T) { + assertFromTypedToTypedEqual(t, Tmp{ + MapToPointer: map[string]*string{}, + }) + }) + + t.Run("map with nil value", func(t *testing.T) { + assertFromTypedToTypedEqual(t, Tmp{ + MapToPointer: map[string]*string{ + "key": nil, + }, + }) + }) + + t.Run("empty slice", func(t *testing.T) { + assertFromTypedToTypedEqual(t, Tmp{ + SliceOfPointer: []*string{}, + }) + }) + + t.Run("slice with nil value", func(t *testing.T) { + assertFromTypedToTypedEqual(t, Tmp{ + SliceOfPointer: []*string{nil}, + }) + }) +} diff --git a/libs/config/convert/from_typed.go b/libs/config/convert/from_typed.go new file mode 100644 index 00000000..e3911a9e --- /dev/null +++ b/libs/config/convert/from_typed.go @@ -0,0 +1,214 @@ +package convert + +import ( + "fmt" + "reflect" + + "github.com/databricks/cli/libs/config" +) + +// FromTyped converts changes made in the typed structure w.r.t. the configuration value +// back to the configuration value, retaining existing location information where possible. 
+func FromTyped(src any, ref config.Value) (config.Value, error) { + srcv := reflect.ValueOf(src) + + // Dereference pointer if necessary + for srcv.Kind() == reflect.Pointer { + if srcv.IsNil() { + return config.NilValue, nil + } + srcv = srcv.Elem() + } + + switch srcv.Kind() { + case reflect.Struct: + return fromTypedStruct(srcv, ref) + case reflect.Map: + return fromTypedMap(srcv, ref) + case reflect.Slice: + return fromTypedSlice(srcv, ref) + case reflect.String: + return fromTypedString(srcv, ref) + case reflect.Bool: + return fromTypedBool(srcv, ref) + case reflect.Int, reflect.Int32, reflect.Int64: + return fromTypedInt(srcv, ref) + case reflect.Float32, reflect.Float64: + return fromTypedFloat(srcv, ref) + } + + return config.NilValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) +} + +func fromTypedStruct(src reflect.Value, ref config.Value) (config.Value, error) { + // Check that the reference value is compatible or nil. + switch ref.Kind() { + case config.KindMap, config.KindNil: + default: + return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + } + + out := make(map[string]config.Value) + info := getStructInfo(src.Type()) + for k, v := range info.FieldValues(src) { + // Convert the field taking into account the reference value (may be equal to config.NilValue). + nv, err := FromTyped(v.Interface(), ref.Get(k)) + if err != nil { + return config.Value{}, err + } + + if nv != config.NilValue { + out[k] = nv + } + } + + // If the struct was equal to its zero value, emit a nil. + if len(out) == 0 { + return config.NilValue, nil + } + + return config.NewValue(out, ref.Location()), nil +} + +func fromTypedMap(src reflect.Value, ref config.Value) (config.Value, error) { + // Check that the reference value is compatible or nil. + switch ref.Kind() { + case config.KindMap, config.KindNil: + default: + return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + } + + // Return nil if the map is nil. + if src.IsNil() { + return config.NilValue, nil + } + + out := make(map[string]config.Value) + iter := src.MapRange() + for iter.Next() { + k := iter.Key().String() + v := iter.Value() + + // Convert entry taking into account the reference value (may be equal to config.NilValue). + nv, err := FromTyped(v.Interface(), ref.Get(k)) + if err != nil { + return config.Value{}, err + } + + // Every entry is represented, even if it is a nil. + // Otherwise, a map with zero-valued structs would yield a nil as well. + out[k] = nv + } + + return config.NewValue(out, ref.Location()), nil +} + +func fromTypedSlice(src reflect.Value, ref config.Value) (config.Value, error) { + // Check that the reference value is compatible or nil. + switch ref.Kind() { + case config.KindSequence, config.KindNil: + default: + return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + } + + // Return nil if the slice is nil. + if src.IsNil() { + return config.NilValue, nil + } + + out := make([]config.Value, src.Len()) + for i := 0; i < src.Len(); i++ { + v := src.Index(i) + + // Convert entry taking into account the reference value (may be equal to config.NilValue). 
+ nv, err := FromTyped(v.Interface(), ref.Index(i)) + if err != nil { + return config.Value{}, err + } + + out[i] = nv + } + + return config.NewValue(out, ref.Location()), nil +} + +func fromTypedString(src reflect.Value, ref config.Value) (config.Value, error) { + switch ref.Kind() { + case config.KindString: + value := src.String() + if value == ref.MustString() { + return ref, nil + } + + return config.V(value), nil + case config.KindNil: + // This field is not set in the reference, so we only include it if it has a non-zero value. + // Otherwise, we would always include all zero valued fields. + if src.IsZero() { + return config.NilValue, nil + } + return config.V(src.String()), nil + } + + return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) +} + +func fromTypedBool(src reflect.Value, ref config.Value) (config.Value, error) { + switch ref.Kind() { + case config.KindBool: + value := src.Bool() + if value == ref.MustBool() { + return ref, nil + } + return config.V(value), nil + case config.KindNil: + // This field is not set in the reference, so we only include it if it has a non-zero value. + // Otherwise, we would always include all zero valued fields. + if src.IsZero() { + return config.NilValue, nil + } + return config.V(src.Bool()), nil + } + + return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) +} + +func fromTypedInt(src reflect.Value, ref config.Value) (config.Value, error) { + switch ref.Kind() { + case config.KindInt: + value := src.Int() + if value == ref.MustInt() { + return ref, nil + } + return config.V(value), nil + case config.KindNil: + // This field is not set in the reference, so we only include it if it has a non-zero value. + // Otherwise, we would always include all zero valued fields. + if src.IsZero() { + return config.NilValue, nil + } + return config.V(src.Int()), nil + } + + return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) +} + +func fromTypedFloat(src reflect.Value, ref config.Value) (config.Value, error) { + switch ref.Kind() { + case config.KindFloat: + value := src.Float() + if value == ref.MustFloat() { + return ref, nil + } + return config.V(value), nil + case config.KindNil: + // This field is not set in the reference, so we only include it if it has a non-zero value. + // Otherwise, we would always include all zero valued fields. 
+ if src.IsZero() { + return config.NilValue, nil + } + return config.V(src.Float()), nil + } + + return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) +} diff --git a/libs/config/convert/from_typed_test.go b/libs/config/convert/from_typed_test.go new file mode 100644 index 00000000..2b28f549 --- /dev/null +++ b/libs/config/convert/from_typed_test.go @@ -0,0 +1,394 @@ +package convert + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFromTypedStructZeroFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + src := Tmp{} + ref := config.NilValue + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NilValue, nv) +} + +func TestFromTypedStructSetFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + src := Tmp{ + Foo: "foo", + Bar: "bar", + } + + ref := config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(map[string]config.Value{ + "foo": config.V("foo"), + "bar": config.V("bar"), + }), nv) +} + +func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + src := Tmp{ + Foo: "bar", + Bar: "qux", + } + + ref := config.V(map[string]config.Value{ + "foo": config.NewValue("bar", config.Location{File: "foo"}), + "bar": config.NewValue("baz", config.Location{File: "bar"}), + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + + // Assert foo has retained its location. + assert.Equal(t, config.NewValue("bar", config.Location{File: "foo"}), nv.Get("foo")) + + // Assert bar lost its location (because it was overwritten). + assert.Equal(t, config.NewValue("qux", config.Location{}), nv.Get("bar")) +} + +func TestFromTypedMapNil(t *testing.T) { + var src map[string]string = nil + + ref := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NilValue, nv) +} + +func TestFromTypedMapEmpty(t *testing.T) { + var src = map[string]string{} + + ref := config.V(map[string]config.Value{ + "foo": config.V("bar"), + "bar": config.V("baz"), + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(map[string]config.Value{}), nv) +} + +func TestFromTypedMapNonEmpty(t *testing.T) { + var src = map[string]string{ + "foo": "foo", + "bar": "bar", + } + + ref := config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(map[string]config.Value{ + "foo": config.V("foo"), + "bar": config.V("bar"), + }), nv) +} + +func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { + var src = map[string]string{ + "foo": "bar", + "bar": "qux", + } + + ref := config.V(map[string]config.Value{ + "foo": config.NewValue("bar", config.Location{File: "foo"}), + "bar": config.NewValue("baz", config.Location{File: "bar"}), + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + + // Assert foo has retained its location. + assert.Equal(t, config.NewValue("bar", config.Location{File: "foo"}), nv.Get("foo")) + + // Assert bar lost its location (because it was overwritten). 
+ assert.Equal(t, config.NewValue("qux", config.Location{}), nv.Get("bar")) +} + +func TestFromTypedMapFieldWithZeroValue(t *testing.T) { + var src = map[string]string{ + "foo": "", + } + + ref := config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(map[string]config.Value{ + "foo": config.NilValue, + }), nv) +} + +func TestFromTypedSliceNil(t *testing.T) { + var src []string = nil + + ref := config.V([]config.Value{ + config.V("bar"), + config.V("baz"), + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NilValue, nv) +} + +func TestFromTypedSliceEmpty(t *testing.T) { + var src = []string{} + + ref := config.V([]config.Value{ + config.V("bar"), + config.V("baz"), + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V([]config.Value{}), nv) +} + +func TestFromTypedSliceNonEmpty(t *testing.T) { + var src = []string{ + "foo", + "bar", + } + + ref := config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V([]config.Value{ + config.V("foo"), + config.V("bar"), + }), nv) +} + +func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { + var src = []string{ + "foo", + "bar", + } + + ref := config.V([]config.Value{ + config.NewValue("foo", config.Location{File: "foo"}), + config.NewValue("baz", config.Location{File: "baz"}), + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + + // Assert foo has retained its location. + assert.Equal(t, config.NewValue("foo", config.Location{File: "foo"}), nv.Index(0)) + + // Assert bar lost its location (because it was overwritten). + assert.Equal(t, config.NewValue("bar", config.Location{}), nv.Index(1)) +} + +func TestFromTypedStringEmpty(t *testing.T) { + var src string + var ref = config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NilValue, nv) +} + +func TestFromTypedStringEmptyOverwrite(t *testing.T) { + var src string + var ref = config.V("old") + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(""), nv) +} + +func TestFromTypedStringNonEmpty(t *testing.T) { + var src string = "new" + var ref = config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V("new"), nv) +} + +func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { + var src string = "new" + var ref = config.V("old") + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V("new"), nv) +} + +func TestFromTypedStringRetainsLocationsIfUnchanged(t *testing.T) { + var src string = "foo" + var ref = config.NewValue("foo", config.Location{File: "foo"}) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NewValue("foo", config.Location{File: "foo"}), nv) +} + +func TestFromTypedStringTypeError(t *testing.T) { + var src string = "foo" + var ref = config.V(1234) + _, err := FromTyped(src, ref) + require.Error(t, err) +} + +func TestFromTypedBoolEmpty(t *testing.T) { + var src bool + var ref = config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NilValue, nv) +} + +func TestFromTypedBoolEmptyOverwrite(t *testing.T) { + var src bool + var ref = config.V(true) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(false), nv) +} + +func TestFromTypedBoolNonEmpty(t *testing.T) { + var src bool = true + var ref = config.NilValue + nv, err := 
FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(true), nv) +} + +func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { + var src bool = true + var ref = config.V(false) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(true), nv) +} + +func TestFromTypedBoolRetainsLocationsIfUnchanged(t *testing.T) { + var src bool = true + var ref = config.NewValue(true, config.Location{File: "foo"}) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NewValue(true, config.Location{File: "foo"}), nv) +} + +func TestFromTypedBoolTypeError(t *testing.T) { + var src bool = true + var ref = config.V("string") + _, err := FromTyped(src, ref) + require.Error(t, err) +} + +func TestFromTypedIntEmpty(t *testing.T) { + var src int + var ref = config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NilValue, nv) +} + +func TestFromTypedIntEmptyOverwrite(t *testing.T) { + var src int + var ref = config.V(1234) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(int64(0)), nv) +} + +func TestFromTypedIntNonEmpty(t *testing.T) { + var src int = 1234 + var ref = config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(int64(1234)), nv) +} + +func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { + var src int = 1234 + var ref = config.V(1233) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(int64(1234)), nv) +} + +func TestFromTypedIntRetainsLocationsIfUnchanged(t *testing.T) { + var src int = 1234 + var ref = config.NewValue(1234, config.Location{File: "foo"}) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NewValue(1234, config.Location{File: "foo"}), nv) +} + +func TestFromTypedIntTypeError(t *testing.T) { + var src int = 1234 + var ref = config.V("string") + _, err := FromTyped(src, ref) + require.Error(t, err) +} + +func TestFromTypedFloatEmpty(t *testing.T) { + var src float64 + var ref = config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NilValue, nv) +} + +func TestFromTypedFloatEmptyOverwrite(t *testing.T) { + var src float64 + var ref = config.V(1.23) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(0.0), nv) +} + +func TestFromTypedFloatNonEmpty(t *testing.T) { + var src float64 = 1.23 + var ref = config.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(1.23), nv) +} + +func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { + var src float64 = 1.23 + var ref = config.V(1.24) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.V(1.23), nv) +} + +func TestFromTypedFloatRetainsLocationsIfUnchanged(t *testing.T) { + var src float64 = 1.23 + var ref = config.NewValue(1.23, config.Location{File: "foo"}) + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, config.NewValue(1.23, config.Location{File: "foo"}), nv) +} + +func TestFromTypedFloatTypeError(t *testing.T) { + var src float64 = 1.23 + var ref = config.V("string") + _, err := FromTyped(src, ref) + require.Error(t, err) +} diff --git a/libs/config/convert/struct_info.go b/libs/config/convert/struct_info.go index 367b9ecd..2457b3c2 100644 --- a/libs/config/convert/struct_info.go +++ b/libs/config/convert/struct_info.go @@ -85,3 +85,31 @@ func buildStructInfo(typ reflect.Type) structInfo { 
return out } + +func (s *structInfo) FieldValues(v reflect.Value) map[string]reflect.Value { + var out = make(map[string]reflect.Value) + + for k, index := range s.Fields { + fv := v + + // Locate value in struct (it could be an embedded type). + for i, x := range index { + if i > 0 { + if fv.Kind() == reflect.Pointer && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + fv = reflect.Value{} + break + } + fv = fv.Elem() + } + } + fv = fv.Field(x) + } + + if fv.IsValid() { + out[k] = fv + } + } + + return out +} diff --git a/libs/config/convert/struct_info_test.go b/libs/config/convert/struct_info_test.go index 3079958b..2e31adac 100644 --- a/libs/config/convert/struct_info_test.go +++ b/libs/config/convert/struct_info_test.go @@ -87,3 +87,110 @@ func TestStructInfoAnonymousByPointer(t *testing.T) { assert.Equal(t, []int{0, 0}, si.Fields["foo"]) assert.Equal(t, []int{0, 1, 0}, si.Fields["bar"]) } + +func TestStructInfoFieldValues(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + var src = Tmp{ + Foo: "foo", + Bar: "bar", + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + fv := si.FieldValues(reflect.ValueOf(src)) + assert.Len(t, fv, 2) + assert.Equal(t, "foo", fv["foo"].String()) + assert.Equal(t, "bar", fv["bar"].String()) +} + +func TestStructInfoFieldValuesAnonymousByValue(t *testing.T) { + type Bar struct { + Bar string `json:"bar"` + } + + type Foo struct { + Foo string `json:"foo"` + Bar + } + + type Tmp struct { + Foo + } + + var src = Tmp{ + Foo: Foo{ + Foo: "foo", + Bar: Bar{ + Bar: "bar", + }, + }, + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + fv := si.FieldValues(reflect.ValueOf(src)) + assert.Len(t, fv, 2) + assert.Equal(t, "foo", fv["foo"].String()) + assert.Equal(t, "bar", fv["bar"].String()) +} + +func TestStructInfoFieldValuesAnonymousByPointer(t *testing.T) { + type Bar struct { + Bar string `json:"bar"` + } + + type Foo struct { + Foo string `json:"foo"` + *Bar + } + + type Tmp struct { + *Foo + } + + // Test that the embedded fields are dereferenced properly. + t.Run("all are set", func(t *testing.T) { + src := Tmp{ + Foo: &Foo{ + Foo: "foo", + Bar: &Bar{ + Bar: "bar", + }, + }, + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + fv := si.FieldValues(reflect.ValueOf(src)) + assert.Len(t, fv, 2) + assert.Equal(t, "foo", fv["foo"].String()) + assert.Equal(t, "bar", fv["bar"].String()) + }) + + // Test that fields of embedded types are skipped if the embedded type is nil. + t.Run("top level is set", func(t *testing.T) { + src := Tmp{ + Foo: &Foo{ + Foo: "foo", + Bar: nil, + }, + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + fv := si.FieldValues(reflect.ValueOf(src)) + assert.Len(t, fv, 1) + assert.Equal(t, "foo", fv["foo"].String()) + }) + + // Test that fields of embedded types are skipped if the embedded type is nil. + t.Run("none are set", func(t *testing.T) { + src := Tmp{ + Foo: nil, + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + fv := si.FieldValues(reflect.ValueOf(src)) + assert.Empty(t, fv) + }) +} diff --git a/libs/config/convert/to_typed.go b/libs/config/convert/to_typed.go index 9915d30a..ca09fce4 100644 --- a/libs/config/convert/to_typed.go +++ b/libs/config/convert/to_typed.go @@ -13,6 +13,12 @@ func ToTyped(dst any, src config.Value) error { // Dereference pointer if necessary for dstv.Kind() == reflect.Pointer { + // If the source value is nil and the destination is a settable pointer, + // set the destination to nil. Also see `end_to_end_test.go`. 
+		if dstv.CanSet() && src == config.NilValue {
+			dstv.SetZero()
+			return nil
+		}
 		if dstv.IsNil() {
 			dstv.Set(reflect.New(dstv.Type().Elem()))
 		}

From 0c837e57725045c31c5440f6790880e8c1e8586e Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Wed, 15 Nov 2023 14:37:26 +0100
Subject: [PATCH 238/310] Make `file_path` and `artifact_path` fields
 consistent with json tag (#987)

## Changes
This PR:
1. Renames `FilesPath` -> `FilePath` and `ArtifactsPath` -> `ArtifactPath` in
the bundle and metadata configuration to make them consistent with the
json tags.
2. Fixes development / production mode error messages to point to
`file_path` and `artifact_path`.

## Tests
Existing unit tests. This is a straightforward renaming of the fields.

---
 bundle/artifacts/artifacts.go                    |  2 +-
 bundle/config/mutator/default_workspace_paths.go |  8 ++++----
 .../mutator/default_workspace_paths_test.go      | 16 ++++++++--------
 bundle/config/mutator/process_target_mode.go     |  8 ++++----
 .../config/mutator/process_target_mode_test.go   | 10 +++++-----
 bundle/config/mutator/trampoline.go              |  2 +-
 bundle/config/mutator/translate_paths.go         |  2 +-
 bundle/config/mutator/translate_paths_test.go    | 16 ++++++++--------
 bundle/config/workspace.go                       |  4 ++--
 bundle/deploy/files/sync.go                      |  2 +-
 bundle/deploy/files/upload.go                    |  2 +-
 bundle/deploy/metadata/compute.go                |  2 +-
 bundle/deploy/metadata/compute_test.go           |  8 ++++----
 bundle/metadata/metadata.go                      |  2 +-
 cmd/bundle/sync.go                               |  2 +-
 cmd/sync/sync.go                                 |  2 +-
 cmd/sync/sync_test.go                            |  2 +-
 internal/bundle/artifacts_test.go                |  2 +-
 internal/bundle/job_metadata_test.go             |  2 +-
 19 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go
index e55ae4e8..e703668e 100644
--- a/bundle/artifacts/artifacts.go
+++ b/bundle/artifacts/artifacts.go
@@ -160,7 +160,7 @@ func uploadArtifactFile(ctx context.Context, file string, uploadPath string, cli
 }
 
 func getUploadBasePath(b *bundle.Bundle) (string, error) {
-	artifactPath := b.Config.Workspace.ArtifactsPath
+	artifactPath := b.Config.Workspace.ArtifactPath
 	if artifactPath == "" {
 		return "", fmt.Errorf("remote artifact path not configured")
 	}
diff --git a/bundle/config/mutator/default_workspace_paths.go b/bundle/config/mutator/default_workspace_paths.go
index b444ba96..04f2b0dc 100644
--- a/bundle/config/mutator/default_workspace_paths.go
+++ b/bundle/config/mutator/default_workspace_paths.go
@@ -25,12 +25,12 @@ func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundl
 		return fmt.Errorf("unable to define default workspace paths: workspace root not defined")
 	}
 
-	if b.Config.Workspace.FilesPath == "" {
-		b.Config.Workspace.FilesPath = path.Join(root, "files")
+	if b.Config.Workspace.FilePath == "" {
+		b.Config.Workspace.FilePath = path.Join(root, "files")
 	}
 
-	if b.Config.Workspace.ArtifactsPath == "" {
-		b.Config.Workspace.ArtifactsPath = path.Join(root, "artifacts")
+	if b.Config.Workspace.ArtifactPath == "" {
+		b.Config.Workspace.ArtifactPath = path.Join(root, "artifacts")
 	}
 
 	if b.Config.Workspace.StatePath == "" {
diff --git a/bundle/config/mutator/default_workspace_paths_test.go b/bundle/config/mutator/default_workspace_paths_test.go
index 308f82c4..033b7f48 100644
--- a/bundle/config/mutator/default_workspace_paths_test.go
+++ b/bundle/config/mutator/default_workspace_paths_test.go
@@ -21,8 +21,8 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) {
 	}
 	err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
require.NoError(t, err) - assert.Equal(t, "/files", bundle.Config.Workspace.FilesPath) - assert.Equal(t, "/artifacts", bundle.Config.Workspace.ArtifactsPath) + assert.Equal(t, "/files", bundle.Config.Workspace.FilePath) + assert.Equal(t, "/artifacts", bundle.Config.Workspace.ArtifactPath) assert.Equal(t, "/state", bundle.Config.Workspace.StatePath) } @@ -30,16 +30,16 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) { bundle := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ - RootPath: "/", - FilesPath: "/foo/bar", - ArtifactsPath: "/foo/bar", - StatePath: "/foo/bar", + RootPath: "/", + FilePath: "/foo/bar", + ArtifactPath: "/foo/bar", + StatePath: "/foo/bar", }, }, } err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle) require.NoError(t, err) - assert.Equal(t, "/foo/bar", bundle.Config.Workspace.FilesPath) - assert.Equal(t, "/foo/bar", bundle.Config.Workspace.ArtifactsPath) + assert.Equal(t, "/foo/bar", bundle.Config.Workspace.FilePath) + assert.Equal(t, "/foo/bar", bundle.Config.Workspace.ArtifactPath) assert.Equal(t, "/foo/bar", bundle.Config.Workspace.StatePath) } diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index bd161e95..f9d2795a 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -120,11 +120,11 @@ func findIncorrectPath(b *bundle.Bundle, mode config.Mode) string { if strings.Contains(b.Config.Workspace.StatePath, username) != containsExpected { return "state_path" } - if strings.Contains(b.Config.Workspace.FilesPath, username) != containsExpected { - return "files_path" + if strings.Contains(b.Config.Workspace.FilePath, username) != containsExpected { + return "file_path" } - if strings.Contains(b.Config.Workspace.ArtifactsPath, username) != containsExpected { - return "artifacts_path" + if strings.Contains(b.Config.Workspace.ArtifactPath, username) != containsExpected { + return "artifact_path" } return "" } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index f7e78da2..2e438f6e 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -39,9 +39,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle { Id: "1", }, }, - StatePath: "/Users/lennart@company.com/.bundle/x/y/state", - ArtifactsPath: "/Users/lennart@company.com/.bundle/x/y/artifacts", - FilesPath: "/Users/lennart@company.com/.bundle/x/y/files", + StatePath: "/Users/lennart@company.com/.bundle/x/y/state", + ArtifactPath: "/Users/lennart@company.com/.bundle/x/y/artifacts", + FilePath: "/Users/lennart@company.com/.bundle/x/y/files", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -190,8 +190,8 @@ func TestProcessTargetModeProduction(t *testing.T) { require.ErrorContains(t, err, "state_path") bundle.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state" - bundle.Config.Workspace.ArtifactsPath = "/Shared/.bundle/x/y/artifacts" - bundle.Config.Workspace.FilesPath = "/Shared/.bundle/x/y/files" + bundle.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts" + bundle.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files" err = validateProductionMode(context.Background(), bundle, false) require.ErrorContains(t, err, "production") diff --git a/bundle/config/mutator/trampoline.go b/bundle/config/mutator/trampoline.go index 52d62c1b..24600f52 100644 --- a/bundle/config/mutator/trampoline.go +++ 
b/bundle/config/mutator/trampoline.go @@ -90,7 +90,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund if err != nil { return err } - remotePath := path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(internalDirRel), notebookName) + remotePath := path.Join(b.Config.Workspace.FilePath, filepath.ToSlash(internalDirRel), notebookName) task.Task.NotebookTask = &jobs.NotebookTask{ NotebookPath: remotePath, diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 8d3c8ce3..b4a17afc 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -92,7 +92,7 @@ func (m *translatePaths) rewritePath( } // Prefix remote path with its remote root path. - remotePath := path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(localRelPath)) + remotePath := path.Join(b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath)) // Convert local path into workspace path via specified function. interp, err := fn(*p, localPath, localRelPath, filepath.ToSlash(remotePath)) diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index c24fd2e7..2e578dd9 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -39,7 +39,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) { Config: config.Root{ Path: dir, Workspace: config.Workspace{ - FilesPath: "/bundle", + FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -111,7 +111,7 @@ func TestTranslatePaths(t *testing.T) { Config: config.Root{ Path: dir, Workspace: config.Workspace{ - FilesPath: "/bundle", + FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -282,7 +282,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { Config: config.Root{ Path: dir, Workspace: config.Workspace{ - FilesPath: "/bundle", + FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -380,7 +380,7 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { Config: config.Root{ Path: dir, Workspace: config.Workspace{ - FilesPath: "/bundle", + FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -539,7 +539,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { Config: config.Root{ Path: dir, Workspace: config.Workspace{ - FilesPath: "/bundle", + FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -574,7 +574,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { Config: config.Root{ Path: dir, Workspace: config.Workspace{ - FilesPath: "/bundle", + FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -609,7 +609,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { Config: config.Root{ Path: dir, Workspace: config.Workspace{ - FilesPath: "/bundle", + FilePath: "/bundle", }, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ @@ -644,7 +644,7 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { Config: config.Root{ Path: dir, Workspace: config.Workspace{ - FilesPath: "/bundle", + FilePath: "/bundle", }, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ diff --git a/bundle/config/workspace.go b/bundle/config/workspace.go index 16a70afb..5f8691ba 100644 --- a/bundle/config/workspace.go +++ b/bundle/config/workspace.go @@ -52,11 
+52,11 @@ type Workspace struct { // Remote workspace path to synchronize local files to. // This defaults to "${workspace.root}/files". - FilesPath string `json:"file_path,omitempty"` + FilePath string `json:"file_path,omitempty"` // Remote workspace path for build artifacts. // This defaults to "${workspace.root}/artifacts". - ArtifactsPath string `json:"artifact_path,omitempty"` + ArtifactPath string `json:"artifact_path,omitempty"` // Remote workspace path for deployment state. // This defaults to "${workspace.root}/state". diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index ff3d78d0..148a63ff 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -21,7 +21,7 @@ func getSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) { opts := sync.SyncOptions{ LocalPath: b.Config.Path, - RemotePath: b.Config.Workspace.FilesPath, + RemotePath: b.Config.Workspace.FilePath, Include: includes, Exclude: b.Config.Sync.Exclude, diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index 9b7a85a4..aebbf6d5 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -26,7 +26,7 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - cmdio.LogString(ctx, fmt.Sprintf("Uploaded bundle files at %s!\n", b.Config.Workspace.FilesPath)) + cmdio.LogString(ctx, fmt.Sprintf("Uploaded bundle files at %s!\n", b.Config.Workspace.FilePath)) return nil } diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go index 9a3ae0e3..460a81c9 100644 --- a/bundle/deploy/metadata/compute.go +++ b/bundle/deploy/metadata/compute.go @@ -46,6 +46,6 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) error { b.Metadata.Config.Resources.Jobs = jobsMetadata // Set file upload destination of the bundle in metadata - b.Metadata.Config.Workspace.FilesPath = b.Config.Workspace.FilesPath + b.Metadata.Config.Workspace.FilePath = b.Config.Workspace.FilePath return nil } diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go index 9e4b475c..ffa352d0 100644 --- a/bundle/deploy/metadata/compute_test.go +++ b/bundle/deploy/metadata/compute_test.go @@ -18,9 +18,9 @@ func TestComputeMetadataMutator(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ - RootPath: "/Users/shreyas.goenka@databricks.com", - ArtifactsPath: "/Users/shreyas.goenka@databricks.com/artifacts", - FilesPath: "/Users/shreyas.goenka@databricks.com/files", + RootPath: "/Users/shreyas.goenka@databricks.com", + ArtifactPath: "/Users/shreyas.goenka@databricks.com/artifacts", + FilePath: "/Users/shreyas.goenka@databricks.com/files", }, Bundle: config.Bundle{ Name: "my-bundle", @@ -68,7 +68,7 @@ func TestComputeMetadataMutator(t *testing.T) { Version: metadata.Version, Config: metadata.Config{ Workspace: metadata.Workspace{ - FilesPath: "/Users/shreyas.goenka@databricks.com/files", + FilePath: "/Users/shreyas.goenka@databricks.com/files", }, Bundle: metadata.Bundle{ Git: config.Git{ diff --git a/bundle/metadata/metadata.go b/bundle/metadata/metadata.go index 441f1463..78c8cb18 100644 --- a/bundle/metadata/metadata.go +++ b/bundle/metadata/metadata.go @@ -11,7 +11,7 @@ type Bundle struct { } type Workspace struct { - FilesPath string `json:"file_path"` + FilePath string `json:"file_path"` } type Job struct { diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index 6d6a6f5a..ca81275b 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ 
-30,7 +30,7 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) opts := sync.SyncOptions{ LocalPath: b.Config.Path, - RemotePath: b.Config.Workspace.FilesPath, + RemotePath: b.Config.Workspace.FilePath, Include: includes, Exclude: b.Config.Sync.Exclude, Full: f.full, diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 2870e1e0..f00c02a8 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -41,7 +41,7 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b * opts := sync.SyncOptions{ LocalPath: b.Config.Path, - RemotePath: b.Config.Workspace.FilesPath, + RemotePath: b.Config.Workspace.FilePath, Include: includes, Exclude: b.Config.Sync.Exclude, Full: f.full, diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index 14f641ff..827c4d50 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -24,7 +24,7 @@ func TestSyncOptionsFromBundle(t *testing.T) { }, Workspace: config.Workspace{ - FilesPath: "/Users/jane@doe.com/path", + FilePath: "/Users/jane@doe.com/path", }, }, } diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 9d1a171a..689a4b4b 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -54,7 +54,7 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { Target: "whatever", }, Workspace: config.Workspace{ - ArtifactsPath: wsDir, + ArtifactPath: wsDir, }, Artifacts: config.Artifacts{ "test": artifact, diff --git a/internal/bundle/job_metadata_test.go b/internal/bundle/job_metadata_test.go index 70962c4c..3e2bb7f0 100644 --- a/internal/bundle/job_metadata_test.go +++ b/internal/bundle/job_metadata_test.go @@ -83,7 +83,7 @@ func TestAccJobsMetadataFile(t *testing.T) { }, }, Workspace: metadata.Workspace{ - FilesPath: path.Join(root, "files"), + FilePath: path.Join(root, "files"), }, Resources: metadata.Resources{ Jobs: map[string]*metadata.Job{ From d80c35f66af9570f98bdf7e970af7970e1a4dead Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 15 Nov 2023 15:03:36 +0100 Subject: [PATCH 239/310] Rename variable `bundle -> b` (#989) ## Changes All calls to apply a mutator must go through `bundle.Apply`. This conflicts with the existing use of the variable `bundle`. This change un-aliases the variable from the package name by renaming all variables to `b`. ## Tests Pass. 
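To illustrate the conflict, here is a minimal sketch (the `runMutator`
helper is hypothetical; it assumes the `bundle.Apply(ctx, b, m) error`
entry point this series uses):

```go
package example

import (
	"context"

	"github.com/databricks/cli/bundle"
)

// If this parameter were named `bundle`, the identifier would shadow the
// imported package inside the function body, and the `bundle.Apply` call
// below would fail to compile. Naming it `b` keeps the package reference
// unambiguous.
func runMutator(ctx context.Context, b *bundle.Bundle, m bundle.Mutator) error {
	return bundle.Apply(ctx, b, m)
}
```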
--- bundle/bundle.go | 10 +- bundle/config/mutator/default_target_test.go | 12 +- .../mutator/default_workspace_paths_test.go | 20 +-- .../mutator/default_workspace_root_test.go | 6 +- .../mutator/expand_workspace_root_test.go | 20 +-- .../config/mutator/override_compute_test.go | 40 +++--- bundle/config/mutator/process_include_test.go | 10 +- .../mutator/process_root_includes_test.go | 54 +++---- .../mutator/process_target_mode_test.go | 136 +++++++++--------- .../mutator/select_default_target_test.go | 28 ++-- bundle/config/mutator/select_target_test.go | 10 +- bundle/config/mutator/set_variables_test.go | 10 +- bundle/config/mutator/translate_paths_test.go | 86 +++++------ .../mutator/validate_git_details_test.go | 12 +- bundle/context.go | 6 +- bundle/deferred_test.go | 20 +-- bundle/deploy/terraform/init_test.go | 6 +- bundle/mutator_test.go | 4 +- bundle/python/transform_test.go | 4 +- bundle/seq_test.go | 20 +-- 20 files changed, 257 insertions(+), 257 deletions(-) diff --git a/bundle/bundle.go b/bundle/bundle.go index a2d774bb..b4f5ee10 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -63,7 +63,7 @@ type Bundle struct { } func Load(ctx context.Context, path string) (*Bundle, error) { - bundle := &Bundle{} + b := &Bundle{} stat, err := os.Stat(path) if err != nil { return nil, err @@ -74,13 +74,13 @@ func Load(ctx context.Context, path string) (*Bundle, error) { _, hasIncludesEnv := env.Includes(ctx) if hasRootEnv && hasIncludesEnv && stat.IsDir() { log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path) - bundle.Config = config.Root{ + b.Config = config.Root{ Path: path, Bundle: config.Bundle{ Name: filepath.Base(path), }, } - return bundle, nil + return b, nil } return nil, err } @@ -89,8 +89,8 @@ func Load(ctx context.Context, path string) (*Bundle, error) { if err != nil { return nil, err } - bundle.Config = *root - return bundle, nil + b.Config = *root + return b, nil } // MustLoad returns a bundle configuration. 
diff --git a/bundle/config/mutator/default_target_test.go b/bundle/config/mutator/default_target_test.go index 49fbe6de..9214c4ef 100644 --- a/bundle/config/mutator/default_target_test.go +++ b/bundle/config/mutator/default_target_test.go @@ -12,24 +12,24 @@ import ( ) func TestDefaultTarget(t *testing.T) { - bundle := &bundle.Bundle{} - err := mutator.DefineDefaultTarget().Apply(context.Background(), bundle) + b := &bundle.Bundle{} + err := mutator.DefineDefaultTarget().Apply(context.Background(), b) require.NoError(t, err) - env, ok := bundle.Config.Targets["default"] + env, ok := b.Config.Targets["default"] assert.True(t, ok) assert.Equal(t, &config.Target{}, env) } func TestDefaultTargetAlreadySpecified(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Targets: map[string]*config.Target{ "development": {}, }, }, } - err := mutator.DefineDefaultTarget().Apply(context.Background(), bundle) + err := mutator.DefineDefaultTarget().Apply(context.Background(), b) require.NoError(t, err) - _, ok := bundle.Config.Targets["default"] + _, ok := b.Config.Targets["default"] assert.False(t, ok) } diff --git a/bundle/config/mutator/default_workspace_paths_test.go b/bundle/config/mutator/default_workspace_paths_test.go index 033b7f48..56b3c74c 100644 --- a/bundle/config/mutator/default_workspace_paths_test.go +++ b/bundle/config/mutator/default_workspace_paths_test.go @@ -12,22 +12,22 @@ import ( ) func TestDefineDefaultWorkspacePaths(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ RootPath: "/", }, }, } - err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle) + err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "/files", bundle.Config.Workspace.FilePath) - assert.Equal(t, "/artifacts", bundle.Config.Workspace.ArtifactPath) - assert.Equal(t, "/state", bundle.Config.Workspace.StatePath) + assert.Equal(t, "/files", b.Config.Workspace.FilePath) + assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath) + assert.Equal(t, "/state", b.Config.Workspace.StatePath) } func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ RootPath: "/", @@ -37,9 +37,9 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) { }, }, } - err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle) + err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "/foo/bar", bundle.Config.Workspace.FilePath) - assert.Equal(t, "/foo/bar", bundle.Config.Workspace.ArtifactPath) - assert.Equal(t, "/foo/bar", bundle.Config.Workspace.StatePath) + assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath) + assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath) + assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath) } diff --git a/bundle/config/mutator/default_workspace_root_test.go b/bundle/config/mutator/default_workspace_root_test.go index 1822dca0..ad921f6f 100644 --- a/bundle/config/mutator/default_workspace_root_test.go +++ b/bundle/config/mutator/default_workspace_root_test.go @@ -12,7 +12,7 @@ import ( ) func TestDefaultWorkspaceRoot(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ Name: "name", @@ -20,7 +20,7 @@ func TestDefaultWorkspaceRoot(t *testing.T) { }, }, } - err 
:= mutator.DefineDefaultWorkspaceRoot().Apply(context.Background(), bundle) + err := mutator.DefineDefaultWorkspaceRoot().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "~/.bundle/name/environment", bundle.Config.Workspace.RootPath) + assert.Equal(t, "~/.bundle/name/environment", b.Config.Workspace.RootPath) } diff --git a/bundle/config/mutator/expand_workspace_root_test.go b/bundle/config/mutator/expand_workspace_root_test.go index 0ec11a07..217c07c5 100644 --- a/bundle/config/mutator/expand_workspace_root_test.go +++ b/bundle/config/mutator/expand_workspace_root_test.go @@ -13,7 +13,7 @@ import ( ) func TestExpandWorkspaceRoot(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ CurrentUser: &config.User{ @@ -25,13 +25,13 @@ func TestExpandWorkspaceRoot(t *testing.T) { }, }, } - err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle) + err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "/Users/jane@doe.com/foo", bundle.Config.Workspace.RootPath) + assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath) } func TestExpandWorkspaceRootDoesNothing(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ CurrentUser: &config.User{ @@ -43,13 +43,13 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) { }, }, } - err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle) + err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "/Users/charly@doe.com/foo", bundle.Config.Workspace.RootPath) + assert.Equal(t, "/Users/charly@doe.com/foo", b.Config.Workspace.RootPath) } func TestExpandWorkspaceRootWithoutRoot(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ CurrentUser: &config.User{ @@ -60,18 +60,18 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) { }, }, } - err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle) + err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), b) require.Error(t, err) } func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ RootPath: "~/foo", }, }, } - err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle) + err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), b) require.Error(t, err) } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index cb37eeb5..70d7f238 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -16,7 +16,7 @@ import ( func TestOverrideDevelopment(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "") - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ Mode: config.Development, @@ -47,22 +47,22 @@ func TestOverrideDevelopment(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) require.NoError(t, err) - assert.Nil(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) - assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) - assert.Equal(t, "newClusterID", 
bundle.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) - assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[2].ExistingClusterId) - assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[3].ExistingClusterId) + assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) + assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) + assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) + assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[2].ExistingClusterId) + assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[3].ExistingClusterId) - assert.Nil(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) - assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[2].ComputeKey) - assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey) + assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) + assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[2].ComputeKey) + assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey) } func TestOverrideDevelopmentEnv(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -83,14 +83,14 @@ func TestOverrideDevelopmentEnv(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "cluster2", bundle.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) + assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } func TestOverridePipelineTask(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -108,13 +108,13 @@ func TestOverridePipelineTask(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) require.NoError(t, err) - assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) + assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } func TestOverrideProduction(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ ComputeID: "newClusterID", @@ -138,13 +138,13 @@ func TestOverrideProduction(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) require.Error(t, err) } func TestOverrideProductionEnv(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -165,6 +165,6 @@ func TestOverrideProductionEnv(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) require.NoError(t, err) } diff --git a/bundle/config/mutator/process_include_test.go b/bundle/config/mutator/process_include_test.go index e5e27f9e..eb1cb291 100644 --- a/bundle/config/mutator/process_include_test.go +++ b/bundle/config/mutator/process_include_test.go @@ -15,7 +15,7 @@ import ( ) func TestProcessInclude(t *testing.T) { - bundle := 
&bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: t.TempDir(), Workspace: config.Workspace{ @@ -25,14 +25,14 @@ func TestProcessInclude(t *testing.T) { } relPath := "./file.yml" - fullPath := filepath.Join(bundle.Config.Path, relPath) + fullPath := filepath.Join(b.Config.Path, relPath) f, err := os.Create(fullPath) require.NoError(t, err) fmt.Fprint(f, "workspace:\n host: bar\n") f.Close() - assert.Equal(t, "foo", bundle.Config.Workspace.Host) - err = mutator.ProcessInclude(fullPath, relPath).Apply(context.Background(), bundle) + assert.Equal(t, "foo", b.Config.Workspace.Host) + err = mutator.ProcessInclude(fullPath, relPath).Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "bar", bundle.Config.Workspace.Host) + assert.Equal(t, "bar", b.Config.Workspace.Host) } diff --git a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/mutator/process_root_includes_test.go index aec9b32d..7a0b9e65 100644 --- a/bundle/config/mutator/process_root_includes_test.go +++ b/bundle/config/mutator/process_root_includes_test.go @@ -24,12 +24,12 @@ func touch(t *testing.T, path, file string) { } func TestProcessRootIncludesEmpty(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: ".", }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle) + err := mutator.ProcessRootIncludes().Apply(context.Background(), b) require.NoError(t, err) } @@ -41,7 +41,7 @@ func TestProcessRootIncludesAbs(t *testing.T) { t.Skip("skipping temperorilty to make windows unit tests green") } - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: ".", Include: []string{ @@ -49,13 +49,13 @@ func TestProcessRootIncludesAbs(t *testing.T) { }, }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle) + err := mutator.ProcessRootIncludes().Apply(context.Background(), b) require.Error(t, err) assert.Contains(t, err.Error(), "must be relative paths") } func TestProcessRootIncludesSingleGlob(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: t.TempDir(), Include: []string{ @@ -64,18 +64,18 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) { }, } - touch(t, bundle.Config.Path, "databricks.yml") - touch(t, bundle.Config.Path, "a.yml") - touch(t, bundle.Config.Path, "b.yml") + touch(t, b.Config.Path, "databricks.yml") + touch(t, b.Config.Path, "a.yml") + touch(t, b.Config.Path, "b.yml") - err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle) + err := mutator.ProcessRootIncludes().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, []string{"a.yml", "b.yml"}, bundle.Config.Include) + assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include) } func TestProcessRootIncludesMultiGlob(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: t.TempDir(), Include: []string{ @@ -85,17 +85,17 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) { }, } - touch(t, bundle.Config.Path, "a1.yml") - touch(t, bundle.Config.Path, "b1.yml") + touch(t, b.Config.Path, "a1.yml") + touch(t, b.Config.Path, "b1.yml") - err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle) + err := mutator.ProcessRootIncludes().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, []string{"a1.yml", "b1.yml"}, bundle.Config.Include) + assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include) } func TestProcessRootIncludesRemoveDups(t *testing.T) { - bundle 
:= &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: t.TempDir(), Include: []string{ @@ -105,15 +105,15 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) { }, } - touch(t, bundle.Config.Path, "a.yml") + touch(t, b.Config.Path, "a.yml") - err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle) + err := mutator.ProcessRootIncludes().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, []string{"a.yml"}, bundle.Config.Include) + assert.Equal(t, []string{"a.yml"}, b.Config.Include) } func TestProcessRootIncludesNotExists(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: t.TempDir(), Include: []string{ @@ -121,7 +121,7 @@ func TestProcessRootIncludesNotExists(t *testing.T) { }, }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle) + err := mutator.ProcessRootIncludes().Apply(context.Background(), b) require.Error(t, err) assert.Contains(t, err.Error(), "notexist.yml defined in 'include' section does not match any files") } @@ -132,15 +132,15 @@ func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) { touch(t, rootPath, testYamlName) t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName)) - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: rootPath, }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle) + err := mutator.ProcessRootIncludes().Apply(context.Background(), b) require.NoError(t, err) - assert.Contains(t, bundle.Config.Include, testYamlName) + assert.Contains(t, b.Config.Include, testYamlName) } func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) { @@ -155,13 +155,13 @@ func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) { string(os.PathListSeparator), )) - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: rootPath, }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle) + err := mutator.ProcessRootIncludes().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, []string{testYamlName}, bundle.Config.Include) + assert.Equal(t, []string{testYamlName}, b.Config.Include) } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 2e438f6e..6ce3fcdf 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -89,111 +89,111 @@ func mockBundle(mode config.Mode) *bundle.Bundle { } func TestProcessTargetModeDevelopment(t *testing.T) { - bundle := mockBundle(config.Development) + b := mockBundle(config.Development) m := ProcessTargetMode() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) require.NoError(t, err) // Job 1 - assert.Equal(t, "[dev lennart] job1", bundle.Config.Resources.Jobs["job1"].Name) - assert.Equal(t, bundle.Config.Resources.Jobs["job1"].Tags["dev"], "lennart") - assert.Equal(t, bundle.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused) + assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name) + assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["dev"], "lennart") + assert.Equal(t, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused) // Job 2 - assert.Equal(t, "[dev lennart] job2", bundle.Config.Resources.Jobs["job2"].Name) - assert.Equal(t, bundle.Config.Resources.Jobs["job2"].Tags["dev"], "lennart") - assert.Equal(t, 
bundle.Config.Resources.Jobs["job2"].Schedule.PauseStatus, jobs.PauseStatusUnpaused) + assert.Equal(t, "[dev lennart] job2", b.Config.Resources.Jobs["job2"].Name) + assert.Equal(t, b.Config.Resources.Jobs["job2"].Tags["dev"], "lennart") + assert.Equal(t, b.Config.Resources.Jobs["job2"].Schedule.PauseStatus, jobs.PauseStatusUnpaused) // Pipeline 1 - assert.Equal(t, "[dev lennart] pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name) - assert.True(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) + assert.Equal(t, "[dev lennart] pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) + assert.True(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) // Experiment 1 - assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev lennart] experiment1", bundle.Config.Resources.Experiments["experiment1"].Name) - assert.Contains(t, bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"}) - assert.Equal(t, "dev", bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key) + assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev lennart] experiment1", b.Config.Resources.Experiments["experiment1"].Name) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"}) + assert.Equal(t, "dev", b.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key) // Experiment 2 - assert.Equal(t, "[dev lennart] experiment2", bundle.Config.Resources.Experiments["experiment2"].Name) - assert.Contains(t, bundle.Config.Resources.Experiments["experiment2"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"}) + assert.Equal(t, "[dev lennart] experiment2", b.Config.Resources.Experiments["experiment2"].Name) + assert.Contains(t, b.Config.Resources.Experiments["experiment2"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"}) // Model 1 - assert.Equal(t, "[dev lennart] model1", bundle.Config.Resources.Models["model1"].Name) + assert.Equal(t, "[dev lennart] model1", b.Config.Resources.Models["model1"].Name) // Model serving endpoint 1 - assert.Equal(t, "dev_lennart_servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) + assert.Equal(t, "dev_lennart_servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) // Registered model 1 - assert.Equal(t, "dev_lennart_registeredmodel1", bundle.Config.Resources.RegisteredModels["registeredmodel1"].Name) + assert.Equal(t, "dev_lennart_registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) } func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { - bundle := mockBundle(config.Development) - bundle.Tagging = tags.ForCloud(&sdkconfig.Config{ + b := mockBundle(config.Development) + b.Tagging = tags.ForCloud(&sdkconfig.Config{ Host: "https://dbc-XXXXXXXX-YYYY.cloud.databricks.com/", }) - bundle.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - err := ProcessTargetMode().Apply(context.Background(), bundle) + b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" + err := ProcessTargetMode().Apply(context.Background(), b) require.NoError(t, err) // Assert that tag normalization took place. 
- assert.Equal(t, "Hello world__", bundle.Config.Resources.Jobs["job1"].Tags["dev"]) + assert.Equal(t, "Hello world__", b.Config.Resources.Jobs["job1"].Tags["dev"]) } func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) { - bundle := mockBundle(config.Development) - bundle.Tagging = tags.ForCloud(&sdkconfig.Config{ + b := mockBundle(config.Development) + b.Tagging = tags.ForCloud(&sdkconfig.Config{ Host: "https://adb-xxx.y.azuredatabricks.net/", }) - bundle.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - err := ProcessTargetMode().Apply(context.Background(), bundle) + b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" + err := ProcessTargetMode().Apply(context.Background(), b) require.NoError(t, err) // Assert that tag normalization took place (Azure allows more characters than AWS). - assert.Equal(t, "Héllö wörld?!", bundle.Config.Resources.Jobs["job1"].Tags["dev"]) + assert.Equal(t, "Héllö wörld?!", b.Config.Resources.Jobs["job1"].Tags["dev"]) } func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) { - bundle := mockBundle(config.Development) - bundle.Tagging = tags.ForCloud(&sdkconfig.Config{ + b := mockBundle(config.Development) + b.Tagging = tags.ForCloud(&sdkconfig.Config{ Host: "https://123.4.gcp.databricks.com/", }) - bundle.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - err := ProcessTargetMode().Apply(context.Background(), bundle) + b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" + err := ProcessTargetMode().Apply(context.Background(), b) require.NoError(t, err) // Assert that tag normalization took place. - assert.Equal(t, "Hello_world", bundle.Config.Resources.Jobs["job1"].Tags["dev"]) + assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"]) } func TestProcessTargetModeDefault(t *testing.T) { - bundle := mockBundle("") + b := mockBundle("") m := ProcessTargetMode() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name) - assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name) - assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) - assert.Equal(t, "servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) - assert.Equal(t, "registeredmodel1", bundle.Config.Resources.RegisteredModels["registeredmodel1"].Name) + assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name) + assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) + assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) + assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) + assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) } func TestProcessTargetModeProduction(t *testing.T) { - bundle := mockBundle(config.Production) + b := mockBundle(config.Production) - err := validateProductionMode(context.Background(), bundle, false) + err := validateProductionMode(context.Background(), b, false) require.ErrorContains(t, err, "state_path") - bundle.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state" - bundle.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts" - bundle.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files" + b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state" + b.Config.Workspace.ArtifactPath = 
"/Shared/.bundle/x/y/artifacts" + b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files" - err = validateProductionMode(context.Background(), bundle, false) + err = validateProductionMode(context.Background(), b, false) require.ErrorContains(t, err, "production") permissions := []resources.Permission{ @@ -202,41 +202,41 @@ func TestProcessTargetModeProduction(t *testing.T) { UserName: "user@company.com", }, } - bundle.Config.Resources.Jobs["job1"].Permissions = permissions - bundle.Config.Resources.Jobs["job1"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"} - bundle.Config.Resources.Jobs["job2"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"} - bundle.Config.Resources.Pipelines["pipeline1"].Permissions = permissions - bundle.Config.Resources.Experiments["experiment1"].Permissions = permissions - bundle.Config.Resources.Experiments["experiment2"].Permissions = permissions - bundle.Config.Resources.Models["model1"].Permissions = permissions - bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions + b.Config.Resources.Jobs["job1"].Permissions = permissions + b.Config.Resources.Jobs["job1"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"} + b.Config.Resources.Jobs["job2"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"} + b.Config.Resources.Pipelines["pipeline1"].Permissions = permissions + b.Config.Resources.Experiments["experiment1"].Permissions = permissions + b.Config.Resources.Experiments["experiment2"].Permissions = permissions + b.Config.Resources.Models["model1"].Permissions = permissions + b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions - err = validateProductionMode(context.Background(), bundle, false) + err = validateProductionMode(context.Background(), b, false) require.NoError(t, err) - assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name) - assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name) - assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) - assert.Equal(t, "servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) - assert.Equal(t, "registeredmodel1", bundle.Config.Resources.RegisteredModels["registeredmodel1"].Name) + assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name) + assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) + assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) + assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) + assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) } func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { - bundle := mockBundle(config.Production) + b := mockBundle(config.Production) // Our target has all kinds of problems when not using service principals ... - err := validateProductionMode(context.Background(), bundle, false) + err := validateProductionMode(context.Background(), b, false) require.Error(t, err) // ... 
but we're much less strict when a principal is used - err = validateProductionMode(context.Background(), bundle, true) + err = validateProductionMode(context.Background(), b, true) require.NoError(t, err) } // Make sure that we have test coverage for all resource types func TestAllResourcesMocked(t *testing.T) { - bundle := mockBundle(config.Development) - resources := reflect.ValueOf(bundle.Config.Resources) + b := mockBundle(config.Development) + resources := reflect.ValueOf(b.Config.Resources) for i := 0; i < resources.NumField(); i++ { field := resources.Field(i) @@ -253,11 +253,11 @@ func TestAllResourcesMocked(t *testing.T) { // Make sure that we at least rename all resources func TestAllResourcesRenamed(t *testing.T) { - bundle := mockBundle(config.Development) - resources := reflect.ValueOf(bundle.Config.Resources) + b := mockBundle(config.Development) + resources := reflect.ValueOf(b.Config.Resources) m := ProcessTargetMode() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) require.NoError(t, err) for i := 0; i < resources.NumField(); i++ { diff --git a/bundle/config/mutator/select_default_target_test.go b/bundle/config/mutator/select_default_target_test.go index 5d7b93b2..cb595f56 100644 --- a/bundle/config/mutator/select_default_target_test.go +++ b/bundle/config/mutator/select_default_target_test.go @@ -11,30 +11,30 @@ import ( ) func TestSelectDefaultTargetNoTargets(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Targets: map[string]*config.Target{}, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + err := mutator.SelectDefaultTarget().Apply(context.Background(), b) assert.ErrorContains(t, err, "no targets defined") } func TestSelectDefaultTargetSingleTargets(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Targets: map[string]*config.Target{ "foo": {}, }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + err := mutator.SelectDefaultTarget().Apply(context.Background(), b) assert.NoError(t, err) - assert.Equal(t, "foo", bundle.Config.Bundle.Target) + assert.Equal(t, "foo", b.Config.Bundle.Target) } func TestSelectDefaultTargetNoDefaults(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Targets: map[string]*config.Target{ "foo": {}, @@ -43,12 +43,12 @@ func TestSelectDefaultTargetNoDefaults(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + err := mutator.SelectDefaultTarget().Apply(context.Background(), b) assert.ErrorContains(t, err, "please specify target") } func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Targets: map[string]*config.Target{ "foo": nil, @@ -56,12 +56,12 @@ func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + err := mutator.SelectDefaultTarget().Apply(context.Background(), b) assert.ErrorContains(t, err, "please specify target") } func TestSelectDefaultTargetMultipleDefaults(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Targets: map[string]*config.Target{ "foo": {Default: true}, @@ -70,12 +70,12 @@ func TestSelectDefaultTargetMultipleDefaults(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + err := 
mutator.SelectDefaultTarget().Apply(context.Background(), b) assert.ErrorContains(t, err, "multiple targets are marked as default") } func TestSelectDefaultTargetSingleDefault(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Targets: map[string]*config.Target{ "foo": {}, @@ -84,7 +84,7 @@ func TestSelectDefaultTargetSingleDefault(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle) + err := mutator.SelectDefaultTarget().Apply(context.Background(), b) assert.NoError(t, err) - assert.Equal(t, "bar", bundle.Config.Bundle.Target) + assert.Equal(t, "bar", b.Config.Bundle.Target) } diff --git a/bundle/config/mutator/select_target_test.go b/bundle/config/mutator/select_target_test.go index dfcd8cb0..6fae0ca2 100644 --- a/bundle/config/mutator/select_target_test.go +++ b/bundle/config/mutator/select_target_test.go @@ -12,7 +12,7 @@ import ( ) func TestSelectTarget(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Workspace: config.Workspace{ Host: "foo", @@ -26,19 +26,19 @@ func TestSelectTarget(t *testing.T) { }, }, } - err := mutator.SelectTarget("default").Apply(context.Background(), bundle) + err := mutator.SelectTarget("default").Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "bar", bundle.Config.Workspace.Host) + assert.Equal(t, "bar", b.Config.Workspace.Host) } func TestSelectTargetNotFound(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Targets: map[string]*config.Target{ "default": {}, }, }, } - err := mutator.SelectTarget("doesnt-exist").Apply(context.Background(), bundle) + err := mutator.SelectTarget("doesnt-exist").Apply(context.Background(), b) require.Error(t, err, "no targets defined") } diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index 323f1e86..c4500413 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -87,7 +87,7 @@ func TestSetVariablesMutator(t *testing.T) { defaultValForA := "default-a" defaultValForB := "default-b" valForC := "assigned-val-c" - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Variables: map[string]*variable.Variable{ "a": { @@ -108,9 +108,9 @@ func TestSetVariablesMutator(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "env-var-b") - err := SetVariables().Apply(context.Background(), bundle) + err := SetVariables().Apply(context.Background(), b) require.NoError(t, err) - assert.Equal(t, "default-a", *bundle.Config.Variables["a"].Value) - assert.Equal(t, "env-var-b", *bundle.Config.Variables["b"].Value) - assert.Equal(t, "assigned-val-c", *bundle.Config.Variables["c"].Value) + assert.Equal(t, "default-a", *b.Config.Variables["a"].Value) + assert.Equal(t, "env-var-b", *b.Config.Variables["b"].Value) + assert.Equal(t, "assigned-val-c", *b.Config.Variables["c"].Value) } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 2e578dd9..321b73dc 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -35,7 +35,7 @@ func touchEmptyFile(t *testing.T, path string) { func TestTranslatePathsSkippedWithGitSource(t *testing.T) { dir := t.TempDir() - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Workspace: config.Workspace{ @@ -80,23 +80,23 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) { }, } - 
err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) require.NoError(t, err) assert.Equal( t, "my_job_notebook.py", - bundle.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath, + b.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath, ) assert.Equal( t, "foo", - bundle.Config.Resources.Jobs["job"].Tasks[1].PythonWheelTask.PackageName, + b.Config.Resources.Jobs["job"].Tasks[1].PythonWheelTask.PackageName, ) assert.Equal( t, "my_python_file.py", - bundle.Config.Resources.Jobs["job"].Tasks[2].SparkPythonTask.PythonFile, + b.Config.Resources.Jobs["job"].Tasks[2].SparkPythonTask.PythonFile, ) } @@ -107,7 +107,7 @@ func TestTranslatePaths(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_python_file.py")) touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar")) - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Workspace: config.Workspace{ @@ -207,66 +207,66 @@ func TestTranslatePaths(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) require.NoError(t, err) // Assert that the path in the tasks now refer to the artifact. assert.Equal( t, "/bundle/my_job_notebook", - bundle.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath, + b.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath, ) assert.Equal( t, filepath.Join("dist", "task.whl"), - bundle.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl, + b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl, ) assert.Equal( t, "/Users/jane.doe@databricks.com/doesnt_exist.py", - bundle.Config.Resources.Jobs["job"].Tasks[1].NotebookTask.NotebookPath, + b.Config.Resources.Jobs["job"].Tasks[1].NotebookTask.NotebookPath, ) assert.Equal( t, "/bundle/my_job_notebook", - bundle.Config.Resources.Jobs["job"].Tasks[2].NotebookTask.NotebookPath, + b.Config.Resources.Jobs["job"].Tasks[2].NotebookTask.NotebookPath, ) assert.Equal( t, "/bundle/my_python_file.py", - bundle.Config.Resources.Jobs["job"].Tasks[4].SparkPythonTask.PythonFile, + b.Config.Resources.Jobs["job"].Tasks[4].SparkPythonTask.PythonFile, ) assert.Equal( t, "/bundle/dist/task.jar", - bundle.Config.Resources.Jobs["job"].Tasks[5].Libraries[0].Jar, + b.Config.Resources.Jobs["job"].Tasks[5].Libraries[0].Jar, ) assert.Equal( t, "dbfs:/bundle/dist/task_remote.jar", - bundle.Config.Resources.Jobs["job"].Tasks[6].Libraries[0].Jar, + b.Config.Resources.Jobs["job"].Tasks[6].Libraries[0].Jar, ) // Assert that the path in the libraries now refer to the artifact. 
assert.Equal( t, "/bundle/my_pipeline_notebook", - bundle.Config.Resources.Pipelines["pipeline"].Libraries[0].Notebook.Path, + b.Config.Resources.Pipelines["pipeline"].Libraries[0].Notebook.Path, ) assert.Equal( t, "/Users/jane.doe@databricks.com/doesnt_exist.py", - bundle.Config.Resources.Pipelines["pipeline"].Libraries[1].Notebook.Path, + b.Config.Resources.Pipelines["pipeline"].Libraries[1].Notebook.Path, ) assert.Equal( t, "/bundle/my_pipeline_notebook", - bundle.Config.Resources.Pipelines["pipeline"].Libraries[2].Notebook.Path, + b.Config.Resources.Pipelines["pipeline"].Libraries[2].Notebook.Path, ) assert.Equal( t, "/bundle/my_python_file.py", - bundle.Config.Resources.Pipelines["pipeline"].Libraries[4].File.Path, + b.Config.Resources.Pipelines["pipeline"].Libraries[4].File.Path, ) } @@ -278,7 +278,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "job", "my_sql_file.sql")) touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml")) - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Workspace: config.Workspace{ @@ -342,41 +342,41 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) require.NoError(t, err) assert.Equal( t, "/bundle/job/my_python_file.py", - bundle.Config.Resources.Jobs["job"].Tasks[0].SparkPythonTask.PythonFile, + b.Config.Resources.Jobs["job"].Tasks[0].SparkPythonTask.PythonFile, ) assert.Equal( t, "/bundle/job/dist/task.jar", - bundle.Config.Resources.Jobs["job"].Tasks[1].Libraries[0].Jar, + b.Config.Resources.Jobs["job"].Tasks[1].Libraries[0].Jar, ) assert.Equal( t, "/bundle/job/my_sql_file.sql", - bundle.Config.Resources.Jobs["job"].Tasks[2].SqlTask.File.Path, + b.Config.Resources.Jobs["job"].Tasks[2].SqlTask.File.Path, ) assert.Equal( t, "/bundle/job/my_dbt_project", - bundle.Config.Resources.Jobs["job"].Tasks[3].DbtTask.ProjectDirectory, + b.Config.Resources.Jobs["job"].Tasks[3].DbtTask.ProjectDirectory, ) assert.Equal( t, "/bundle/pipeline/my_python_file.py", - bundle.Config.Resources.Pipelines["pipeline"].Libraries[0].File.Path, + b.Config.Resources.Pipelines["pipeline"].Libraries[0].File.Path, ) } func TestTranslatePathsOutsideBundleRoot(t *testing.T) { dir := t.TempDir() - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Workspace: config.Workspace{ @@ -403,14 +403,14 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.ErrorContains(t, err, "is not contained in bundle root") } func TestJobNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Resources: config.Resources{ @@ -434,14 +434,14 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") } func TestJobFileDoesNotExistError(t *testing.T) { dir := t.TempDir() - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Resources: config.Resources{ @@ -465,14 +465,14 @@ func TestJobFileDoesNotExistError(t *testing.T) { }, } - err := 
mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.EqualError(t, err, "file ./doesnt_exist.py not found") } func TestPipelineNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Resources: config.Resources{ @@ -496,14 +496,14 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") } func TestPipelineFileDoesNotExistError(t *testing.T) { dir := t.TempDir() - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Resources: config.Resources{ @@ -527,7 +527,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.EqualError(t, err, "file ./doesnt_exist.py not found") } @@ -535,7 +535,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { dir := t.TempDir() touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Workspace: config.Workspace{ @@ -562,7 +562,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.ErrorContains(t, err, `expected a file for "tasks.spark_python_task.python_file" but got a notebook`) } @@ -570,7 +570,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { dir := t.TempDir() touchEmptyFile(t, filepath.Join(dir, "my_file.py")) - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Workspace: config.Workspace{ @@ -597,7 +597,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.ErrorContains(t, err, `expected a notebook for "tasks.notebook_task.notebook_path" but got a file`) } @@ -605,7 +605,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { dir := t.TempDir() touchEmptyFile(t, filepath.Join(dir, "my_file.py")) - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Workspace: config.Workspace{ @@ -632,7 +632,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.ErrorContains(t, err, `expected a notebook for "libraries.notebook.path" but got a file`) } @@ -640,7 +640,7 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { dir := t.TempDir() touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: dir, Workspace: config.Workspace{ @@ -667,6 +667,6 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), bundle) + err := mutator.TranslatePaths().Apply(context.Background(), b) assert.ErrorContains(t, err, `expected a file for "libraries.file.path" but got a notebook`) } diff --git 
a/bundle/config/mutator/validate_git_details_test.go b/bundle/config/mutator/validate_git_details_test.go index 252964ee..eedef126 100644 --- a/bundle/config/mutator/validate_git_details_test.go +++ b/bundle/config/mutator/validate_git_details_test.go @@ -10,7 +10,7 @@ import ( ) func TestValidateGitDetailsMatchingBranches(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ Git: config.Git{ @@ -22,13 +22,13 @@ func TestValidateGitDetailsMatchingBranches(t *testing.T) { } m := ValidateGitDetails() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) assert.NoError(t, err) } func TestValidateGitDetailsNonMatchingBranches(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ Git: config.Git{ @@ -40,14 +40,14 @@ func TestValidateGitDetailsNonMatchingBranches(t *testing.T) { } m := ValidateGitDetails() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) expectedError := "not on the right Git branch:\n expected according to configuration: main\n actual: feature\nuse --force to override" assert.EqualError(t, err, expectedError) } func TestValidateGitDetailsNotUsingGit(t *testing.T) { - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ Git: config.Git{ @@ -59,7 +59,7 @@ func TestValidateGitDetailsNotUsingGit(t *testing.T) { } m := ValidateGitDetails() - err := m.Apply(context.Background(), bundle) + err := m.Apply(context.Background(), b) assert.NoError(t, err) } diff --git a/bundle/context.go b/bundle/context.go index 9287afd1..3e6ed751 100644 --- a/bundle/context.go +++ b/bundle/context.go @@ -26,9 +26,9 @@ func GetOrNil(ctx context.Context) *Bundle { // Get returns the bundle as configured on the context. // It panics if it isn't configured. 
func Get(ctx context.Context) *Bundle { - bundle := GetOrNil(ctx) - if bundle == nil { + b := GetOrNil(ctx) + if b == nil { panic("context not configured with bundle") } - return bundle + return b } diff --git a/bundle/deferred_test.go b/bundle/deferred_test.go index 46d5e641..f75867d6 100644 --- a/bundle/deferred_test.go +++ b/bundle/deferred_test.go @@ -29,8 +29,8 @@ func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) { cleanup := &testMutator{} deferredMutator := Defer(Seq(m1, m2, m3), cleanup) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, deferredMutator) + b := &Bundle{} + err := Apply(context.Background(), b, deferredMutator) assert.NoError(t, err) assert.Equal(t, 1, m1.applyCalled) @@ -46,8 +46,8 @@ func TestDeferredMutatorWhenFirstFails(t *testing.T) { cleanup := &testMutator{} deferredMutator := Defer(Seq(mErr, m1, m2), cleanup) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, deferredMutator) + b := &Bundle{} + err := Apply(context.Background(), b, deferredMutator) assert.ErrorContains(t, err, "mutator error occurred") assert.Equal(t, 1, mErr.applyCalled) @@ -63,8 +63,8 @@ func TestDeferredMutatorWhenMiddleOneFails(t *testing.T) { cleanup := &testMutator{} deferredMutator := Defer(Seq(m1, mErr, m2), cleanup) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, deferredMutator) + b := &Bundle{} + err := Apply(context.Background(), b, deferredMutator) assert.ErrorContains(t, err, "mutator error occurred") assert.Equal(t, 1, m1.applyCalled) @@ -80,8 +80,8 @@ func TestDeferredMutatorWhenLastOneFails(t *testing.T) { cleanup := &testMutator{} deferredMutator := Defer(Seq(m1, m2, mErr), cleanup) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, deferredMutator) + b := &Bundle{} + err := Apply(context.Background(), b, deferredMutator) assert.ErrorContains(t, err, "mutator error occurred") assert.Equal(t, 1, m1.applyCalled) @@ -97,8 +97,8 @@ func TestDeferredMutatorCombinesErrorMessages(t *testing.T) { cleanupErr := &mutatorWithError{errorMsg: "cleanup error occurred"} deferredMutator := Defer(Seq(m1, m2, mErr), cleanupErr) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, deferredMutator) + b := &Bundle{} + err := Apply(context.Background(), b, deferredMutator) assert.ErrorContains(t, err, "mutator error occurred\ncleanup error occurred") assert.Equal(t, 1, m1.applyCalled) diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 001e7a22..266f1c43 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -27,7 +27,7 @@ func TestInitEnvironmentVariables(t *testing.T) { t.Skipf("cannot find terraform binary: %s", err) } - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Path: t.TempDir(), Bundle: config.Bundle{ @@ -43,9 +43,9 @@ func TestInitEnvironmentVariables(t *testing.T) { // TODO(pietern): create test fixture that initializes a mocked client. 
t.Setenv("DATABRICKS_HOST", "https://x") t.Setenv("DATABRICKS_TOKEN", "foobar") - bundle.WorkspaceClient() + b.WorkspaceClient() - err = Initialize().Apply(context.Background(), bundle) + err = Initialize().Apply(context.Background(), b) require.NoError(t, err) } diff --git a/bundle/mutator_test.go b/bundle/mutator_test.go index 127f5668..c1f3c075 100644 --- a/bundle/mutator_test.go +++ b/bundle/mutator_test.go @@ -34,8 +34,8 @@ func TestMutator(t *testing.T) { }, } - bundle := &Bundle{} - err := Apply(context.Background(), bundle, m) + b := &Bundle{} + err := Apply(context.Background(), b, m) assert.NoError(t, err) assert.Equal(t, 1, m.applyCalled) diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index 1ccdba56..b6427ccd 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -73,7 +73,7 @@ func TestGenerateBoth(t *testing.T) { func TestTransformFiltersWheelTasksOnly(t *testing.T) { trampoline := pythonTrampoline{} - bundle := &bundle.Bundle{ + b := &bundle.Bundle{ Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -106,7 +106,7 @@ func TestTransformFiltersWheelTasksOnly(t *testing.T) { }, } - tasks := trampoline.GetTasks(bundle) + tasks := trampoline.GetTasks(b) require.Len(t, tasks, 1) require.Equal(t, "job1", tasks[0].JobKey) require.Equal(t, "key1", tasks[0].Task.TaskKey) diff --git a/bundle/seq_test.go b/bundle/seq_test.go index 26ae37f8..d5c229e3 100644 --- a/bundle/seq_test.go +++ b/bundle/seq_test.go @@ -13,8 +13,8 @@ func TestSeqMutator(t *testing.T) { m3 := &testMutator{} seqMutator := Seq(m1, m2, m3) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, seqMutator) + b := &Bundle{} + err := Apply(context.Background(), b, seqMutator) assert.NoError(t, err) assert.Equal(t, 1, m1.applyCalled) @@ -29,8 +29,8 @@ func TestSeqWithDeferredMutator(t *testing.T) { m4 := &testMutator{} seqMutator := Seq(m1, Defer(m2, m3), m4) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, seqMutator) + b := &Bundle{} + err := Apply(context.Background(), b, seqMutator) assert.NoError(t, err) assert.Equal(t, 1, m1.applyCalled) @@ -46,8 +46,8 @@ func TestSeqWithErrorAndDeferredMutator(t *testing.T) { m3 := &testMutator{} seqMutator := Seq(errorMut, Defer(m1, m2), m3) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, seqMutator) + b := &Bundle{} + err := Apply(context.Background(), b, seqMutator) assert.Error(t, err) assert.Equal(t, 1, errorMut.applyCalled) @@ -63,8 +63,8 @@ func TestSeqWithErrorInsideDeferredMutator(t *testing.T) { m3 := &testMutator{} seqMutator := Seq(m1, Defer(errorMut, m2), m3) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, seqMutator) + b := &Bundle{} + err := Apply(context.Background(), b, seqMutator) assert.Error(t, err) assert.Equal(t, 1, m1.applyCalled) @@ -80,8 +80,8 @@ func TestSeqWithErrorInsideFinallyStage(t *testing.T) { m3 := &testMutator{} seqMutator := Seq(m1, Defer(m2, errorMut), m3) - bundle := &Bundle{} - err := Apply(context.Background(), bundle, seqMutator) + b := &Bundle{} + err := Apply(context.Background(), b, seqMutator) assert.Error(t, err) assert.Equal(t, 1, m1.applyCalled) From 489d6fa1b849fc02d3766b942ecc9c81f619e504 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 15 Nov 2023 15:19:18 +0100 Subject: [PATCH 240/310] Replace direct calls with `bundle.Apply` (#990) ## Changes Some test call sites called directly into the mutator's `Apply` function instead of `bundle.Apply`. 
Calling into `bundle.Apply` is preferred because that's where we can run pre/post logic common across all mutators. ## Tests Pass. --- bundle/config/mutator/default_target_test.go | 4 ++-- .../mutator/default_workspace_paths_test.go | 4 ++-- .../mutator/default_workspace_root_test.go | 2 +- .../mutator/expand_workspace_root_test.go | 8 +++---- .../config/mutator/override_compute_test.go | 10 ++++---- bundle/config/mutator/process_include_test.go | 2 +- .../mutator/process_root_includes_test.go | 16 ++++++------- .../mutator/process_target_mode_test.go | 12 +++++----- .../config/mutator/select_default_target.go | 4 ++-- .../mutator/select_default_target_test.go | 12 +++++----- bundle/config/mutator/select_target_test.go | 4 ++-- bundle/config/mutator/set_variables_test.go | 2 +- bundle/config/mutator/translate_paths_test.go | 24 +++++++++---------- .../mutator/validate_git_details_test.go | 6 ++--- bundle/deploy/metadata/compute_test.go | 2 +- bundle/deploy/terraform/init_test.go | 2 +- bundle/tests/bundle/wheel_test.go | 16 ++++++------- 17 files changed, 65 insertions(+), 65 deletions(-) diff --git a/bundle/config/mutator/default_target_test.go b/bundle/config/mutator/default_target_test.go index 9214c4ef..61a5a013 100644 --- a/bundle/config/mutator/default_target_test.go +++ b/bundle/config/mutator/default_target_test.go @@ -13,7 +13,7 @@ import ( func TestDefaultTarget(t *testing.T) { b := &bundle.Bundle{} - err := mutator.DefineDefaultTarget().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) require.NoError(t, err) env, ok := b.Config.Targets["default"] assert.True(t, ok) @@ -28,7 +28,7 @@ func TestDefaultTargetAlreadySpecified(t *testing.T) { }, }, } - err := mutator.DefineDefaultTarget().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) require.NoError(t, err) _, ok := b.Config.Targets["default"] assert.False(t, ok) diff --git a/bundle/config/mutator/default_workspace_paths_test.go b/bundle/config/mutator/default_workspace_paths_test.go index 56b3c74c..1ad0ca78 100644 --- a/bundle/config/mutator/default_workspace_paths_test.go +++ b/bundle/config/mutator/default_workspace_paths_test.go @@ -19,7 +19,7 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) { }, }, } - err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) require.NoError(t, err) assert.Equal(t, "/files", b.Config.Workspace.FilePath) assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath) @@ -37,7 +37,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) { }, }, } - err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) require.NoError(t, err) assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath) assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath) diff --git a/bundle/config/mutator/default_workspace_root_test.go b/bundle/config/mutator/default_workspace_root_test.go index ad921f6f..9dd549a3 100644 --- a/bundle/config/mutator/default_workspace_root_test.go +++ b/bundle/config/mutator/default_workspace_root_test.go @@ -20,7 +20,7 @@ func TestDefaultWorkspaceRoot(t *testing.T) { }, }, } - err := mutator.DefineDefaultWorkspaceRoot().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot()) require.NoError(t, 
err) assert.Equal(t, "~/.bundle/name/environment", b.Config.Workspace.RootPath) } diff --git a/bundle/config/mutator/expand_workspace_root_test.go b/bundle/config/mutator/expand_workspace_root_test.go index 217c07c5..17ee0650 100644 --- a/bundle/config/mutator/expand_workspace_root_test.go +++ b/bundle/config/mutator/expand_workspace_root_test.go @@ -25,7 +25,7 @@ func TestExpandWorkspaceRoot(t *testing.T) { }, }, } - err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) require.NoError(t, err) assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath) } @@ -43,7 +43,7 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) { }, }, } - err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) require.NoError(t, err) assert.Equal(t, "/Users/charly@doe.com/foo", b.Config.Workspace.RootPath) } @@ -60,7 +60,7 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) { }, }, } - err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) require.Error(t, err) } @@ -72,6 +72,6 @@ func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) { }, }, } - err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) require.Error(t, err) } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 70d7f238..4c5d4427 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -47,7 +47,7 @@ func TestOverrideDevelopment(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) @@ -83,7 +83,7 @@ func TestOverrideDevelopmentEnv(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } @@ -108,7 +108,7 @@ func TestOverridePipelineTask(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } @@ -138,7 +138,7 @@ func TestOverrideProduction(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) require.Error(t, err) } @@ -165,6 +165,6 @@ func TestOverrideProductionEnv(t *testing.T) { } m := mutator.OverrideCompute() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) } diff --git a/bundle/config/mutator/process_include_test.go b/bundle/config/mutator/process_include_test.go index eb1cb291..7ca5d198 100644 --- a/bundle/config/mutator/process_include_test.go +++ b/bundle/config/mutator/process_include_test.go @@ -32,7 +32,7 @@ func TestProcessInclude(t *testing.T) { f.Close() assert.Equal(t, "foo", b.Config.Workspace.Host) - err = 
mutator.ProcessInclude(fullPath, relPath).Apply(context.Background(), b) + err = bundle.Apply(context.Background(), b, mutator.ProcessInclude(fullPath, relPath)) require.NoError(t, err) assert.Equal(t, "bar", b.Config.Workspace.Host) } diff --git a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/mutator/process_root_includes_test.go index 7a0b9e65..88a6c743 100644 --- a/bundle/config/mutator/process_root_includes_test.go +++ b/bundle/config/mutator/process_root_includes_test.go @@ -29,7 +29,7 @@ func TestProcessRootIncludesEmpty(t *testing.T) { Path: ".", }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) } @@ -49,7 +49,7 @@ func TestProcessRootIncludesAbs(t *testing.T) { }, }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.Error(t, err) assert.Contains(t, err.Error(), "must be relative paths") } @@ -68,7 +68,7 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) { touch(t, b.Config.Path, "a.yml") touch(t, b.Config.Path, "b.yml") - err := mutator.ProcessRootIncludes().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include) @@ -88,7 +88,7 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) { touch(t, b.Config.Path, "a1.yml") touch(t, b.Config.Path, "b1.yml") - err := mutator.ProcessRootIncludes().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include) @@ -107,7 +107,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) { touch(t, b.Config.Path, "a.yml") - err := mutator.ProcessRootIncludes().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) assert.Equal(t, []string{"a.yml"}, b.Config.Include) } @@ -121,7 +121,7 @@ func TestProcessRootIncludesNotExists(t *testing.T) { }, }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.Error(t, err) assert.Contains(t, err.Error(), "notexist.yml defined in 'include' section does not match any files") } @@ -138,7 +138,7 @@ func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) { }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) assert.Contains(t, b.Config.Include, testYamlName) } @@ -161,7 +161,7 @@ func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) { }, } - err := mutator.ProcessRootIncludes().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) assert.Equal(t, []string{testYamlName}, b.Config.Include) } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 6ce3fcdf..0e360263 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -92,7 +92,7 @@ func TestProcessTargetModeDevelopment(t *testing.T) { b := mockBundle(config.Development) m := ProcessTargetMode() - err := 
m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) // Job 1 @@ -135,7 +135,7 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - err := ProcessTargetMode().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, ProcessTargetMode()) require.NoError(t, err) // Assert that tag normalization took place. @@ -149,7 +149,7 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - err := ProcessTargetMode().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, ProcessTargetMode()) require.NoError(t, err) // Assert that tag normalization took place (Azure allows more characters than AWS). @@ -163,7 +163,7 @@ func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - err := ProcessTargetMode().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, ProcessTargetMode()) require.NoError(t, err) // Assert that tag normalization took place. @@ -174,7 +174,7 @@ func TestProcessTargetModeDefault(t *testing.T) { b := mockBundle("") m := ProcessTargetMode() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name) assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) @@ -257,7 +257,7 @@ func TestAllResourcesRenamed(t *testing.T) { resources := reflect.ValueOf(b.Config.Resources) m := ProcessTargetMode() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) for i := 0; i < resources.NumField(); i++ { diff --git a/bundle/config/mutator/select_default_target.go b/bundle/config/mutator/select_default_target.go index 8abcfe4f..be5046f8 100644 --- a/bundle/config/mutator/select_default_target.go +++ b/bundle/config/mutator/select_default_target.go @@ -28,7 +28,7 @@ func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error // One target means there's only one default. names := maps.Keys(b.Config.Targets) if len(names) == 1 { - return SelectTarget(names[0]).Apply(ctx, b) + return bundle.Apply(ctx, b, SelectTarget(names[0])) } // Multiple targets means we look for the `default` flag. @@ -50,5 +50,5 @@ func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error } // One default remaining. 
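This `select_default_target.go` change is itself an instance of the pattern described in the commit message: mutators delegate through `bundle.Apply` instead of calling `Apply` on one another directly. As a minimal sketch of why that helps, assuming a `Mutator` interface with the `Apply(ctx, b)` method seen at the old call sites (the real `bundle.Apply` in this repository may carry additional logic):

```go
package bundle

import "context"

// Bundle stands in for the real configuration bundle type.
type Bundle struct{}

// Mutator mirrors the interface implied by the old call sites, where
// tests invoked m.Apply(ctx, b) on each mutator directly.
type Mutator interface {
	Apply(ctx context.Context, b *Bundle) error
}

// Apply is the single entry point: one place to wrap every mutator
// with pre/post logic shared across all of them.
func Apply(ctx context.Context, b *Bundle, m Mutator) error {
	// pre-mutator hooks (logging, tracing, validation) would run here
	err := m.Apply(ctx, b)
	// post-mutator hooks would run here
	return err
}
```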
- return SelectTarget(defaults[0]).Apply(ctx, b) + return bundle.Apply(ctx, b, SelectTarget(defaults[0])) } diff --git a/bundle/config/mutator/select_default_target_test.go b/bundle/config/mutator/select_default_target_test.go index cb595f56..1c2e451f 100644 --- a/bundle/config/mutator/select_default_target_test.go +++ b/bundle/config/mutator/select_default_target_test.go @@ -16,7 +16,7 @@ func TestSelectDefaultTargetNoTargets(t *testing.T) { Targets: map[string]*config.Target{}, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) assert.ErrorContains(t, err, "no targets defined") } @@ -28,7 +28,7 @@ func TestSelectDefaultTargetSingleTargets(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) assert.NoError(t, err) assert.Equal(t, "foo", b.Config.Bundle.Target) } @@ -43,7 +43,7 @@ func TestSelectDefaultTargetNoDefaults(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) assert.ErrorContains(t, err, "please specify target") } @@ -56,7 +56,7 @@ func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) assert.ErrorContains(t, err, "please specify target") } @@ -70,7 +70,7 @@ func TestSelectDefaultTargetMultipleDefaults(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) assert.ErrorContains(t, err, "multiple targets are marked as default") } @@ -84,7 +84,7 @@ func TestSelectDefaultTargetSingleDefault(t *testing.T) { }, }, } - err := mutator.SelectDefaultTarget().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) assert.NoError(t, err) assert.Equal(t, "bar", b.Config.Bundle.Target) } diff --git a/bundle/config/mutator/select_target_test.go b/bundle/config/mutator/select_target_test.go index 6fae0ca2..20467270 100644 --- a/bundle/config/mutator/select_target_test.go +++ b/bundle/config/mutator/select_target_test.go @@ -26,7 +26,7 @@ func TestSelectTarget(t *testing.T) { }, }, } - err := mutator.SelectTarget("default").Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.SelectTarget("default")) require.NoError(t, err) assert.Equal(t, "bar", b.Config.Workspace.Host) } @@ -39,6 +39,6 @@ func TestSelectTargetNotFound(t *testing.T) { }, }, } - err := mutator.SelectTarget("doesnt-exist").Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.SelectTarget("doesnt-exist")) require.Error(t, err, "no targets defined") } diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index c4500413..15a98e5c 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -108,7 +108,7 @@ func TestSetVariablesMutator(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "env-var-b") - err := SetVariables().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, SetVariables()) require.NoError(t, err) assert.Equal(t, "default-a", *b.Config.Variables["a"].Value) assert.Equal(t, 
"env-var-b", *b.Config.Variables["b"].Value) diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 321b73dc..41d031ca 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -80,7 +80,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) require.NoError(t, err) assert.Equal( @@ -207,7 +207,7 @@ func TestTranslatePaths(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) require.NoError(t, err) // Assert that the path in the tasks now refer to the artifact. @@ -342,7 +342,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) require.NoError(t, err) assert.Equal( @@ -403,7 +403,7 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, "is not contained in bundle root") } @@ -434,7 +434,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") } @@ -465,7 +465,7 @@ func TestJobFileDoesNotExistError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.EqualError(t, err, "file ./doesnt_exist.py not found") } @@ -496,7 +496,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") } @@ -527,7 +527,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.EqualError(t, err, "file ./doesnt_exist.py not found") } @@ -562,7 +562,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, `expected a file for "tasks.spark_python_task.python_file" but got a notebook`) } @@ -597,7 +597,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, `expected a notebook for "tasks.notebook_task.notebook_path" but got a file`) } @@ -632,7 +632,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, `expected a notebook for "libraries.notebook.path" but got a file`) } @@ -667,6 +667,6 @@ func 
TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { }, } - err := mutator.TranslatePaths().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, `expected a file for "libraries.file.path" but got a notebook`) } diff --git a/bundle/config/mutator/validate_git_details_test.go b/bundle/config/mutator/validate_git_details_test.go index eedef126..f207d9cf 100644 --- a/bundle/config/mutator/validate_git_details_test.go +++ b/bundle/config/mutator/validate_git_details_test.go @@ -22,7 +22,7 @@ func TestValidateGitDetailsMatchingBranches(t *testing.T) { } m := ValidateGitDetails() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) assert.NoError(t, err) } @@ -40,7 +40,7 @@ func TestValidateGitDetailsNonMatchingBranches(t *testing.T) { } m := ValidateGitDetails() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) expectedError := "not on the right Git branch:\n expected according to configuration: main\n actual: feature\nuse --force to override" assert.EqualError(t, err, expectedError) @@ -59,7 +59,7 @@ func TestValidateGitDetailsNotUsingGit(t *testing.T) { } m := ValidateGitDetails() - err := m.Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, m) assert.NoError(t, err) } diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go index ffa352d0..c3cb029d 100644 --- a/bundle/deploy/metadata/compute_test.go +++ b/bundle/deploy/metadata/compute_test.go @@ -93,7 +93,7 @@ func TestComputeMetadataMutator(t *testing.T) { }, } - err := Compute().Apply(context.Background(), b) + err := bundle.Apply(context.Background(), b, Compute()) require.NoError(t, err) assert.Equal(t, expectedMetadata, b.Metadata) diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 266f1c43..a3a9e0e4 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -45,7 +45,7 @@ func TestInitEnvironmentVariables(t *testing.T) { t.Setenv("DATABRICKS_TOKEN", "foobar") b.WorkspaceClient() - err = Initialize().Apply(context.Background(), b) + err = bundle.Apply(context.Background(), b, Initialize()) require.NoError(t, err) } diff --git a/bundle/tests/bundle/wheel_test.go b/bundle/tests/bundle/wheel_test.go index f7f0e75e..57ecb54b 100644 --- a/bundle/tests/bundle/wheel_test.go +++ b/bundle/tests/bundle/wheel_test.go @@ -17,7 +17,7 @@ func TestBundlePythonWheelBuild(t *testing.T) { require.NoError(t, err) m := phases.Build() - err = m.Apply(ctx, b) + err = bundle.Apply(ctx, b, m) require.NoError(t, err) matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") @@ -25,7 +25,7 @@ func TestBundlePythonWheelBuild(t *testing.T) { require.Equal(t, 1, len(matches)) match := libraries.MatchWithArtifacts() - err = match.Apply(ctx, b) + err = bundle.Apply(ctx, b, match) require.NoError(t, err) } @@ -35,7 +35,7 @@ func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { require.NoError(t, err) m := phases.Build() - err = m.Apply(ctx, b) + err = bundle.Apply(ctx, b, m) require.NoError(t, err) matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") @@ -43,7 +43,7 @@ func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { require.Equal(t, 1, len(matches)) match := libraries.MatchWithArtifacts() - err = match.Apply(ctx, b) + err = bundle.Apply(ctx, b, match) require.NoError(t, err) } @@ -53,11 
+53,11 @@ func TestBundlePythonWheelWithDBFSLib(t *testing.T) { require.NoError(t, err) m := phases.Build() - err = m.Apply(ctx, b) + err = bundle.Apply(ctx, b, m) require.NoError(t, err) match := libraries.MatchWithArtifacts() - err = match.Apply(ctx, b) + err = bundle.Apply(ctx, b, match) require.NoError(t, err) } @@ -67,11 +67,11 @@ func TestBundlePythonWheelBuildNoBuildJustUpload(t *testing.T) { require.NoError(t, err) m := phases.Build() - err = m.Apply(ctx, b) + err = bundle.Apply(ctx, b, m) require.NoError(t, err) match := libraries.MatchWithArtifacts() - err = match.Apply(ctx, b) + err = bundle.Apply(ctx, b, match) require.ErrorContains(t, err, "./non-existing/*.whl") require.NotZero(t, len(b.Config.Artifacts)) From 1b7558cd7d3ba68a0d6317a7505c182dc76adae0 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Fri, 17 Nov 2023 13:47:37 +0100 Subject: [PATCH 241/310] Add `databricks labs` command group (#914) ## Command group ## Installed versions ## Project commands ## Installer hook ![image](https://github.com/databricks/cli/assets/259697/3ce0d355-039a-445f-bff7-6dfc1a2e3288) ## Update notifications ![image](https://github.com/databricks/cli/assets/259697/10724627-3606-49e1-9722-00ae37afed12) # Downstream work - https://github.com/databrickslabs/ucx/pull/517 - https://github.com/databrickslabs/dlt-meta/pull/19 - https://github.com/databrickslabs/discoverx/pull/84 --- cmd/cmd.go | 2 + cmd/labs/CODEOWNERS | 1 + cmd/labs/clear_cache.go | 33 ++ cmd/labs/github/github.go | 66 +++ cmd/labs/github/ref.go | 20 + cmd/labs/github/ref_test.go | 48 ++ cmd/labs/github/releases.go | 61 +++ cmd/labs/github/releases_test.go | 34 ++ cmd/labs/github/repositories.go | 59 +++ cmd/labs/github/repositories_test.go | 30 ++ cmd/labs/install.go | 21 + cmd/labs/installed.go | 57 +++ cmd/labs/installed_test.go | 19 + cmd/labs/labs.go | 39 ++ cmd/labs/list.go | 62 +++ cmd/labs/list_test.go | 19 + cmd/labs/localcache/jsonfile.go | 109 +++++ cmd/labs/localcache/jsonfile_test.go | 132 ++++++ cmd/labs/project/command_test.go | 69 +++ cmd/labs/project/entrypoint.go | 250 +++++++++++ cmd/labs/project/fetcher.go | 141 ++++++ cmd/labs/project/helpers.go | 35 ++ cmd/labs/project/init_test.go | 13 + cmd/labs/project/installed.go | 58 +++ cmd/labs/project/installed_test.go | 19 + cmd/labs/project/installer.go | 286 ++++++++++++ cmd/labs/project/installer_test.go | 415 ++++++++++++++++++ cmd/labs/project/login.go | 117 +++++ cmd/labs/project/project.go | 352 +++++++++++++++ cmd/labs/project/project_test.go | 22 + cmd/labs/project/proxy.go | 146 ++++++ cmd/labs/project/schema.json | 126 ++++++ cmd/labs/project/testdata/.gitignore | 1 + .../databrickslabs-blueprint-releases.json | 8 + .../labs/blueprint/config/login.json | 4 + .../.databricks/labs/blueprint/lib/install.py | 1 + .../.databricks/labs/blueprint/lib/labs.yml | 33 ++ .../.databricks/labs/blueprint/lib/main.py | 27 ++ .../labs/blueprint/lib/pyproject.toml | 11 + .../blueprint/state/other-state-file.json | 1 + .../labs/blueprint/state/venv/pyvenv.cfg | 0 .../labs/blueprint/state/version.json | 4 + .../labs/databrickslabs-repositories.json | 37 ++ .../testdata/installed-in-home/.databrickscfg | 9 + cmd/labs/show.go | 57 +++ cmd/labs/uninstall.go | 39 ++ cmd/labs/unpack/zipball.go | 64 +++ cmd/labs/upgrade.go | 21 + 48 files changed, 3178 insertions(+) create mode 100644 cmd/labs/CODEOWNERS create mode 100644 cmd/labs/clear_cache.go create mode 100644 cmd/labs/github/github.go create mode 100644
cmd/labs/github/ref.go create mode 100644 cmd/labs/github/ref_test.go create mode 100644 cmd/labs/github/releases.go create mode 100644 cmd/labs/github/releases_test.go create mode 100644 cmd/labs/github/repositories.go create mode 100644 cmd/labs/github/repositories_test.go create mode 100644 cmd/labs/install.go create mode 100644 cmd/labs/installed.go create mode 100644 cmd/labs/installed_test.go create mode 100644 cmd/labs/labs.go create mode 100644 cmd/labs/list.go create mode 100644 cmd/labs/list_test.go create mode 100644 cmd/labs/localcache/jsonfile.go create mode 100644 cmd/labs/localcache/jsonfile_test.go create mode 100644 cmd/labs/project/command_test.go create mode 100644 cmd/labs/project/entrypoint.go create mode 100644 cmd/labs/project/fetcher.go create mode 100644 cmd/labs/project/helpers.go create mode 100644 cmd/labs/project/init_test.go create mode 100644 cmd/labs/project/installed.go create mode 100644 cmd/labs/project/installed_test.go create mode 100644 cmd/labs/project/installer.go create mode 100644 cmd/labs/project/installer_test.go create mode 100644 cmd/labs/project/login.go create mode 100644 cmd/labs/project/project.go create mode 100644 cmd/labs/project/project_test.go create mode 100644 cmd/labs/project/proxy.go create mode 100644 cmd/labs/project/schema.json create mode 100644 cmd/labs/project/testdata/.gitignore create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/cache/databrickslabs-blueprint-releases.json create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/config/login.json create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/install.py create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/main.py create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/pyproject.toml create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/other-state-file.json create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/venv/pyvenv.cfg create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/version.json create mode 100644 cmd/labs/project/testdata/installed-in-home/.databricks/labs/databrickslabs-repositories.json create mode 100644 cmd/labs/project/testdata/installed-in-home/.databrickscfg create mode 100644 cmd/labs/show.go create mode 100644 cmd/labs/uninstall.go create mode 100644 cmd/labs/unpack/zipball.go create mode 100644 cmd/labs/upgrade.go diff --git a/cmd/cmd.go b/cmd/cmd.go index 6dd0f6e2..5d835409 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/cmd/bundle" "github.com/databricks/cli/cmd/configure" "github.com/databricks/cli/cmd/fs" + "github.com/databricks/cli/cmd/labs" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/cmd/sync" "github.com/databricks/cli/cmd/version" @@ -70,6 +71,7 @@ func New(ctx context.Context) *cobra.Command { cli.AddCommand(bundle.New()) cli.AddCommand(configure.New()) cli.AddCommand(fs.New()) + cli.AddCommand(labs.New(ctx)) cli.AddCommand(sync.New()) cli.AddCommand(version.New()) diff --git a/cmd/labs/CODEOWNERS b/cmd/labs/CODEOWNERS new file mode 100644 index 00000000..cc93a75e --- /dev/null +++ b/cmd/labs/CODEOWNERS @@ -0,0 +1 @@ +* @nfx diff --git 
a/cmd/labs/clear_cache.go b/cmd/labs/clear_cache.go new file mode 100644 index 00000000..e2f531cf --- /dev/null +++ b/cmd/labs/clear_cache.go @@ -0,0 +1,33 @@ +package labs + +import ( + "log/slog" + "os" + + "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/libs/log" + "github.com/spf13/cobra" +) + +func newClearCacheCommand() *cobra.Command { + return &cobra.Command{ + Use: "clear-cache", + Short: "Clears cache entries from everywhere relevant", + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + projects, err := project.Installed(ctx) + if err != nil { + return err + } + _ = os.Remove(project.PathInLabs(ctx, "databrickslabs-repositories.json")) + logger := log.GetLogger(ctx) + for _, prj := range projects { + logger.Info("clearing labs project cache", slog.String("name", prj.Name)) + _ = os.RemoveAll(prj.CacheDir(ctx)) + // recreating empty cache folder for downstream apps to work normally + _ = prj.EnsureFoldersExist(ctx) + } + return nil + }, + } +} diff --git a/cmd/labs/github/github.go b/cmd/labs/github/github.go new file mode 100644 index 00000000..1dd9fae5 --- /dev/null +++ b/cmd/labs/github/github.go @@ -0,0 +1,66 @@ +package github + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/databricks/cli/libs/log" +) + +const gitHubAPI = "https://api.github.com" +const gitHubUserContent = "https://raw.githubusercontent.com" + +// Placeholders to use as unique keys in context.Context. +var apiOverride int +var userContentOverride int + +func WithApiOverride(ctx context.Context, override string) context.Context { + return context.WithValue(ctx, &apiOverride, override) +} + +func WithUserContentOverride(ctx context.Context, override string) context.Context { + return context.WithValue(ctx, &userContentOverride, override) +} + +var ErrNotFound = errors.New("not found") + +func getBytes(ctx context.Context, method, url string, body io.Reader) ([]byte, error) { + ao, ok := ctx.Value(&apiOverride).(string) + if ok { + url = strings.Replace(url, gitHubAPI, ao, 1) + } + uco, ok := ctx.Value(&userContentOverride).(string) + if ok { + url = strings.Replace(url, gitHubUserContent, uco, 1) + } + log.Tracef(ctx, "%s %s", method, url) + req, err := http.NewRequestWithContext(ctx, method, url, body) + if err != nil { + return nil, err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode == 404 { + return nil, ErrNotFound + } + if res.StatusCode >= 400 { + return nil, fmt.Errorf("github request failed: %s", res.Status) + } + return io.ReadAll(res.Body) +} + +func httpGetAndUnmarshal(ctx context.Context, url string, response any) error { + raw, err := getBytes(ctx, "GET", url, nil) + if err != nil { + return err + } + return json.Unmarshal(raw, response) +} diff --git a/cmd/labs/github/ref.go b/cmd/labs/github/ref.go new file mode 100644 index 00000000..1975f6fb --- /dev/null +++ b/cmd/labs/github/ref.go @@ -0,0 +1,20 @@ +package github + +import ( + "context" + "fmt" + + "github.com/databricks/cli/libs/log" +) + +func ReadFileFromRef(ctx context.Context, org, repo, ref, file string) ([]byte, error) { + log.Debugf(ctx, "Reading %s@%s from %s/%s", file, ref, org, repo) + url := fmt.Sprintf("%s/%s/%s/%s/%s", gitHubUserContent, org, repo, ref, file) + return getBytes(ctx, "GET", url, nil) +} + +func DownloadZipball(ctx context.Context, org, repo, ref string) ([]byte, error) { + log.Debugf(ctx, "Downloading zipball for %s
from %s/%s", ref, org, repo) + zipballURL := fmt.Sprintf("%s/repos/%s/%s/zipball/%s", gitHubAPI, org, repo, ref) + return getBytes(ctx, "GET", zipballURL, nil) +} diff --git a/cmd/labs/github/ref_test.go b/cmd/labs/github/ref_test.go new file mode 100644 index 00000000..2a9ffcc5 --- /dev/null +++ b/cmd/labs/github/ref_test.go @@ -0,0 +1,48 @@ +package github + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFileFromRef(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/databrickslabs/ucx/main/README.md" { + w.Write([]byte(`abc`)) + return + } + t.Logf("Requested: %s", r.URL.Path) + panic("stub required") + })) + defer server.Close() + + ctx := context.Background() + ctx = WithUserContentOverride(ctx, server.URL) + + raw, err := ReadFileFromRef(ctx, "databrickslabs", "ucx", "main", "README.md") + assert.NoError(t, err) + assert.Equal(t, []byte("abc"), raw) +} + +func TestDownloadZipball(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/repos/databrickslabs/ucx/zipball/main" { + w.Write([]byte(`abc`)) + return + } + t.Logf("Requested: %s", r.URL.Path) + panic("stub required") + })) + defer server.Close() + + ctx := context.Background() + ctx = WithApiOverride(ctx, server.URL) + + raw, err := DownloadZipball(ctx, "databrickslabs", "ucx", "main") + assert.NoError(t, err) + assert.Equal(t, []byte("abc"), raw) +} diff --git a/cmd/labs/github/releases.go b/cmd/labs/github/releases.go new file mode 100644 index 00000000..0dae0317 --- /dev/null +++ b/cmd/labs/github/releases.go @@ -0,0 +1,61 @@ +package github + +import ( + "context" + "fmt" + "time" + + "github.com/databricks/cli/cmd/labs/localcache" + "github.com/databricks/cli/libs/log" +) + +const cacheTTL = 1 * time.Hour + +// NewReleaseCache creates a release cache for a repository in the GitHub org. +// Caller has to provide different cache directories for different repositories. 
+func NewReleaseCache(org, repo, cacheDir string) *ReleaseCache { + pattern := fmt.Sprintf("%s-%s-releases", org, repo) + return &ReleaseCache{ + cache: localcache.NewLocalCache[Versions](cacheDir, pattern, cacheTTL), + Org: org, + Repo: repo, + } +} + +type ReleaseCache struct { + cache localcache.LocalCache[Versions] + Org string + Repo string +} + +func (r *ReleaseCache) Load(ctx context.Context) (Versions, error) { + return r.cache.Load(ctx, func() (Versions, error) { + return getVersions(ctx, r.Org, r.Repo) + }) +} + +// getVersions is considered to be a private API, as we want the usage to go through a cache +func getVersions(ctx context.Context, org, repo string) (Versions, error) { + var releases Versions + log.Debugf(ctx, "Fetching latest releases for %s/%s from GitHub API", org, repo) + url := fmt.Sprintf("%s/repos/%s/%s/releases", gitHubAPI, org, repo) + err := httpGetAndUnmarshal(ctx, url, &releases) + return releases, err +} + +type ghAsset struct { + Name string `json:"name"` + ContentType string `json:"content_type"` + Size int `json:"size"` + BrowserDownloadURL string `json:"browser_download_url"` +} + +type Release struct { + Version string `json:"tag_name"` + CreatedAt time.Time `json:"created_at"` + PublishedAt time.Time `json:"published_at"` + ZipballURL string `json:"zipball_url"` + Assets []ghAsset `json:"assets"` +} + +type Versions []Release diff --git a/cmd/labs/github/releases_test.go b/cmd/labs/github/releases_test.go new file mode 100644 index 00000000..ea24a1e2 --- /dev/null +++ b/cmd/labs/github/releases_test.go @@ -0,0 +1,34 @@ +package github + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLoadsReleasesForCLI(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/repos/databricks/cli/releases" { + w.Write([]byte(`[{"tag_name": "v1.2.3"}, {"tag_name": "v1.2.2"}]`)) + return + } + t.Logf("Requested: %s", r.URL.Path) + panic("stub required") + })) + defer server.Close() + + ctx := context.Background() + ctx = WithApiOverride(ctx, server.URL) + + r := NewReleaseCache("databricks", "cli", t.TempDir()) + all, err := r.Load(ctx) + assert.NoError(t, err) + assert.Len(t, all, 2) + + // no second HTTP call is made: the cached result is still fresh + _, err = r.Load(ctx) + assert.NoError(t, err) +} diff --git a/cmd/labs/github/repositories.go b/cmd/labs/github/repositories.go new file mode 100644 index 00000000..850cdb1c --- /dev/null +++ b/cmd/labs/github/repositories.go @@ -0,0 +1,59 @@ +package github + +import ( + "context" + "fmt" + "time" + + "github.com/databricks/cli/cmd/labs/localcache" + "github.com/databricks/cli/libs/log" +) + +const repositoryCacheTTL = 24 * time.Hour + +func NewRepositoryCache(org, cacheDir string) *repositoryCache { + filename := fmt.Sprintf("%s-repositories", org) + return &repositoryCache{ + cache: localcache.NewLocalCache[Repositories](cacheDir, filename, repositoryCacheTTL), + Org: org, + } +} + +type repositoryCache struct { + cache localcache.LocalCache[Repositories] + Org string +} + +func (r *repositoryCache) Load(ctx context.Context) (Repositories, error) { + return r.cache.Load(ctx, func() (Repositories, error) { + return getRepositories(ctx, r.Org) + }) +} + +// getRepositories is considered to be a private API, as we want the usage to go through a cache +func getRepositories(ctx context.Context, org string) (Repositories, error) { + var repos Repositories + log.Debugf(ctx, "Loading repositories for %s from GitHub API",
org) + url := fmt.Sprintf("%s/users/%s/repos", gitHubAPI, org) + err := httpGetAndUnmarshal(ctx, url, &repos) + return repos, err +} + +type Repositories []ghRepo + +type ghRepo struct { + Name string `json:"name"` + Description string `json:"description"` + Language string `json:"language"` + DefaultBranch string `json:"default_branch"` + Stars int `json:"stargazers_count"` + IsFork bool `json:"fork"` + IsArchived bool `json:"archived"` + Topics []string `json:"topics"` + HtmlURL string `json:"html_url"` + CloneURL string `json:"clone_url"` + SshURL string `json:"ssh_url"` + License struct { + Name string `json:"name"` + } `json:"license"` +} diff --git a/cmd/labs/github/repositories_test.go b/cmd/labs/github/repositories_test.go new file mode 100644 index 00000000..4f2fef3e --- /dev/null +++ b/cmd/labs/github/repositories_test.go @@ -0,0 +1,30 @@ +package github + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRepositories(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/users/databrickslabs/repos" { + w.Write([]byte(`[{"name": "x"}]`)) + return + } + t.Logf("Requested: %s", r.URL.Path) + panic("stub required") + })) + defer server.Close() + + ctx := context.Background() + ctx = WithApiOverride(ctx, server.URL) + + r := NewRepositoryCache("databrickslabs", t.TempDir()) + all, err := r.Load(ctx) + assert.NoError(t, err) + assert.True(t, len(all) > 0) +} diff --git a/cmd/labs/install.go b/cmd/labs/install.go new file mode 100644 index 00000000..31db4389 --- /dev/null +++ b/cmd/labs/install.go @@ -0,0 +1,21 @@ +package labs + +import ( + "github.com/databricks/cli/cmd/labs/project" + "github.com/spf13/cobra" +) + +func newInstallCommand() *cobra.Command { + return &cobra.Command{ + Use: "install NAME", + Args: cobra.ExactArgs(1), + Short: "Installs project", + RunE: func(cmd *cobra.Command, args []string) error { + inst, err := project.NewInstaller(cmd, args[0]) + if err != nil { + return err + } + return inst.Install(cmd.Context()) + }, + } +} diff --git a/cmd/labs/installed.go b/cmd/labs/installed.go new file mode 100644 index 00000000..e4249c9f --- /dev/null +++ b/cmd/labs/installed.go @@ -0,0 +1,57 @@ +package labs + +import ( + "fmt" + + "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/libs/cmdio" + "github.com/spf13/cobra" +) + +func newInstalledCommand() *cobra.Command { + return &cobra.Command{ + Use: "installed", + Short: "List all installed labs", + Annotations: map[string]string{ + "template": cmdio.Heredoc(` + Name Description Version + {{range .Projects}}{{.Name}} {{.Description}} {{.Version}} + {{end}} + `), + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + type installedProject struct { + Name string `json:"name"` + Description string `json:"description"` + Version string `json:"version"` + } + projects, err := project.Installed(ctx) + if err != nil { + return err + } + var info struct { + Projects []installedProject `json:"projects"` + } + for _, v := range projects { + description := v.Description + if len(description) > 50 { + description = description[:50] + "..."
+ } + version, err := v.InstalledVersion(ctx) + if err != nil { + return fmt.Errorf("%s: %w", v.Name, err) + } + info.Projects = append(info.Projects, installedProject{ + Name: v.Name, + Description: description, + Version: version.Version, + }) + } + if len(info.Projects) == 0 { + return fmt.Errorf("no projects installed") + } + return cmdio.Render(ctx, info) + }, + } +} diff --git a/cmd/labs/installed_test.go b/cmd/labs/installed_test.go new file mode 100644 index 00000000..00692f79 --- /dev/null +++ b/cmd/labs/installed_test.go @@ -0,0 +1,19 @@ +package labs_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/libs/env" +) + +func TestListsInstalledProjects(t *testing.T) { + ctx := context.Background() + ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "installed") + r.RunAndExpectOutput(` + Name Description Version + blueprint Blueprint Project v0.3.15 + `) +} diff --git a/cmd/labs/labs.go b/cmd/labs/labs.go new file mode 100644 index 00000000..cccf8ac4 --- /dev/null +++ b/cmd/labs/labs.go @@ -0,0 +1,39 @@ +package labs + +import ( + "context" + + "github.com/databricks/cli/cmd/labs/project" + "github.com/spf13/cobra" +) + +func New(ctx context.Context) *cobra.Command { + cmd := &cobra.Command{ + Use: "labs", + Short: "Manage Databricks Labs installations", + Long: `Manage experimental Databricks Labs apps`, + } + + cmd.AddGroup(&cobra.Group{ + ID: "labs", + Title: "Installed Databricks Labs", + }) + + cmd.AddCommand( + newListCommand(), + newInstallCommand(), + newUpgradeCommand(), + newInstalledCommand(), + newClearCacheCommand(), + newUninstallCommand(), + newShowCommand(), + ) + all, err := project.Installed(ctx) + if err != nil { + panic(err) + } + for _, v := range all { + v.Register(cmd) + } + return cmd +} diff --git a/cmd/labs/list.go b/cmd/labs/list.go new file mode 100644 index 00000000..07cc180c --- /dev/null +++ b/cmd/labs/list.go @@ -0,0 +1,62 @@ +package labs + +import ( + "context" + + "github.com/databricks/cli/cmd/labs/github" + "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/libs/cmdio" + "github.com/spf13/cobra" +) + +type labsMeta struct { + Name string `json:"name"` + Description string `json:"description"` + License string `json:"license"` +} + +func allRepos(ctx context.Context) (github.Repositories, error) { + cacheDir := project.PathInLabs(ctx) + cache := github.NewRepositoryCache("databrickslabs", cacheDir) + return cache.Load(ctx) +} + +func newListCommand() *cobra.Command { + return &cobra.Command{ + Use: "list", + Short: "List all labs", + Annotations: map[string]string{ + "template": cmdio.Heredoc(` + Name Description + {{range .}}{{.Name}} {{.Description}} + {{end}} + `), + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + repositories, err := allRepos(ctx) + if err != nil { + return err + } + info := []labsMeta{} + for _, v := range repositories { + if v.IsArchived { + continue + } + if v.IsFork { + continue + } + description := v.Description + if len(description) > 50 { + description = description[:50] + "..." 
+ } + info = append(info, labsMeta{ + Name: v.Name, + Description: description, + License: v.License.Name, + }) + } + return cmdio.Render(ctx, info) + }, + } +} diff --git a/cmd/labs/list_test.go b/cmd/labs/list_test.go new file mode 100644 index 00000000..925b984a --- /dev/null +++ b/cmd/labs/list_test.go @@ -0,0 +1,19 @@ +package labs_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/libs/env" + "github.com/stretchr/testify/require" +) + +func TestListingWorks(t *testing.T) { + ctx := context.Background() + ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") + c := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "list") + stdout, _, err := c.Run() + require.NoError(t, err) + require.Contains(t, stdout.String(), "ucx") +} diff --git a/cmd/labs/localcache/jsonfile.go b/cmd/labs/localcache/jsonfile.go new file mode 100644 index 00000000..495743a5 --- /dev/null +++ b/cmd/labs/localcache/jsonfile.go @@ -0,0 +1,109 @@ +package localcache + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/fs" + "net/url" + "os" + "path/filepath" + "time" + + "github.com/databricks/cli/libs/log" +) + +const userRW = 0o600 +const ownerRWXworldRX = 0o755 + +func NewLocalCache[T any](dir, name string, validity time.Duration) LocalCache[T] { + return LocalCache[T]{ + dir: dir, + name: name, + validity: validity, + } +} + +type LocalCache[T any] struct { + name string + dir string + validity time.Duration + zero T +} + +func (r *LocalCache[T]) Load(ctx context.Context, refresh func() (T, error)) (T, error) { + cached, err := r.loadCache() + if errors.Is(err, fs.ErrNotExist) { + return r.refreshCache(ctx, refresh, r.zero) + } else if err != nil { + return r.zero, err + } else if time.Since(cached.Refreshed) > r.validity { + return r.refreshCache(ctx, refresh, cached.Data) + } + return cached.Data, nil +} + +type cached[T any] struct { + // we don't use the file's mtime because it's easier for the + // testdata used in the unit tests to carry a timestamp far in + // the future than to keep adjusting the mtime bit. + Refreshed time.Time `json:"refreshed_at"` + Data T `json:"data"` +} + +func (r *LocalCache[T]) refreshCache(ctx context.Context, refresh func() (T, error), offlineVal T) (T, error) { + data, err := refresh() + var urlError *url.Error + if errors.As(err, &urlError) { + log.Warnf(ctx, "System offline.
Cannot refresh cache: %s", urlError) + return offlineVal, nil + } + if err != nil { + return r.zero, fmt.Errorf("refresh: %w", err) + } + return r.writeCache(ctx, data) +} + +func (r *LocalCache[T]) writeCache(ctx context.Context, data T) (T, error) { + cached := &cached[T]{time.Now(), data} + raw, err := json.MarshalIndent(cached, "", " ") + if err != nil { + return r.zero, fmt.Errorf("json marshal: %w", err) + } + cacheFile := r.FileName() + err = os.WriteFile(cacheFile, raw, userRW) + if errors.Is(err, fs.ErrNotExist) { + cacheDir := filepath.Dir(cacheFile) + err := os.MkdirAll(cacheDir, ownerRWXworldRX) + if err != nil { + return r.zero, fmt.Errorf("create %s: %w", cacheDir, err) + } + err = os.WriteFile(cacheFile, raw, userRW) + if err != nil { + return r.zero, fmt.Errorf("retry save cache: %w", err) + } + return data, nil + } else if err != nil { + return r.zero, fmt.Errorf("save cache: %w", err) + } + return data, nil +} + +func (r *LocalCache[T]) FileName() string { + return filepath.Join(r.dir, fmt.Sprintf("%s.json", r.name)) +} + +func (r *LocalCache[T]) loadCache() (*cached[T], error) { + jsonFile := r.FileName() + raw, err := os.ReadFile(r.FileName()) + if err != nil { + return nil, fmt.Errorf("read %s: %w", jsonFile, err) + } + var v cached[T] + err = json.Unmarshal(raw, &v) + if err != nil { + return nil, fmt.Errorf("parse %s: %w", jsonFile, err) + } + return &v, nil +} diff --git a/cmd/labs/localcache/jsonfile_test.go b/cmd/labs/localcache/jsonfile_test.go new file mode 100644 index 00000000..0d852174 --- /dev/null +++ b/cmd/labs/localcache/jsonfile_test.go @@ -0,0 +1,132 @@ +package localcache + +import ( + "context" + "errors" + "fmt" + "net/url" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestCreatesDirectoryIfNeeded(t *testing.T) { + ctx := context.Background() + c := NewLocalCache[int64](t.TempDir(), "some/nested/file", 1*time.Minute) + thing := []int64{0} + tick := func() (int64, error) { + thing[0] += 1 + return thing[0], nil + } + first, err := c.Load(ctx, tick) + assert.NoError(t, err) + assert.Equal(t, first, int64(1)) +} + +func TestImpossibleToCreateDir(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("No /dev/null on windows") + } + ctx := context.Background() + c := NewLocalCache[int64]("/dev/null", "some/nested/file", 1*time.Minute) + thing := []int64{0} + tick := func() (int64, error) { + thing[0] += 1 + return thing[0], nil + } + _, err := c.Load(ctx, tick) + assert.Error(t, err) +} + +func TestRefreshes(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("No /dev/null on windows") + } + ctx := context.Background() + c := NewLocalCache[int64](t.TempDir(), "time", 1*time.Minute) + thing := []int64{0} + tick := func() (int64, error) { + thing[0] += 1 + return thing[0], nil + } + first, err := c.Load(ctx, tick) + assert.NoError(t, err) + + second, err := c.Load(ctx, tick) + assert.NoError(t, err) + assert.Equal(t, first, second) + + c.validity = 0 + third, err := c.Load(ctx, tick) + assert.NoError(t, err) + assert.NotEqual(t, first, third) +} + +func TestSupportOfflineSystem(t *testing.T) { + c := NewLocalCache[int64](t.TempDir(), "time", 1*time.Minute) + thing := []int64{0} + tick := func() (int64, error) { + thing[0] += 1 + return thing[0], nil + } + ctx := context.Background() + first, err := c.Load(ctx, tick) + assert.NoError(t, err) + + tick = func() (int64, error) { + return 0, &url.Error{ + Op: "X", + URL: "Y", + Err: errors.New("nope"), + } + } + + c.validity = 0 + + // offline during refresh + 
third, err := c.Load(ctx, tick) + assert.NoError(t, err) + assert.Equal(t, first, third) + + // fully offline + c = NewLocalCache[int64](t.TempDir(), "time", 1*time.Minute) + zero, err := c.Load(ctx, tick) + assert.NoError(t, err) + assert.Equal(t, int64(0), zero) +} + +func TestFolderDisappears(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("No /dev/null on windows") + } + c := NewLocalCache[int64]("/dev/null", "time", 1*time.Minute) + tick := func() (int64, error) { + now := time.Now().UnixNano() + t.Log("TICKS") + return now, nil + } + ctx := context.Background() + _, err := c.Load(ctx, tick) + assert.Error(t, err) +} + +func TestRefreshFails(t *testing.T) { + c := NewLocalCache[int64](t.TempDir(), "time", 1*time.Minute) + tick := func() (int64, error) { + return 0, fmt.Errorf("nope") + } + ctx := context.Background() + _, err := c.Load(ctx, tick) + assert.EqualError(t, err, "refresh: nope") +} + +func TestWrongType(t *testing.T) { + c := NewLocalCache[chan int](t.TempDir(), "x", 1*time.Minute) + ctx := context.Background() + _, err := c.Load(ctx, func() (chan int, error) { + return make(chan int), nil + }) + assert.EqualError(t, err, "json marshal: json: unsupported type: chan int") +} diff --git a/cmd/labs/project/command_test.go b/cmd/labs/project/command_test.go new file mode 100644 index 00000000..20021879 --- /dev/null +++ b/cmd/labs/project/command_test.go @@ -0,0 +1,69 @@ +package project_test + +import ( + "context" + "path/filepath" + "testing" + "time" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/python" + "github.com/databricks/databricks-sdk-go" + "github.com/stretchr/testify/assert" +) + +type echoOut struct { + Command string `json:"command"` + Flags map[string]string `json:"flags"` + Env map[string]string `json:"env"` +} + +func devEnvContext(t *testing.T) context.Context { + ctx := context.Background() + ctx = env.WithUserHomeDir(ctx, "testdata/installed-in-home") + py, _ := python.DetectExecutable(ctx) + py, _ = filepath.Abs(py) + ctx = env.Set(ctx, "PYTHON_BIN", py) + return ctx +} + +func TestRunningBlueprintEcho(t *testing.T) { + ctx := devEnvContext(t) + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "echo") + var out echoOut + r.RunAndParseJSON(&out) + assert.Equal(t, "echo", out.Command) + assert.Equal(t, "something", out.Flags["first"]) + assert.Equal(t, "https://accounts.cloud.databricks.com", out.Env["DATABRICKS_HOST"]) + assert.Equal(t, "cde", out.Env["DATABRICKS_ACCOUNT_ID"]) +} + +func TestRunningBlueprintEchoProfileWrongOverride(t *testing.T) { + ctx := devEnvContext(t) + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") + _, _, err := r.Run() + assert.ErrorIs(t, err, databricks.ErrNotAccountClient) +} + +func TestRunningCommand(t *testing.T) { + ctx := devEnvContext(t) + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "foo") + r.WithStdin() + defer r.CloseStdin() + + r.RunBackground() + r.WaitForTextPrinted("What is your name?", 5*time.Second) + r.SendText("Dude\n") + r.WaitForTextPrinted("Hello, Dude!", 5*time.Second) +} + +func TestRenderingTable(t *testing.T) { + ctx := devEnvContext(t) + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "table") + r.RunAndExpectOutput(` + Key Value + First Second + Third Fourth + `) +} diff --git a/cmd/labs/project/entrypoint.go b/cmd/labs/project/entrypoint.go new file mode 100644 index 
00000000..fedd70a4 --- /dev/null +++ b/cmd/labs/project/entrypoint.go @@ -0,0 +1,250 @@ +package project + +import ( + "context" + "errors" + "fmt" + "io/fs" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/config" + "github.com/spf13/cobra" +) + +type Entrypoint struct { + *Project + + RequireRunningCluster bool `yaml:"require_running_cluster,omitempty"` + IsUnauthenticated bool `yaml:"is_unauthenticated,omitempty"` + IsAccountLevel bool `yaml:"is_account_level,omitempty"` + IsBundleAware bool `yaml:"is_bundle_aware,omitempty"` +} + +var ErrNoLoginConfig = errors.New("no login configuration found") +var ErrMissingClusterID = errors.New("missing a cluster compatible with Databricks Connect") +var ErrMissingWarehouseID = errors.New("missing a SQL warehouse") +var ErrNotInTTY = errors.New("not in an interactive terminal") + +func (e *Entrypoint) NeedsCluster() bool { + if e.Installer == nil { + return false + } + if e.Installer.RequireDatabricksConnect && e.Installer.MinRuntimeVersion == "" { + e.Installer.MinRuntimeVersion = "13.1" + } + return e.Installer.MinRuntimeVersion != "" +} + +func (e *Entrypoint) NeedsWarehouse() bool { + if e.Installer == nil { + return false + } + return len(e.Installer.WarehouseTypes) != 0 +} + +func (e *Entrypoint) Prepare(cmd *cobra.Command) (map[string]string, error) { + ctx := cmd.Context() + libDir := e.EffectiveLibDir(ctx) + environment := map[string]string{ + "DATABRICKS_CLI_VERSION": build.GetInfo().Version, + "DATABRICKS_LABS_CACHE_DIR": e.CacheDir(ctx), + "DATABRICKS_LABS_CONFIG_DIR": e.ConfigDir(ctx), + "DATABRICKS_LABS_STATE_DIR": e.StateDir(ctx), + "DATABRICKS_LABS_LIB_DIR": libDir, + } + if e.IsPythonProject(ctx) { + e.preparePython(ctx, environment) + } + cfg, err := e.validLogin(cmd) + if err != nil { + return nil, fmt.Errorf("login: %w", err) + } + // cleanup auth profile and config file location, + // so that we don't confuse SDKs + cfg.Profile = "" + cfg.ConfigFile = "" + varNames := []string{} + for k, v := range e.environmentFromConfig(cfg) { + environment[k] = v + varNames = append(varNames, k) + } + if e.NeedsCluster() && e.RequireRunningCluster { + err = e.ensureRunningCluster(ctx, cfg) + if err != nil { + return nil, fmt.Errorf("running cluster: %w", err) + } + } + log.Debugf(ctx, "Passing down environment variables: %s", strings.Join(varNames, ", ")) + return environment, nil +} + +func (e *Entrypoint) preparePython(ctx context.Context, environment map[string]string) { + venv := e.virtualEnvPath(ctx) + environment["PATH"] = e.joinPaths(filepath.Join(venv, "bin"), env.Get(ctx, "PATH")) + + // PYTHONPATH extends the standard lookup locations for module files. It follows the same structure as + // the shell's PATH, where you specify one or more directory paths separated by the appropriate delimiter + // (such as colons for Unix or semicolons for Windows). If a directory listed in PYTHONPATH doesn't exist, + // it is disregarded without any notifications. + // + // Beyond regular directories, individual entries in PYTHONPATH can point to zipfiles that contain pure + // Python modules in either their source or compiled forms. It's important to note that extension modules + // cannot be imported from zipfiles. 
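To illustrate the joining behavior the comment above describes, here is a small self-contained sketch using the same `os.PathListSeparator` mechanism as the `joinPaths` helper later in this file; the directory path is made up:

```go
package example

import (
	"os"
	"path/filepath"
	"strings"
)

// pythonPathFor joins the library root and its "src" subdirectory with
// the platform's list separator: ":" on Unix, ";" on Windows. On Unix,
// pythonPathFor("/home/me/.databricks/labs/blueprint/lib") returns
// "/home/me/.databricks/labs/blueprint/lib:/home/me/.databricks/labs/blueprint/lib/src".
func pythonPathFor(libDir string) string {
	return strings.Join(
		[]string{libDir, filepath.Join(libDir, "src")},
		string(os.PathListSeparator),
	)
}
```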
+ // + // The initial search path varies depending on your installation but typically commences with the + // prefix/lib/pythonversion path (as indicated by PYTHONHOME). This default path is always included + // in PYTHONPATH. + // + // An extra directory can be included at the beginning of the search path, coming before PYTHONPATH, + // as explained in the Interface options section. You can control the search path from within a Python + // script using the sys.path variable. + // + // Here we are also supporting the "src" layout for python projects. + // + // See https://docs.python.org/3/using/cmdline.html#envvar-PYTHONPATH + libDir := e.EffectiveLibDir(ctx) + // The intention for every install is to be sandboxed: not dependent on anything other than the Python binary. + // Having the ability to override PYTHONPATH in the mix would break this assumption, so we need strong + // evidence that this is really needed. + environment["PYTHONPATH"] = e.joinPaths(libDir, filepath.Join(libDir, "src")) +} + +func (e *Entrypoint) ensureRunningCluster(ctx context.Context, cfg *config.Config) error { + feedback := cmdio.Spinner(ctx) + defer close(feedback) + w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg)) + if err != nil { + return fmt.Errorf("workspace client: %w", err) + } + // TODO: add in-progress callback to EnsureClusterIsRunning() in SDK + feedback <- "Ensuring the cluster is running..." + err = w.Clusters.EnsureClusterIsRunning(ctx, cfg.ClusterID) + if err != nil { + return fmt.Errorf("ensure running: %w", err) + } + return nil +} + +func (e *Entrypoint) joinPaths(paths ...string) string { + return strings.Join(paths, string(os.PathListSeparator)) +} + +func (e *Entrypoint) envAwareConfig(ctx context.Context) *config.Config { + return &config.Config{ + ConfigFile: filepath.Join(env.UserHomeDir(ctx), ".databrickscfg"), + Loaders: []config.Loader{ + env.NewConfigLoader(ctx), + config.ConfigAttributes, + config.ConfigFile, + }, + } +} + +func (e *Entrypoint) envAwareConfigWithProfile(ctx context.Context, profile string) *config.Config { + cfg := e.envAwareConfig(ctx) + cfg.Profile = profile + return cfg +} + +func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.Config, error) { + ctx := cmd.Context() + // it's okay for this config file not to exist, because some environments, + // like GitHub Actions, don't (need to) have it. There's a small downside of + // a warning log message from within the Go SDK.
+ profileOverride := e.profileOverride(cmd) + if profileOverride != "" { + log.Infof(ctx, "Overriding login profile: %s", profileOverride) + return &loginConfig{}, e.envAwareConfigWithProfile(ctx, profileOverride), nil + } + lc, err := e.loadLoginConfig(ctx) + isNoLoginConfig := errors.Is(err, fs.ErrNotExist) + defaultConfig := e.envAwareConfig(ctx) + if isNoLoginConfig && !e.IsBundleAware && e.isAuthConfigured(defaultConfig) { + log.Debugf(ctx, "Login is configured via environment variables") + return &loginConfig{}, defaultConfig, nil + } + if isNoLoginConfig && !e.IsBundleAware { + return nil, nil, ErrNoLoginConfig + } + if !isNoLoginConfig && err != nil { + return nil, nil, fmt.Errorf("load: %w", err) + } + if e.IsAccountLevel { + log.Debugf(ctx, "Using account-level login profile: %s", lc.AccountProfile) + return lc, e.envAwareConfigWithProfile(ctx, lc.AccountProfile), nil + } + if e.IsBundleAware { + err = root.TryConfigureBundle(cmd, []string{}) + if err != nil { + return nil, nil, fmt.Errorf("bundle: %w", err) + } + if b := bundle.GetOrNil(cmd.Context()); b != nil { + log.Infof(ctx, "Using login configuration from Databricks Asset Bundle") + return &loginConfig{}, b.WorkspaceClient().Config, nil + } + } + log.Debugf(ctx, "Using workspace-level login profile: %s", lc.WorkspaceProfile) + return lc, e.envAwareConfigWithProfile(ctx, lc.WorkspaceProfile), nil +} + +func (e *Entrypoint) validLogin(cmd *cobra.Command) (*config.Config, error) { + if e.IsUnauthenticated { + return &config.Config{}, nil + } + lc, cfg, err := e.getLoginConfig(cmd) + if err != nil { + return nil, fmt.Errorf("login config: %w", err) + } + err = cfg.EnsureResolved() + if err != nil { + return nil, err + } + // merge ~/.databrickscfg and ~/.databricks/labs/x/config/login.json when + // it comes to project-specific configuration + if e.NeedsCluster() && cfg.ClusterID == "" { + cfg.ClusterID = lc.ClusterID + } + if e.NeedsWarehouse() && cfg.WarehouseID == "" { + cfg.WarehouseID = lc.WarehouseID + } + isACC := cfg.IsAccountClient() + if e.IsAccountLevel && !isACC { + return nil, databricks.ErrNotAccountClient + } + if e.NeedsCluster() && !isACC && cfg.ClusterID == "" { + return nil, ErrMissingClusterID + } + if e.NeedsWarehouse() && !isACC && cfg.WarehouseID == "" { + return nil, ErrMissingWarehouseID + } + return cfg, nil +} + +func (e *Entrypoint) environmentFromConfig(cfg *config.Config) map[string]string { + env := map[string]string{} + for _, a := range config.ConfigAttributes { + if a.IsZero(cfg) { + continue + } + for _, ev := range a.EnvVars { + env[ev] = a.GetString(cfg) + } + } + return env +} + +func (e *Entrypoint) isAuthConfigured(cfg *config.Config) bool { + r := &http.Request{Header: http.Header{}} + err := cfg.Authenticate(r.WithContext(context.Background())) + return err == nil +} diff --git a/cmd/labs/project/fetcher.go b/cmd/labs/project/fetcher.go new file mode 100644 index 00000000..b677bcd9 --- /dev/null +++ b/cmd/labs/project/fetcher.go @@ -0,0 +1,141 @@ +package project + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/databricks/cli/cmd/labs/github" + "github.com/databricks/cli/libs/log" + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +type installable interface { + Install(ctx context.Context) error +} + +type devInstallation struct { + *Project + *cobra.Command +} + +func (d *devInstallation) Install(ctx context.Context) error { + if d.Installer == nil { + return nil + } + _, err := d.Installer.validLogin(d.Command) + if 
errors.Is(err, ErrNoLoginConfig) {
+		cfg := d.Installer.envAwareConfig(ctx)
+		lc := &loginConfig{Entrypoint: d.Installer.Entrypoint}
+		_, err = lc.askWorkspace(ctx, cfg)
+		if err != nil {
+			return fmt.Errorf("ask for workspace: %w", err)
+		}
+		err = lc.askAccountProfile(ctx, cfg)
+		if err != nil {
+			return fmt.Errorf("ask for account: %w", err)
+		}
+		err = lc.EnsureFoldersExist(ctx)
+		if err != nil {
+			return fmt.Errorf("folders: %w", err)
+		}
+		err = lc.save(ctx)
+		if err != nil {
+			return fmt.Errorf("save: %w", err)
+		}
+	}
+	return d.Installer.runHook(d.Command)
+}
+
+func NewInstaller(cmd *cobra.Command, name string) (installable, error) {
+	if name == "." {
+		wd, err := os.Getwd()
+		if err != nil {
+			return nil, fmt.Errorf("working directory: %w", err)
+		}
+		prj, err := Load(cmd.Context(), filepath.Join(wd, "labs.yml"))
+		if err != nil {
+			return nil, fmt.Errorf("load: %w", err)
+		}
+		cmd.PrintErrln(color.YellowString("Installing %s in development mode from %s", prj.Name, wd))
+		return &devInstallation{
+			Project: prj,
+			Command: cmd,
+		}, nil
+	}
+	name, version, ok := strings.Cut(name, "@")
+	if !ok {
+		version = "latest"
+	}
+	f := &fetcher{name}
+	version, err := f.checkReleasedVersions(cmd, version)
+	if err != nil {
+		return nil, fmt.Errorf("version: %w", err)
+	}
+	prj, err := f.loadRemoteProjectDefinition(cmd, version)
+	if err != nil {
+		return nil, fmt.Errorf("remote: %w", err)
+	}
+	return &installer{
+		Project: prj,
+		version: version,
+		cmd:     cmd,
+	}, nil
+}
+
+func NewUpgrader(cmd *cobra.Command, name string) (*installer, error) {
+	f := &fetcher{name}
+	version, err := f.checkReleasedVersions(cmd, "latest")
+	if err != nil {
+		return nil, fmt.Errorf("version: %w", err)
+	}
+	prj, err := f.loadRemoteProjectDefinition(cmd, version)
+	if err != nil {
+		return nil, fmt.Errorf("remote: %w", err)
+	}
+	prj.folder = PathInLabs(cmd.Context(), name)
+	return &installer{
+		Project: prj,
+		version: version,
+		cmd:     cmd,
+	}, nil
+}
+
+type fetcher struct {
+	name string
+}
+
+func (f *fetcher) checkReleasedVersions(cmd *cobra.Command, version string) (string, error) {
+	ctx := cmd.Context()
+	cacheDir := PathInLabs(ctx, f.name, "cache")
+	// `databricks labs install X` doesn't know which exact version to fetch, so first
+	// we fetch all versions and then pick the latest one dynamically.
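+	// Note: releases are read through an on-disk cache (the project's "cache"
+	// folder), so repeated invocations don't need to hit the GitHub API.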
+	versions, err := github.NewReleaseCache("databrickslabs", f.name, cacheDir).Load(ctx)
+	if err != nil {
+		return "", fmt.Errorf("versions: %w", err)
+	}
+	for _, v := range versions {
+		if v.Version == version {
+			return version, nil
+		}
+	}
+	if version == "latest" && len(versions) > 0 {
+		log.Debugf(ctx, "Latest %s version is: %s", f.name, versions[0].Version)
+		return versions[0].Version, nil
+	}
+	cmd.PrintErrln(color.YellowString("[WARNING] Installing unreleased version: %s", version))
+	return version, nil
+}
+
+func (f *fetcher) loadRemoteProjectDefinition(cmd *cobra.Command, version string) (*Project, error) {
+	ctx := cmd.Context()
+	raw, err := github.ReadFileFromRef(ctx, "databrickslabs", f.name, version, "labs.yml")
+	if err != nil {
+		return nil, fmt.Errorf("read labs.yml from GitHub: %w", err)
+	}
+	return readFromBytes(ctx, raw)
+}
diff --git a/cmd/labs/project/helpers.go b/cmd/labs/project/helpers.go
new file mode 100644
index 00000000..9117d875
--- /dev/null
+++ b/cmd/labs/project/helpers.go
@@ -0,0 +1,35 @@
+package project
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/databricks/cli/libs/env"
+)
+
+func PathInLabs(ctx context.Context, dirs ...string) string {
+	homeDir := env.UserHomeDir(ctx)
+	prefix := []string{homeDir, ".databricks", "labs"}
+	return filepath.Join(append(prefix, dirs...)...)
+}
+
+func tryLoadAndParseJSON[T any](jsonFile string) (*T, error) {
+	raw, err := os.ReadFile(jsonFile)
+	if errors.Is(err, fs.ErrNotExist) {
+		return nil, err
+	}
+	if err != nil {
+		return nil, fmt.Errorf("read %s: %w", jsonFile, err)
+	}
+	var v T
+	err = json.Unmarshal(raw, &v)
+	if err != nil {
+		return nil, fmt.Errorf("parse %s: %w", jsonFile, err)
+	}
+	return &v, nil
+}
diff --git a/cmd/labs/project/init_test.go b/cmd/labs/project/init_test.go
new file mode 100644
index 00000000..959381f5
--- /dev/null
+++ b/cmd/labs/project/init_test.go
@@ -0,0 +1,13 @@
+package project
+
+import (
+	"log/slog"
+	"os"
+)
+
+func init() {
+	slog.SetDefault(slog.New(
+		slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
+			Level: slog.LevelDebug,
+		})))
+}
diff --git a/cmd/labs/project/installed.go b/cmd/labs/project/installed.go
new file mode 100644
index 00000000..77fee544
--- /dev/null
+++ b/cmd/labs/project/installed.go
@@ -0,0 +1,58 @@
+package project
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/databricks/cli/folders"
+	"github.com/databricks/cli/libs/log"
+)
+
+func projectInDevMode(ctx context.Context) (*Project, error) {
+	cwd, err := os.Getwd()
+	if err != nil {
+		return nil, err
+	}
+	folder, err := folders.FindDirWithLeaf(cwd, "labs.yml")
+	if err != nil {
+		return nil, err
+	}
+	log.Debugf(ctx, "Found project under development in: %s", cwd)
+	return Load(ctx, filepath.Join(folder, "labs.yml"))
+}
+
+func Installed(ctx context.Context) (projects []*Project, err error) {
+	labsDir, err := os.ReadDir(PathInLabs(ctx))
+	if err != nil && !errors.Is(err, fs.ErrNotExist) {
+		return nil, err
+	}
+	projectDev, err := projectInDevMode(ctx)
+	if err != nil && !errors.Is(err, fs.ErrNotExist) {
+		return nil, err
+	}
+	if err == nil {
+		projects = append(projects, projectDev)
+	}
+	for _, v := range labsDir {
+		if !v.IsDir() {
+			continue
+		}
+		if projectDev != nil && v.Name() == projectDev.Name {
+			continue
+		}
+		labsYml := PathInLabs(ctx, v.Name(), "lib", "labs.yml")
+		prj, err := Load(ctx, labsYml)
+		if errors.Is(err, fs.ErrNotExist) {
+			continue
+		}
+		if err != nil {
return nil, fmt.Errorf("%s: %w", v.Name(), err) + } + projects = append(projects, prj) + } + return projects, nil +} diff --git a/cmd/labs/project/installed_test.go b/cmd/labs/project/installed_test.go new file mode 100644 index 00000000..e837692d --- /dev/null +++ b/cmd/labs/project/installed_test.go @@ -0,0 +1,19 @@ +package project + +import ( + "context" + "testing" + + "github.com/databricks/cli/libs/env" + "github.com/stretchr/testify/assert" +) + +func TestInstalled(t *testing.T) { + ctx := context.Background() + ctx = env.WithUserHomeDir(ctx, "testdata/installed-in-home") + + all, err := Installed(ctx) + assert.NoError(t, err) + assert.Len(t, all, 1) + assert.Equal(t, "blueprint", all[0].Name) +} diff --git a/cmd/labs/project/installer.go b/cmd/labs/project/installer.go new file mode 100644 index 00000000..2e09ed37 --- /dev/null +++ b/cmd/labs/project/installer.go @@ -0,0 +1,286 @@ +package project + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/databricks/cli/cmd/labs/github" + "github.com/databricks/cli/cmd/labs/unpack" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/cfgpickers" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/process" + "github.com/databricks/cli/libs/python" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +const ownerRWXworldRX = 0o755 + +type whTypes []sql.EndpointInfoWarehouseType + +type hook struct { + *Entrypoint `yaml:",inline"` + Script string `yaml:"script"` + RequireDatabricksConnect bool `yaml:"require_databricks_connect,omitempty"` + MinRuntimeVersion string `yaml:"min_runtime_version,omitempty"` + WarehouseTypes whTypes `yaml:"warehouse_types,omitempty"` +} + +func (h *hook) RequireRunningCluster() bool { + if h.Entrypoint == nil { + return false + } + return h.Entrypoint.RequireRunningCluster +} + +func (h *hook) HasPython() bool { + return strings.HasSuffix(h.Script, ".py") +} + +func (h *hook) runHook(cmd *cobra.Command) error { + if h.Script == "" { + return nil + } + ctx := cmd.Context() + envs, err := h.Prepare(cmd) + if err != nil { + return fmt.Errorf("prepare: %w", err) + } + libDir := h.EffectiveLibDir(ctx) + args := []string{} + if strings.HasSuffix(h.Script, ".py") { + args = append(args, h.virtualEnvPython(ctx)) + } + return process.Forwarded(ctx, + append(args, h.Script), + cmd.InOrStdin(), + cmd.OutOrStdout(), + cmd.ErrOrStderr(), + process.WithDir(libDir), + process.WithEnvs(envs)) +} + +type installer struct { + *Project + version string + + // command instance is used for: + // - auth profile flag override + // - standard input, output, and error streams + cmd *cobra.Command +} + +func (i *installer) Install(ctx context.Context) error { + err := i.EnsureFoldersExist(ctx) + if err != nil { + return fmt.Errorf("folders: %w", err) + } + i.folder = PathInLabs(ctx, i.Name) + w, err := i.login(ctx) + if err != nil && errors.Is(err, databrickscfg.ErrNoConfiguration) { + cfg := i.Installer.envAwareConfig(ctx) + w, err = databricks.NewWorkspaceClient((*databricks.Config)(cfg)) + if err != nil { + return fmt.Errorf("no ~/.databrickscfg: %w", err) + } + } else if err != nil { + return fmt.Errorf("login: %w", err) + } + err = i.downloadLibrary(ctx) + if err != nil { + return fmt.Errorf("lib: %w", err) + } + err = 
i.setupPythonVirtualEnvironment(ctx, w)
+	if err != nil {
+		return fmt.Errorf("python: %w", err)
+	}
+	err = i.recordVersion(ctx)
+	if err != nil {
+		return fmt.Errorf("record version: %w", err)
+	}
+	// TODO: a failing install hook for "clean installations" (not upgrades)
+	// should trigger removal of the project, otherwise users end up with
+	// misconfigured CLIs
+	err = i.runInstallHook(ctx)
+	if err != nil {
+		return fmt.Errorf("installer: %w", err)
+	}
+	return nil
+}
+
+func (i *installer) Upgrade(ctx context.Context) error {
+	err := i.downloadLibrary(ctx)
+	if err != nil {
+		return fmt.Errorf("lib: %w", err)
+	}
+	err = i.recordVersion(ctx)
+	if err != nil {
+		return fmt.Errorf("record version: %w", err)
+	}
+	err = i.runInstallHook(ctx)
+	if err != nil {
+		return fmt.Errorf("installer: %w", err)
+	}
+	return nil
+}
+
+func (i *installer) warningf(text string, v ...any) {
+	i.cmd.PrintErrln(color.YellowString(text, v...))
+}
+
+func (i *installer) cleanupLib(ctx context.Context) error {
+	libDir := i.LibDir(ctx)
+	err := os.RemoveAll(libDir)
+	if err != nil {
+		return fmt.Errorf("remove all: %w", err)
+	}
+	return os.MkdirAll(libDir, ownerRWXworldRX)
+}
+
+func (i *installer) recordVersion(ctx context.Context) error {
+	return i.writeVersionFile(ctx, i.version)
+}
+
+func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, error) {
+	if !cmdio.IsInteractive(ctx) {
+		log.Debugf(ctx, "Skipping workspace profile prompts in non-interactive mode")
+		return nil, nil
+	}
+	cfg, err := i.metaEntrypoint(ctx).validLogin(i.cmd)
+	if errors.Is(err, ErrNoLoginConfig) {
+		cfg = i.Installer.envAwareConfig(ctx)
+	} else if err != nil {
+		return nil, fmt.Errorf("valid: %w", err)
+	}
+	if !i.HasAccountLevelCommands() && cfg.IsAccountClient() {
+		return nil, fmt.Errorf("got account-level client, but no account-level commands")
+	}
+	lc := &loginConfig{Entrypoint: i.Installer.Entrypoint}
+	w, err := lc.askWorkspace(ctx, cfg)
+	if err != nil {
+		return nil, fmt.Errorf("ask for workspace: %w", err)
+	}
+	err = lc.askAccountProfile(ctx, cfg)
+	if err != nil {
+		return nil, fmt.Errorf("ask for account: %w", err)
+	}
+	err = lc.save(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("save: %w", err)
+	}
+	return w, nil
+}
+
+func (i *installer) downloadLibrary(ctx context.Context) error {
+	feedback := cmdio.Spinner(ctx)
+	defer close(feedback)
+	feedback <- "Cleaning up previous installation if necessary"
+	err := i.cleanupLib(ctx)
+	if err != nil {
+		return fmt.Errorf("cleanup: %w", err)
+	}
+	libTarget := i.LibDir(ctx)
+	// we may support wheels, jars, and golang binaries,
but those are not zipballs
+	if i.IsZipball() {
+		feedback <- fmt.Sprintf("Downloading and unpacking zipball for %s", i.version)
+		return i.downloadAndUnpackZipball(ctx, libTarget)
+	}
+	return fmt.Errorf("we only support zipballs for now")
+}
+
+func (i *installer) downloadAndUnpackZipball(ctx context.Context, libTarget string) error {
+	raw, err := github.DownloadZipball(ctx, "databrickslabs", i.Name, i.version)
+	if err != nil {
+		return fmt.Errorf("download zipball from GitHub: %w", err)
+	}
+	zipball := unpack.GitHubZipball{Reader: bytes.NewBuffer(raw)}
+	log.Debugf(ctx, "Unpacking zipball to: %s", libTarget)
+	return zipball.UnpackTo(libTarget)
+}
+
+func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databricks.WorkspaceClient) error {
+	if !i.HasPython() {
+		return nil
+	}
+	feedback := cmdio.Spinner(ctx)
+	defer close(feedback)
+	feedback <- "Detecting all installed Python interpreters on the system"
+	pythonInterpreters, err := python.DetectInterpreters(ctx)
+	if err != nil {
+		return fmt.Errorf("detect: %w", err)
+	}
+	py, err := pythonInterpreters.AtLeast(i.MinPython)
+	if err != nil {
+		return fmt.Errorf("min version: %w", err)
+	}
+	log.Debugf(ctx, "Detected Python %s at: %s", py.Version, py.Path)
+	venvPath := i.virtualEnvPath(ctx)
+	log.Debugf(ctx, "Creating Python Virtual Environment at: %s", venvPath)
+	feedback <- fmt.Sprintf("Creating Virtual Environment with Python %s", py.Version)
+	_, err = process.Background(ctx, []string{py.Path, "-m", "venv", venvPath})
+	if err != nil {
+		return fmt.Errorf("create venv: %w", err)
+	}
+	if i.Installer != nil && i.Installer.RequireDatabricksConnect {
+		feedback <- "Determining Databricks Connect version"
+		cluster, err := w.Clusters.Get(ctx, compute.GetClusterRequest{
+			ClusterId: w.Config.ClusterID,
+		})
+		if err != nil {
+			return fmt.Errorf("cluster: %w", err)
+		}
+		runtimeVersion, ok := cfgpickers.GetRuntimeVersion(*cluster)
+		if !ok {
+			return fmt.Errorf("unsupported runtime: %s", cluster.SparkVersion)
+		}
+		feedback <- fmt.Sprintf("Installing Databricks Connect v%s", runtimeVersion)
+		pipSpec := fmt.Sprintf("databricks-connect==%s", runtimeVersion)
+		err = i.installPythonDependencies(ctx, pipSpec)
+		if err != nil {
+			return fmt.Errorf("dbconnect: %w", err)
+		}
+	}
+	feedback <- "Installing Python library dependencies"
+	return i.installPythonDependencies(ctx, ".")
+}
+
+func (i *installer) installPythonDependencies(ctx context.Context, spec string) error {
+	if !i.IsPythonProject(ctx) {
+		return nil
+	}
+	libDir := i.LibDir(ctx)
+	log.Debugf(ctx, "Installing Python dependencies for: %s", libDir)
+	// maybe we'll need to add a call to one of these two scripts:
+	//  - python3 -m ensurepip --default-pip
+	//  - curl -sSL https://bootstrap.pypa.io/get-pip.py | python3
+	var buf bytes.Buffer
+	_, err := process.Background(ctx,
+		[]string{i.virtualEnvPython(ctx), "-m", "pip", "install", spec},
+		process.WithCombinedOutput(&buf),
+		process.WithDir(libDir))
+	if err != nil {
+		i.warningf(buf.String())
+		return fmt.Errorf("failed to install dependencies of %s", spec)
+	}
+	return nil
+}
+
+func (i *installer) runInstallHook(ctx context.Context) error {
+	if i.Installer == nil {
+		return nil
+	}
+	if i.Installer.Script == "" {
+		return nil
+	}
+	log.Debugf(ctx, "Launching installer script %s in %s", i.Installer.Script, i.LibDir(ctx))
+	return i.Installer.runHook(i.cmd)
+}
diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go
new file mode 100644
index 00000000..b61026f2
--- /dev/null
+++ 
b/cmd/labs/project/installer_test.go
@@ -0,0 +1,415 @@
+package project_test
+
+import (
+	"archive/zip"
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/fs"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/databricks/cli/cmd/labs/github"
+	"github.com/databricks/cli/cmd/labs/project"
+	"github.com/databricks/cli/internal"
+	"github.com/databricks/cli/libs/env"
+	"github.com/databricks/cli/libs/python"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/iam"
+	"github.com/databricks/databricks-sdk-go/service/sql"
+	"github.com/stretchr/testify/require"
+)
+
+const ownerRWXworldRX = 0o755
+const ownerRW = 0o600
+
+func zipballFromFolder(src string) ([]byte, error) {
+	var buf bytes.Buffer
+	zw := zip.NewWriter(&buf)
+	rootDir := path.Base(src) // this is required to emulate GitHub ZIP downloads
+	err := filepath.Walk(src, func(filePath string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		relpath, err := filepath.Rel(src, filePath)
+		if err != nil {
+			return err
+		}
+		relpath = path.Join(rootDir, relpath)
+		if info.IsDir() {
+			_, err = zw.Create(relpath + "/")
+			return err
+		}
+		file, err := os.Open(filePath)
+		if err != nil {
+			return err
+		}
+		defer file.Close()
+		f, err := zw.Create(relpath)
+		if err != nil {
+			return err
+		}
+		_, err = io.Copy(f, file)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	err = zw.Close()
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func copyTestdata(t *testing.T, name string) string {
+	// TODO: refactor fs.cp command into a reusable util
+	tempDir := t.TempDir()
+	name = strings.ReplaceAll(name, "/", string(os.PathSeparator))
+	err := filepath.WalkDir(name, func(path string, d fs.DirEntry, err error) error {
+		require.NoError(t, err)
+		dst := strings.TrimPrefix(path, name)
+		if dst == "" {
+			return nil
+		}
+		if d.IsDir() {
+			err := os.MkdirAll(filepath.Join(tempDir, dst), ownerRWXworldRX)
+			require.NoError(t, err)
+			return nil
+		}
+		in, err := os.Open(path)
+		require.NoError(t, err)
+		defer in.Close()
+		out, err := os.Create(filepath.Join(tempDir, dst))
+		require.NoError(t, err)
+		defer out.Close()
+		_, err = io.Copy(out, in)
+		require.NoError(t, err)
+		return nil
+	})
+	require.NoError(t, err)
+	return tempDir
+}
+
+func installerContext(t *testing.T, server *httptest.Server) context.Context {
+	ctx := context.Background()
+	ctx = github.WithApiOverride(ctx, server.URL)
+	ctx = github.WithUserContentOverride(ctx, server.URL)
+	ctx = env.WithUserHomeDir(ctx, t.TempDir())
+	// trick the release cache into thinking it already went to GitHub
+	cachePath := project.PathInLabs(ctx, "blueprint", "cache")
+	err := os.MkdirAll(cachePath, ownerRWXworldRX)
+	require.NoError(t, err)
+	bs := []byte(`{"refreshed_at": "2033-01-01T00:00:00.92857+02:00","data": [{"tag_name": "v0.3.15"}]}`)
+	err = os.WriteFile(filepath.Join(cachePath, "databrickslabs-blueprint-releases.json"), bs, ownerRW)
+	require.NoError(t, err)
+	return ctx
+}
+
+func respondWithJSON(t *testing.T, w http.ResponseWriter, v any) {
+	raw, err := json.Marshal(v)
+	require.NoError(t, err)
+	w.Write(raw)
+}
+
+type fileTree struct {
+	Path     string
+	MaxDepth int
+}
+
+func (ft fileTree) String() string {
+	lines := ft.listFiles(ft.Path, ft.MaxDepth)
+	return strings.Join(lines, "\n")
+}
+
+func (ft fileTree) listFiles(dir string, depth int) (lines []string) {
+	if ft.MaxDepth > 0 && depth > 
ft.MaxDepth { + return []string{fmt.Sprintf("deeper than %d levels", ft.MaxDepth)} + } + fileInfo, err := os.ReadDir(dir) + if err != nil { + return []string{err.Error()} + } + for _, entry := range fileInfo { + lines = append(lines, fmt.Sprintf("%s%s", ft.getIndent(depth), entry.Name())) + if entry.IsDir() { + subdir := filepath.Join(dir, entry.Name()) + lines = append(lines, ft.listFiles(subdir, depth+1)...) + } + } + return lines +} + +func (ft fileTree) getIndent(depth int) string { + return "│" + strings.Repeat(" ", depth*2) + "├─ " +} + +func TestInstallerWorksForReleases(t *testing.T) { + defer func() { + if !t.Failed() { + return + } + t.Logf("file tree:\n%s", fileTree{ + Path: filepath.Dir(t.TempDir()), + }) + }() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/databrickslabs/blueprint/v0.3.15/labs.yml" { + raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") + if err != nil { + panic(err) + } + w.Write(raw) + return + } + if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.3.15" { + raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib") + if err != nil { + panic(err) + } + w.Header().Add("Content-Type", "application/octet-stream") + w.Write(raw) + return + } + if r.URL.Path == "/api/2.0/clusters/get" { + respondWithJSON(t, w, &compute.ClusterDetails{ + State: compute.StateRunning, + }) + return + } + t.Logf("Requested: %s", r.URL.Path) + t.FailNow() + })) + defer server.Close() + + ctx := installerContext(t, server) + + // simulate the case of GitHub Actions + ctx = env.Set(ctx, "DATABRICKS_HOST", server.URL) + ctx = env.Set(ctx, "DATABRICKS_TOKEN", "...") + ctx = env.Set(ctx, "DATABRICKS_CLUSTER_ID", "installer-cluster") + ctx = env.Set(ctx, "DATABRICKS_WAREHOUSE_ID", "installer-warehouse") + + // After the installation, we'll have approximately the following state: + // t.TempDir() + // └── 001 <------------------------------------------------- env.UserHomeDir(ctx) + // ├── .databricks + // │ └── labs + // │ └── blueprint + // │ ├── cache <------------------------------- prj.CacheDir(ctx) + // │ │ └── databrickslabs-blueprint-releases.json + // │ ├── config + // │ ├── lib <--------------------------------- prj.LibDir(ctx) + // │ │ ├── install.py + // │ │ ├── labs.yml + // │ │ ├── main.py + // │ │ └── pyproject.toml + // │ └── state <------------------------------- prj.StateDir(ctx) + // │ ├── venv <---------------------------- prj.virtualEnvPath(ctx) + // │ │ ├── bin + // │ │ │ ├── pip + // │ │ │ ├── ... + // │ │ │ ├── python -> python3.9 + // │ │ │ ├── python3 -> python3.9 <---- prj.virtualEnvPython(ctx) + // │ │ │ └── python3.9 -> (path to a detected python) + // │ │ ├── include + // │ │ ├── lib + // │ │ │ └── python3.9 + // │ │ │ └── site-packages + // │ │ │ ├── ... 
+ // │ │ │ ├── distutils-precedence.pth + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", "blueprint") + r.RunAndExpectOutput("setting up important infrastructure") +} + +func TestInstallerWorksForDevelopment(t *testing.T) { + defer func() { + if !t.Failed() { + return + } + t.Logf("file tree:\n%s", fileTree{ + Path: filepath.Dir(t.TempDir()), + }) + }() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/api/2.0/clusters/list" { + respondWithJSON(t, w, compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{ + { + ClusterId: "abc-id", + ClusterName: "first shared", + DataSecurityMode: compute.DataSecurityModeUserIsolation, + SparkVersion: "12.2.x-whatever", + State: compute.StateRunning, + }, + { + ClusterId: "bcd-id", + ClusterName: "second personal", + DataSecurityMode: compute.DataSecurityModeSingleUser, + SparkVersion: "14.5.x-whatever", + State: compute.StateRunning, + SingleUserName: "serge", + }, + }, + }) + return + } + if r.URL.Path == "/api/2.0/preview/scim/v2/Me" { + respondWithJSON(t, w, iam.User{ + UserName: "serge", + }) + return + } + if r.URL.Path == "/api/2.0/clusters/spark-versions" { + respondWithJSON(t, w, compute.GetSparkVersionsResponse{ + Versions: []compute.SparkVersion{ + { + Key: "14.5.x-whatever", + Name: "14.5 (Awesome)", + }, + }, + }) + return + } + if r.URL.Path == "/api/2.0/clusters/get" { + respondWithJSON(t, w, &compute.ClusterDetails{ + State: compute.StateRunning, + }) + return + } + if r.URL.Path == "/api/2.0/sql/warehouses" { + respondWithJSON(t, w, sql.ListWarehousesResponse{ + Warehouses: []sql.EndpointInfo{ + { + Id: "efg-id", + Name: "First PRO Warehouse", + WarehouseType: sql.EndpointInfoWarehouseTypePro, + }, + }, + }) + return + } + t.Logf("Requested: %s", r.URL.Path) + t.FailNow() + })) + defer server.Close() + + wd, _ := os.Getwd() + defer os.Chdir(wd) + + devDir := copyTestdata(t, "testdata/installed-in-home/.databricks/labs/blueprint/lib") + err := os.Chdir(devDir) + require.NoError(t, err) + + ctx := installerContext(t, server) + py, _ := python.DetectExecutable(ctx) + py, _ = filepath.Abs(py) + + // development installer assumes it's in the active virtualenv + ctx = env.Set(ctx, "PYTHON_BIN", py) + + err = os.WriteFile(filepath.Join(env.UserHomeDir(ctx), ".databrickscfg"), []byte(fmt.Sprintf(` +[profile-one] +host = %s +token = ... 
+ +[acc] +host = %s +account_id = abc + `, server.URL, server.URL)), ownerRW) + require.NoError(t, err) + + // We have the following state at this point: + // t.TempDir() + // ├── 001 <------------------ $CWD, prj.EffectiveLibDir(ctx), prj.folder + // │ ├── install.py + // │ ├── labs.yml <--------- prj.IsDeveloperMode(ctx) == true + // │ ├── main.py + // │ └── pyproject.toml + // └── 002 <------------------ env.UserHomeDir(ctx) + // └── .databricks + // └── labs + // └── blueprint <--- project.PathInLabs(ctx, "blueprint"), prj.rootDir(ctx) + // └── cache <--- prj.CacheDir(ctx) + // └── databrickslabs-blueprint-releases.json + + // `databricks labs install .` means "verify this installer i'm developing does work" + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", ".") + r.WithStdin() + defer r.CloseStdin() + + r.RunBackground() + r.WaitForTextPrinted("setting up important infrastructure", 5*time.Second) +} + +func TestUpgraderWorksForReleases(t *testing.T) { + defer func() { + if !t.Failed() { + return + } + t.Logf("file tree:\n%s", fileTree{ + Path: filepath.Dir(t.TempDir()), + }) + }() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/databrickslabs/blueprint/v0.4.0/labs.yml" { + raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") + if err != nil { + panic(err) + } + w.Write(raw) + return + } + if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.4.0" { + raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib") + if err != nil { + panic(err) + } + w.Header().Add("Content-Type", "application/octet-stream") + w.Write(raw) + return + } + if r.URL.Path == "/api/2.0/clusters/get" { + respondWithJSON(t, w, &compute.ClusterDetails{ + State: compute.StateRunning, + }) + return + } + t.Logf("Requested: %s", r.URL.Path) + t.FailNow() + })) + defer server.Close() + + ctx := installerContext(t, server) + + newHome := copyTestdata(t, "testdata/installed-in-home") + ctx = env.WithUserHomeDir(ctx, newHome) + + py, _ := python.DetectExecutable(ctx) + py, _ = filepath.Abs(py) + ctx = env.Set(ctx, "PYTHON_BIN", py) + + cachePath := project.PathInLabs(ctx, "blueprint", "cache") + bs := []byte(`{"refreshed_at": "2033-01-01T00:00:00.92857+02:00","data": [{"tag_name": "v0.4.0"}]}`) + err := os.WriteFile(filepath.Join(cachePath, "databrickslabs-blueprint-releases.json"), bs, ownerRW) + require.NoError(t, err) + + // simulate the case of GitHub Actions + ctx = env.Set(ctx, "DATABRICKS_HOST", server.URL) + ctx = env.Set(ctx, "DATABRICKS_TOKEN", "...") + ctx = env.Set(ctx, "DATABRICKS_CLUSTER_ID", "installer-cluster") + ctx = env.Set(ctx, "DATABRICKS_WAREHOUSE_ID", "installer-warehouse") + + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") + r.RunAndExpectOutput("setting up important infrastructure") +} diff --git a/cmd/labs/project/login.go b/cmd/labs/project/login.go new file mode 100644 index 00000000..dd235064 --- /dev/null +++ b/cmd/labs/project/login.go @@ -0,0 +1,117 @@ +package project + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/databrickscfg/cfgpickers" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/config" +) + +type loginConfig struct { + *Entrypoint `json:"-"` + WorkspaceProfile string 
`json:"workspace_profile,omitempty"` + AccountProfile string `json:"account_profile,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + WarehouseID string `json:"warehouse_id,omitempty"` +} + +func (lc *loginConfig) askWorkspace(ctx context.Context, cfg *config.Config) (*databricks.WorkspaceClient, error) { + if cfg.IsAccountClient() { + return nil, nil + } + err := lc.askWorkspaceProfile(ctx, cfg) + if err != nil { + return nil, fmt.Errorf("profile: %w", err) + } + w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg)) + if err != nil { + return nil, fmt.Errorf("client: %w", err) + } + err = lc.askCluster(ctx, w) + if err != nil { + return nil, fmt.Errorf("cluster: %w", err) + } + err = lc.askWarehouse(ctx, w) + if err != nil { + return nil, fmt.Errorf("warehouse: %w", err) + } + return w, nil +} + +func (lc *loginConfig) askWorkspaceProfile(ctx context.Context, cfg *config.Config) (err error) { + if cfg.Profile != "" { + lc.WorkspaceProfile = cfg.Profile + return + } + if !cmdio.IsInteractive(ctx) { + return ErrNotInTTY + } + lc.WorkspaceProfile, err = root.AskForWorkspaceProfile(ctx) + cfg.Profile = lc.WorkspaceProfile + return +} + +func (lc *loginConfig) askCluster(ctx context.Context, w *databricks.WorkspaceClient) (err error) { + if !lc.NeedsCluster() { + return + } + if w.Config.ClusterID != "" { + lc.ClusterID = w.Config.ClusterID + return + } + if !cmdio.IsInteractive(ctx) { + return ErrNotInTTY + } + clusterID, err := cfgpickers.AskForCluster(ctx, w, + cfgpickers.WithDatabricksConnect(lc.Installer.MinRuntimeVersion)) + if err != nil { + return fmt.Errorf("select: %w", err) + } + w.Config.ClusterID = clusterID + lc.ClusterID = clusterID + return +} + +func (lc *loginConfig) askWarehouse(ctx context.Context, w *databricks.WorkspaceClient) (err error) { + if !lc.NeedsWarehouse() { + return + } + if w.Config.WarehouseID != "" { + lc.WarehouseID = w.Config.WarehouseID + return + } + if !cmdio.IsInteractive(ctx) { + return ErrNotInTTY + } + lc.WarehouseID, err = cfgpickers.AskForWarehouse(ctx, w, + cfgpickers.WithWarehouseTypes(lc.Installer.WarehouseTypes...)) + return +} + +func (lc *loginConfig) askAccountProfile(ctx context.Context, cfg *config.Config) (err error) { + if !lc.HasAccountLevelCommands() { + return nil + } + if !cmdio.IsInteractive(ctx) { + return ErrNotInTTY + } + lc.AccountProfile, err = root.AskForAccountProfile(ctx) + return +} + +func (lc *loginConfig) save(ctx context.Context) error { + authFile := lc.loginFile(ctx) + raw, err := json.MarshalIndent(lc, "", " ") + if err != nil { + return err + } + log.Debugf(ctx, "Writing auth configuration to: %s", authFile) + return os.WriteFile(authFile, raw, ownerRW) +} diff --git a/cmd/labs/project/project.go b/cmd/labs/project/project.go new file mode 100644 index 00000000..6adf9a3c --- /dev/null +++ b/cmd/labs/project/project.go @@ -0,0 +1,352 @@ +package project + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/databricks/cli/cmd/labs/github" + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/python" + "github.com/databricks/databricks-sdk-go/logger" + "github.com/fatih/color" + "gopkg.in/yaml.v3" + + "github.com/spf13/cobra" +) + +const ownerRW = 0o600 + +func Load(ctx context.Context, labsYml string) (*Project, error) { + raw, err := os.ReadFile(labsYml) + if err != nil { + return nil, fmt.Errorf("read labs.yml: %w", err) + } + project, err := readFromBytes(ctx, 
raw) + if err != nil { + return nil, err + } + project.folder = filepath.Dir(labsYml) + return project, nil +} + +func readFromBytes(ctx context.Context, labsYmlRaw []byte) (*Project, error) { + var project Project + err := yaml.Unmarshal(labsYmlRaw, &project) + if err != nil { + return nil, fmt.Errorf("parse labs.yml: %w", err) + } + e := (&project).metaEntrypoint(ctx) + if project.Installer != nil { + project.Installer.Entrypoint = e + } + if project.Uninstaller != nil { + project.Uninstaller.Entrypoint = e + } + return &project, nil +} + +type Project struct { + SpecVersion int `yaml:"$version"` + + Name string `yaml:"name"` + Description string `yaml:"description"` + Installer *hook `yaml:"install,omitempty"` + Uninstaller *hook `yaml:"uninstall,omitempty"` + Main string `yaml:"entrypoint"` + MinPython string `yaml:"min_python"` + Commands []*proxy `yaml:"commands,omitempty"` + + folder string +} + +func (p *Project) IsZipball() bool { + // the simplest way of running the project - download ZIP file from github + return true +} + +func (p *Project) HasPython() bool { + if strings.HasSuffix(p.Main, ".py") { + return true + } + if p.Installer != nil && p.Installer.HasPython() { + return true + } + if p.Uninstaller != nil && p.Uninstaller.HasPython() { + return true + } + return p.MinPython != "" +} + +func (p *Project) metaEntrypoint(ctx context.Context) *Entrypoint { + return &Entrypoint{ + Project: p, + RequireRunningCluster: p.requireRunningCluster(), + } +} + +func (p *Project) requireRunningCluster() bool { + if p.Installer != nil && p.Installer.RequireRunningCluster() { + return true + } + for _, v := range p.Commands { + if v.RequireRunningCluster { + return true + } + } + return false +} + +func (p *Project) fileExists(name string) bool { + _, err := os.Stat(name) + return err == nil +} + +func (p *Project) projectFilePath(ctx context.Context, name string) string { + return filepath.Join(p.EffectiveLibDir(ctx), name) +} + +func (p *Project) IsPythonProject(ctx context.Context) bool { + if p.fileExists(p.projectFilePath(ctx, "setup.py")) { + return true + } + if p.fileExists(p.projectFilePath(ctx, "pyproject.toml")) { + return true + } + return false +} + +func (p *Project) IsDeveloperMode(ctx context.Context) bool { + return p.folder != "" && !strings.HasPrefix(p.LibDir(ctx), p.folder) +} + +func (p *Project) HasFolder() bool { + return p.folder != "" +} + +func (p *Project) HasAccountLevelCommands() bool { + for _, v := range p.Commands { + if v.IsAccountLevel { + return true + } + } + return false +} + +func (p *Project) IsBundleAware() bool { + for _, v := range p.Commands { + if v.IsBundleAware { + return true + } + } + return false +} + +func (p *Project) Register(parent *cobra.Command) { + group := &cobra.Command{ + Use: p.Name, + Short: p.Description, + GroupID: "labs", + } + parent.AddCommand(group) + for _, cp := range p.Commands { + cp.register(group) + cp.Entrypoint.Project = p + } +} + +func (p *Project) rootDir(ctx context.Context) string { + return PathInLabs(ctx, p.Name) +} + +func (p *Project) CacheDir(ctx context.Context) string { + return filepath.Join(p.rootDir(ctx), "cache") +} + +func (p *Project) ConfigDir(ctx context.Context) string { + return filepath.Join(p.rootDir(ctx), "config") +} + +func (p *Project) LibDir(ctx context.Context) string { + return filepath.Join(p.rootDir(ctx), "lib") +} + +func (p *Project) EffectiveLibDir(ctx context.Context) string { + if p.IsDeveloperMode(ctx) { + // developer is working on a local checkout, that is not inside of 
installed root + return p.folder + } + return p.LibDir(ctx) +} + +func (p *Project) StateDir(ctx context.Context) string { + return filepath.Join(p.rootDir(ctx), "state") +} + +func (p *Project) EnsureFoldersExist(ctx context.Context) error { + dirs := []string{p.CacheDir(ctx), p.ConfigDir(ctx), p.LibDir(ctx), p.StateDir(ctx)} + for _, v := range dirs { + err := os.MkdirAll(v, ownerRWXworldRX) + if err != nil { + return fmt.Errorf("folder %s: %w", v, err) + } + } + return nil +} + +func (p *Project) Uninstall(cmd *cobra.Command) error { + if p.Uninstaller != nil { + err := p.Uninstaller.runHook(cmd) + if err != nil { + return fmt.Errorf("uninstall hook: %w", err) + } + } + ctx := cmd.Context() + log.Infof(ctx, "Removing project: %s", p.Name) + return os.RemoveAll(p.rootDir(ctx)) +} + +func (p *Project) virtualEnvPath(ctx context.Context) string { + if p.IsDeveloperMode(ctx) { + // When a virtual environment has been activated, the VIRTUAL_ENV environment variable + // is set to the path of the environment. Since explicitly activating a virtual environment + // is not required to use it, VIRTUAL_ENV cannot be relied upon to determine whether a virtual + // environment is being used. + // + // See https://docs.python.org/3/library/venv.html#how-venvs-work + activatedVenv := env.Get(ctx, "VIRTUAL_ENV") + if activatedVenv != "" { + logger.Debugf(ctx, "(development mode) using active virtual environment from: %s", activatedVenv) + return activatedVenv + } + nonActivatedVenv, err := python.DetectVirtualEnvPath(p.EffectiveLibDir(ctx)) + if err == nil { + logger.Debugf(ctx, "(development mode) using virtual environment from: %s", nonActivatedVenv) + return nonActivatedVenv + } + } + // by default, we pick Virtual Environment from DATABRICKS_LABS_STATE_DIR + return filepath.Join(p.StateDir(ctx), "venv") +} + +func (p *Project) virtualEnvPython(ctx context.Context) string { + overridePython := env.Get(ctx, "PYTHON_BIN") + if overridePython != "" { + return overridePython + } + if runtime.GOOS == "windows" { + return filepath.Join(p.virtualEnvPath(ctx), "Scripts", "python.exe") + } + return filepath.Join(p.virtualEnvPath(ctx), "bin", "python3") +} + +func (p *Project) loginFile(ctx context.Context) string { + if p.IsDeveloperMode(ctx) { + // developers may not want to pollute the state in + // ~/.databricks/labs/X/config while the version is not yet + // released + return p.projectFilePath(ctx, ".databricks-login.json") + } + return filepath.Join(p.ConfigDir(ctx), "login.json") +} + +func (p *Project) loadLoginConfig(ctx context.Context) (*loginConfig, error) { + loginFile := p.loginFile(ctx) + log.Debugf(ctx, "Loading login configuration from: %s", loginFile) + lc, err := tryLoadAndParseJSON[loginConfig](loginFile) + if err != nil { + return nil, fmt.Errorf("try load: %w", err) + } + lc.Entrypoint = p.metaEntrypoint(ctx) + return lc, nil +} + +func (p *Project) versionFile(ctx context.Context) string { + return filepath.Join(p.StateDir(ctx), "version.json") +} + +func (p *Project) InstalledVersion(ctx context.Context) (*version, error) { + if p.IsDeveloperMode(ctx) { + return &version{ + Version: "*", + Date: time.Now(), + }, nil + } + versionFile := p.versionFile(ctx) + log.Debugf(ctx, "Loading installed version info from: %s", versionFile) + return tryLoadAndParseJSON[version](versionFile) +} + +func (p *Project) writeVersionFile(ctx context.Context, ver string) error { + versionFile := p.versionFile(ctx) + raw, err := json.Marshal(version{ + Version: ver, + Date: time.Now(), + }) + if err != nil 
{ + return err + } + log.Debugf(ctx, "Writing installed version info to: %s", versionFile) + return os.WriteFile(versionFile, raw, ownerRW) +} + +// checkUpdates is called before every command of an installed project, +// giving users hints when they need to update their installations. +func (p *Project) checkUpdates(cmd *cobra.Command) error { + ctx := cmd.Context() + if p.IsDeveloperMode(ctx) { + // skipping update check for projects in developer mode, that + // might not be installed yet + return nil + } + r := github.NewReleaseCache("databrickslabs", p.Name, p.CacheDir(ctx)) + versions, err := r.Load(ctx) + if err != nil { + return err + } + installed, err := p.InstalledVersion(ctx) + if err != nil { + return err + } + latest := versions[0] + if installed.Version == latest.Version { + return nil + } + ago := time.Since(latest.PublishedAt) + msg := "[UPGRADE ADVISED] Newer %s version was released %s ago. Please run `databricks labs upgrade %s` to upgrade: %s -> %s" + cmd.PrintErrln(color.YellowString(msg, p.Name, p.timeAgo(ago), p.Name, installed.Version, latest.Version)) + return nil +} + +func (p *Project) timeAgo(dur time.Duration) string { + days := int(dur.Hours()) / 24 + hours := int(dur.Hours()) % 24 + minutes := int(dur.Minutes()) % 60 + if dur < time.Minute { + return "minute" + } else if dur < time.Hour { + return fmt.Sprintf("%d minutes", minutes) + } else if dur < (24 * time.Hour) { + return fmt.Sprintf("%d hours", hours) + } + return fmt.Sprintf("%d days", days) +} + +func (p *Project) profileOverride(cmd *cobra.Command) string { + profileFlag := cmd.Flag("profile") + if profileFlag == nil { + return "" + } + return profileFlag.Value.String() +} + +type version struct { + Version string `json:"version"` + Date time.Time `json:"date"` +} diff --git a/cmd/labs/project/project_test.go b/cmd/labs/project/project_test.go new file mode 100644 index 00000000..79e69bad --- /dev/null +++ b/cmd/labs/project/project_test.go @@ -0,0 +1,22 @@ +package project + +import ( + "context" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func assertEqualPaths(t *testing.T, expected, actual string) { + expected = strings.ReplaceAll(expected, "/", string(os.PathSeparator)) + assert.Equal(t, expected, actual) +} + +func TestLoad(t *testing.T) { + ctx := context.Background() + prj, err := Load(ctx, "testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") + assert.NoError(t, err) + assertEqualPaths(t, "testdata/installed-in-home/.databricks/labs/blueprint/lib", prj.folder) +} diff --git a/cmd/labs/project/proxy.go b/cmd/labs/project/proxy.go new file mode 100644 index 00000000..ae7df286 --- /dev/null +++ b/cmd/labs/project/proxy.go @@ -0,0 +1,146 @@ +package project + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/fs" + "path/filepath" + "strings" + + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/process" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type proxy struct { + Entrypoint `yaml:",inline"` + Name string `yaml:"name"` + Description string `yaml:"description"` + TableTemplate string `yaml:"table_template,omitempty"` + Flags []flag `yaml:"flags,omitempty"` +} + +func (cp *proxy) register(parent *cobra.Command) { + cmd := &cobra.Command{ + Use: cp.Name, + Short: cp.Description, + RunE: cp.runE, + } + parent.AddCommand(cmd) + flags := cmd.Flags() + for _, flag := range cp.Flags { + flag.register(flags) + } +} + +func (cp *proxy) runE(cmd *cobra.Command, _ 
[]string) error { + err := cp.checkUpdates(cmd) + if err != nil { + return err + } + args, err := cp.commandInput(cmd) + if err != nil { + return err + } + envs, err := cp.Prepare(cmd) + if err != nil { + return fmt.Errorf("entrypoint: %w", err) + } + ctx := cmd.Context() + log.Debugf(ctx, "Forwarding subprocess: %s", strings.Join(args, " ")) + if cp.TableTemplate != "" { + return cp.renderJsonAsTable(cmd, args, envs) + } + err = process.Forwarded(ctx, args, + cmd.InOrStdin(), + cmd.OutOrStdout(), + cmd.ErrOrStderr(), + process.WithEnvs(envs)) + if errors.Is(err, fs.ErrNotExist) && cp.IsPythonProject(ctx) { + msg := "cannot find Python %s. Please re-run: databricks labs install %s" + return fmt.Errorf(msg, cp.MinPython, cp.Name) + } + return err +} + +// [EXPERIMENTAL] this interface contract may change in the future. +// See https://github.com/databricks/cli/issues/994 +func (cp *proxy) renderJsonAsTable(cmd *cobra.Command, args []string, envs map[string]string) error { + var buf bytes.Buffer + ctx := cmd.Context() + err := process.Forwarded(ctx, args, + cmd.InOrStdin(), + &buf, + cmd.ErrOrStderr(), + process.WithEnvs(envs)) + if err != nil { + return err + } + var anyVal any + err = json.Unmarshal(buf.Bytes(), &anyVal) + if err != nil { + return err + } + // IntelliJ eagerly replaces tabs with spaces, even though we're not asking for it + fixedTemplate := strings.ReplaceAll(cp.TableTemplate, "\\t", "\t") + return cmdio.RenderWithTemplate(ctx, anyVal, fixedTemplate) +} + +func (cp *proxy) commandInput(cmd *cobra.Command) ([]string, error) { + flags := cmd.Flags() + commandInput := struct { + Command string `json:"command"` + Flags map[string]any `json:"flags"` + OutputType string `json:"output_type"` + }{ + Command: cp.Name, + Flags: map[string]any{}, + } + for _, f := range cp.Flags { + v, err := f.get(flags) + if err != nil { + return nil, fmt.Errorf("get %s flag: %w", f.Name, err) + } + commandInput.Flags[f.Name] = v + } + logLevelFlag := flags.Lookup("log-level") + if logLevelFlag != nil { + commandInput.Flags["log_level"] = logLevelFlag.Value.String() + } + args := []string{} + ctx := cmd.Context() + if cp.IsPythonProject(ctx) { + args = append(args, cp.virtualEnvPython(ctx)) + libDir := cp.EffectiveLibDir(cmd.Context()) + entrypoint := filepath.Join(libDir, cp.Main) + args = append(args, entrypoint) + } + raw, err := json.Marshal(commandInput) + if err != nil { + return nil, fmt.Errorf("command input: %w", err) + } + args = append(args, string(raw)) + return args, nil +} + +type flag struct { + Name string `yaml:"name"` + Description string `yaml:"description"` + Default any `yaml:"default,omitempty"` +} + +func (f *flag) register(pf *pflag.FlagSet) { + var dflt string + if f.Default != nil { + dflt = fmt.Sprint(f.Default) + } + pf.String(f.Name, dflt, f.Description) +} + +func (f *flag) get(pf *pflag.FlagSet) (any, error) { + return pf.GetString(f.Name) +} diff --git a/cmd/labs/project/schema.json b/cmd/labs/project/schema.json new file mode 100644 index 00000000..a779b15e --- /dev/null +++ b/cmd/labs/project/schema.json @@ -0,0 +1,126 @@ +{ + "id": "https://raw.githubusercontent.com/databricks/cli/feat/labs/cmd/labs/project/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema", + "definitions": { + "entrypoint": { + "type": "object", + "properties": { + "require_running_cluster": { + "type": "boolean", + "default": false + }, + "is_unauthenticated": { + "type": "boolean", + "default": false + }, + "is_account_level": { + "type": "boolean", + "default": false + }, + 
"is_bundle_aware": { + "type": "boolean", + "default": false + } + } + }, + "hook": { + "type": "object", + "$ref": "#/definitions/entrypoint", + "unevaluatedProperties": true, + "properties": { + "script": { + "type": "string", + "pattern": "^[A-Za-z0-9_-/\\.]+$" + }, + "min_runtime_version": { + "type": "string", + "pattern": "^[0-9]+.[0-9]+$" + }, + "require_databricks_connect": { + "type": "boolean", + "default": false + }, + "warehouse_types": { + "enum": [ "PRO", "CLASSIC", "TYPE_UNSPECIFIED" ] + } + } + }, + "alphanum": { + "type": "string", + "pattern": "^[a-z0-9-]$" + }, + "command": { + "type": "object", + "$ref": "#/definitions/entrypoint", + "unevaluatedProperties": true, + "required": ["name", "description"], + "properties": { + "name": { + "$ref": "#/definitions/alphanum" + }, + "description": { + "type": "string" + }, + "table_template": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/flag" + } + } + }, + "flag": { + "type": "object", + "required": ["name", "description"], + "properties": { + "name": { + "$ref": "#/definitions/alphanum" + }, + "description": { + "type": "string" + }, + "default": {} + } + } + }, + "type": "object", + "additionalProperties": false, + "required": ["name", "description", "entrypoint"], + "properties": { + "$version": { + "type": "integer", + "default": 1 + }, + "name": { + "$ref": "#/definitions/alphanum", + "description": "Name of the project" + }, + "description": { + "type": "string", + "description": "Short description of the project" + }, + "entrypoint": { + "type": "string", + "description": "Script that routes subcommands" + }, + "min_python": { + "type": "string", + "pattern": "^3.[0-9]+$", + "description": "Minimal Python version required" + }, + "install": { + "$ref": "#/definitions/hook", + "description": "Installation configuration" + }, + "uninstall": { + "$ref": "#/definitions/hook" + }, + "commands": { + "type": "array", + "description": "Exposed commands", + "items": { + "$ref": "#/definitions/command" + } + } + } +} diff --git a/cmd/labs/project/testdata/.gitignore b/cmd/labs/project/testdata/.gitignore new file mode 100644 index 00000000..bd1711fd --- /dev/null +++ b/cmd/labs/project/testdata/.gitignore @@ -0,0 +1 @@ +!.databricks diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/cache/databrickslabs-blueprint-releases.json b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/cache/databrickslabs-blueprint-releases.json new file mode 100644 index 00000000..87651864 --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/cache/databrickslabs-blueprint-releases.json @@ -0,0 +1,8 @@ +{ + "refreshed_at": "2033-01-01T00:00:00.92857+02:00", + "data": [ + { + "tag_name": "v0.3.15" + } + ] + } diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/config/login.json b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/config/login.json new file mode 100644 index 00000000..7b611ba3 --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/config/login.json @@ -0,0 +1,4 @@ +{ + "workspace_profile": "workspace-profile", + "account_profile": "account-profile" +} diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/install.py b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/install.py new file mode 100644 index 00000000..6873257d --- /dev/null +++ 
b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/install.py @@ -0,0 +1 @@ +print(f'setting up important infrastructure') diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml new file mode 100644 index 00000000..0ac4bf82 --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml @@ -0,0 +1,33 @@ +--- +version: 1 +name: blueprint +description: Blueprint Project +install: + min_runtime_version: 13.1 + require_running_cluster: true + warehouse_types: + - PRO + script: install.py +entrypoint: main.py +min_python: 3.9 +commands: + - name: echo + is_account_level: true + description: non-interactive echo + flags: + - name: first + default: something + description: first flag description + - name: foo + description: foo command + flags: + - name: first + description: first flag description + - name: second + description: second flag description + - name: table + description: something that renders a table + table_template: | + Key Value + {{range .records}}{{.key}} {{.value}} + {{end}} diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/main.py b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/main.py new file mode 100644 index 00000000..769ee73e --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/main.py @@ -0,0 +1,27 @@ +import os, sys, json + +payload = json.loads(sys.argv[1]) + +if 'echo' == payload['command']: + json.dump({ + 'command': payload['command'], + 'flags': payload['flags'], + 'env': {k:v for k,v in os.environ.items()} + }, sys.stdout) + sys.exit(0) + +if 'table' == payload['command']: + sys.stderr.write("some intermediate info\n") + json.dump({'records': [ + {'key': 'First', 'value': 'Second'}, + {'key': 'Third', 'value': 'Fourth'}, + ]}, sys.stdout) + sys.exit(0) + +print(f'host is {os.environ["DATABRICKS_HOST"]}') + +print(f'[{payload["command"]}] command flags are {payload["flags"]}') + +answer = input('What is your name? 
') + +print(f'Hello, {answer}!') diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/pyproject.toml b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/pyproject.toml new file mode 100644 index 00000000..d33ab1fb --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/lib/pyproject.toml @@ -0,0 +1,11 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "blueprint" +version = "0.3.15" +description = 'Databricks Labs Blueprint' +requires-python = ">=3.9" +classifiers = ["Programming Language :: Python"] +dependencies = [] diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/other-state-file.json b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/other-state-file.json new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/other-state-file.json @@ -0,0 +1 @@ +{} diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/venv/pyvenv.cfg b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/venv/pyvenv.cfg new file mode 100644 index 00000000..e69de29b diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/version.json b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/version.json new file mode 100644 index 00000000..4bcae155 --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/blueprint/state/version.json @@ -0,0 +1,4 @@ +{ + "version": "v0.3.15", + "date": "2023-10-24T15:04:05+01:00" +} diff --git a/cmd/labs/project/testdata/installed-in-home/.databricks/labs/databrickslabs-repositories.json b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/databrickslabs-repositories.json new file mode 100644 index 00000000..896ebecc --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databricks/labs/databrickslabs-repositories.json @@ -0,0 +1,37 @@ +{ + "refreshed_at": "2033-01-01T00:00:00.92857+02:00", + "data": [ + { + "name": "blueprint", + "description": "Sample project", + "language": "Python", + "default_branch": "main", + "stargazers_count": 100500, + "fork": false, + "archived": false, + "topics": [], + "html_url": "https://github.com/databrickslabs/blueprint", + "clone_url": "https://github.com/databrickslabs/blueprint.git", + "ssh_url": "git@github.com:databrickslabs/blueprint.git", + "license": { + "name": "Other" + } + }, + { + "name": "ucx", + "description": "Unity Catalog Migrations", + "language": "Python", + "default_branch": "main", + "stargazers_count": 100500, + "fork": false, + "archived": false, + "topics": [], + "html_url": "https://github.com/databrickslabs/ucx", + "clone_url": "https://github.com/databrickslabs/ucx.git", + "ssh_url": "git@github.com:databrickslabs/ucx.git", + "license": { + "name": "Other" + } + } + ] +} diff --git a/cmd/labs/project/testdata/installed-in-home/.databrickscfg b/cmd/labs/project/testdata/installed-in-home/.databrickscfg new file mode 100644 index 00000000..ec1bf7bd --- /dev/null +++ b/cmd/labs/project/testdata/installed-in-home/.databrickscfg @@ -0,0 +1,9 @@ +[workspace-profile] +host = https://abc +token = bcd +cluster_id = cde +warehouse_id = def + +[account-profile] +host = https://accounts.cloud.databricks.com +account_id = cde diff --git a/cmd/labs/show.go b/cmd/labs/show.go new file 
mode 100644 index 00000000..fc9d175c --- /dev/null +++ b/cmd/labs/show.go @@ -0,0 +1,57 @@ +package labs + +import ( + "fmt" + + "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/libs/cmdio" + "github.com/spf13/cobra" +) + +func newShowCommand() *cobra.Command { + return &cobra.Command{ + Use: "show NAME", + Args: cobra.ExactArgs(1), + Short: "Shows information about the project", + Annotations: map[string]string{ + "template": cmdio.Heredoc(` + Name: {{.name}} + Description: {{.description}} + Python: {{.is_python}} + + Folders: + - lib: {{.lib_dir}} + - cache: {{.cache_dir}} + - config: {{.config_dir}} + + `), + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + installed, err := project.Installed(ctx) + if err != nil { + return err + } + if len(installed) == 0 { + return fmt.Errorf("no projects found") + } + name := args[0] + for _, v := range installed { + isDev := name == "." && v.IsDeveloperMode(ctx) + isMatch := name == v.Name + if !(isDev || isMatch) { + continue + } + return cmdio.Render(ctx, map[string]any{ + "name": v.Name, + "description": v.Description, + "cache_dir": v.CacheDir(ctx), + "config_dir": v.ConfigDir(ctx), + "lib_dir": v.EffectiveLibDir(ctx), + "is_python": v.IsPythonProject(ctx), + }) + } + return nil + }, + } +} diff --git a/cmd/labs/uninstall.go b/cmd/labs/uninstall.go new file mode 100644 index 00000000..b2c83fff --- /dev/null +++ b/cmd/labs/uninstall.go @@ -0,0 +1,39 @@ +package labs + +import ( + "fmt" + + "github.com/databricks/cli/cmd/labs/project" + "github.com/spf13/cobra" +) + +func newUninstallCommand() *cobra.Command { + return &cobra.Command{ + Use: "uninstall NAME", + Args: cobra.ExactArgs(1), + Short: "Uninstalls project", + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var names []string + installed, _ := project.Installed(cmd.Context()) + for _, v := range installed { + names = append(names, v.Name) + } + return names, cobra.ShellCompDirectiveNoFileComp + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + installed, err := project.Installed(ctx) + if err != nil { + return err + } + name := args[0] + for _, prj := range installed { + if prj.Name != name { + continue + } + return prj.Uninstall(cmd) + } + return fmt.Errorf("not found: %s", name) + }, + } +} diff --git a/cmd/labs/unpack/zipball.go b/cmd/labs/unpack/zipball.go new file mode 100644 index 00000000..d2cfa8c9 --- /dev/null +++ b/cmd/labs/unpack/zipball.go @@ -0,0 +1,64 @@ +package unpack + +import ( + "archive/zip" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +const ownerRWXworldRX = 0o755 + +type GitHubZipball struct { + io.Reader +} + +func (v GitHubZipball) UnpackTo(libTarget string) error { + raw, err := io.ReadAll(v) + if err != nil { + return err + } + zipReader, err := zip.NewReader(bytes.NewReader(raw), int64(len(raw))) + if err != nil { + return fmt.Errorf("zip: %w", err) + } + // GitHub packages entire repo contents into a top-level folder, e.g. 
databrickslabs-ucx-2800c6b + rootDirInZIP := zipReader.File[0].FileHeader.Name + for _, zf := range zipReader.File { + if zf.Name == rootDirInZIP { + continue + } + normalizedName := strings.TrimPrefix(zf.Name, rootDirInZIP) + targetName := filepath.Join(libTarget, normalizedName) + if zf.FileInfo().IsDir() { + err = os.MkdirAll(targetName, ownerRWXworldRX) + if err != nil { + return fmt.Errorf("mkdir %s: %w", normalizedName, err) + } + continue + } + err = v.extractFile(zf, targetName) + if err != nil { + return fmt.Errorf("extract %s: %w", zf.Name, err) + } + } + return nil +} + +func (v GitHubZipball) extractFile(zf *zip.File, targetName string) error { + reader, err := zf.Open() + if err != nil { + return fmt.Errorf("source: %w", err) + } + defer reader.Close() + writer, err := os.OpenFile(targetName, os.O_CREATE|os.O_RDWR, zf.Mode()) + if err != nil { + return fmt.Errorf("target: %w", err) + } + defer writer.Close() + _, err = io.Copy(writer, reader) + return err +} diff --git a/cmd/labs/upgrade.go b/cmd/labs/upgrade.go new file mode 100644 index 00000000..88b7bc92 --- /dev/null +++ b/cmd/labs/upgrade.go @@ -0,0 +1,21 @@ +package labs + +import ( + "github.com/databricks/cli/cmd/labs/project" + "github.com/spf13/cobra" +) + +func newUpgradeCommand() *cobra.Command { + return &cobra.Command{ + Use: "upgrade NAME", + Args: cobra.ExactArgs(1), + Short: "Upgrades project", + RunE: func(cmd *cobra.Command, args []string) error { + inst, err := project.NewUpgrader(cmd, args[0]) + if err != nil { + return err + } + return inst.Upgrade(cmd.Context()) + }, + } +} From 92539d4b9b7d39205956a69ab3868c856f460775 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 20 Nov 2023 20:25:43 +0100 Subject: [PATCH 242/310] Work around DLT issue with `$PYTHONPATH` not being set correctly (#999) ## Changes DLT currently doesn't always set `$PYTHONPATH` correctly (ES-947370). This restores the original workaround to make new pipelines work while that issue is being addressed. The workaround was removed in #832. Manually tested. 
--- .../resources/{{.project_name}}_pipeline.yml.tmpl | 3 +++ .../template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl | 1 + 2 files changed, 4 insertions(+) diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl index 498604f6..4b8f74d1 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_pipeline.yml.tmpl @@ -7,3 +7,6 @@ resources: libraries: - notebook: path: ../src/dlt_pipeline.ipynb + + configuration: + bundle.sourcePath: /Workspace/${workspace.file_path}/src diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl index 8c85e97e..4f50294f 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl @@ -35,6 +35,7 @@ "# Import DLT and src/{{.project_name}}\n", "import dlt\n", "import sys\n", + "sys.path.append(spark.conf.get(\"bundle.sourcePath\", \".\"))\n", "from pyspark.sql.functions import expr\n", "from {{.project_name}} import main" {{else}} From fa89db57e9d0f7149e0ee62edf2dc3effcce0a64 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 21 Nov 2023 11:15:09 +0100 Subject: [PATCH 243/310] Enable `spark_jar_task` with local JAR libraries (#993) ## Changes Previously, local JAR paths were transformed to remote paths during initialisation, and thus the artifact-building logic did not recognise such libraries as local files that needed to be built and uploaded.
Now it's possible to use spark_jar_tasks with local JAR libraries on 14.1+ DBR clusters. Example configuration: ``` bundle: name: spark-jar workspace: host: *** artifacts: my_java_code: path: ./sample-java build: "javac PrintArgs.java && jar cvfm PrintArgs.jar META-INF/MANIFEST.MF PrintArgs.class" files: - source: "/Users/andrew.nester/dabs/wheel/sample-java/PrintArgs.jar" resources: jobs: print_args: name: "Print Args" tasks: - task_key: Print new_cluster: num_workers: 0 spark_version: 14.2.x-scala2.12 node_type_id: i3.xlarge spark_conf: "spark.databricks.cluster.profile": "singleNode" "spark.master": "local[*]" custom_tags: ResourceClass: "SingleNode" spark_jar_task: main_class_name: PrintArgs libraries: - jar: ./sample-java/PrintArgs.jar ``` ## Tests Manually running `bundle deploy` and `bundle run` --- bundle/config/mutator/translate_paths_jobs.go | 4 ++-- bundle/config/mutator/translate_paths_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index 564b8e02..d920c220 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -46,7 +46,7 @@ func transformWhlLibrary(resource any, dir string) *transformer { dir, &library.Whl, "libraries.whl", - translateNoOp, + translateNoOp, // Does not convert to remote path but makes sure that nested paths resolved correctly } } @@ -88,7 +88,7 @@ func transformJarLibrary(resource any, dir string) *transformer { dir, &library.Jar, "libraries.jar", - translateFilePath, + translateNoOp, // Does not convert to remote path but makes sure that nested paths resolved correctly } } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 41d031ca..67f15d40 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -238,7 +238,7 @@ func TestTranslatePaths(t *testing.T) { ) assert.Equal( t, - "/bundle/dist/task.jar", + filepath.Join("dist", "task.jar"), b.Config.Resources.Jobs["job"].Tasks[5].Libraries[0].Jar, ) assert.Equal( @@ -352,7 +352,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { ) assert.Equal( t, - "/bundle/job/dist/task.jar", + filepath.Join("job", "dist", "task.jar"), b.Config.Resources.Jobs["job"].Tasks[1].Libraries[0].Jar, ) assert.Equal( From 48e293c72c047e4428836af1f8e9d201a253237b Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 22 Nov 2023 10:16:28 +0100 Subject: [PATCH 244/310] Pass `USERPROFILE` environment variable to Terraform (#1001) ## Changes It appears that the `USERPROFILE` env variable indicates where the Azure CLI stores configuration data (aka the `.azure` folder). https://learn.microsoft.com/en-us/cli/azure/azure-cli-configuration#cli-configuration-file Passing it to the terraform executable allows it to correctly authenticate using the Azure CLI. Fixes #983 ## Tests Ran deployment on a Windows VM before and after the fix.
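For context, the mechanism here is a simple allow-list: the CLI copies a handful of variables from its own environment into the environment it constructs for the Terraform child process. Below is a minimal standalone sketch of that pattern; the helper name and the exact variable list are illustrative assumptions, not the CLI's actual code.

```go
package main

import (
	"os"
	"os/exec"
)

// inheritSelected copies the named variables from the parent process
// environment into env, skipping any that are unset. On Windows,
// USERPROFILE is what lets the Azure CLI find its stored credentials.
func inheritSelected(env map[string]string, names ...string) {
	for _, name := range names {
		if v, ok := os.LookupEnv(name); ok {
			env[name] = v
		}
	}
}

func main() {
	env := map[string]string{}
	inheritSelected(env, "HOME", "USERPROFILE", "PATH", "TF_CLI_CONFIG_FILE")

	// Build the child environment explicitly so Terraform only sees the
	// variables we chose to pass along.
	cmd := exec.Command("terraform", "version")
	cmd.Env = make([]string, 0, len(env))
	for k, v := range env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}
	_ = cmd.Run() // error handling elided in this sketch
}
```

Constructing the child environment from scratch keeps the set of variables Terraform sees explicit and reproducible, which is why each variable has to be opted in, as in the diff below.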
--- bundle/deploy/terraform/init.go | 9 +++-- bundle/deploy/terraform/init_test.go | 49 ++++++++-------------------- 2 files changed, 21 insertions(+), 37 deletions(-) diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index aa1dff74..503a1db2 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -81,6 +81,13 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error { environ["HOME"] = home } + // Include $USERPROFILE in set of environment variables to pass along. + // This variable is used by Azure CLI on Windows to find stored credentials and metadata + userProfile, ok := env.Lookup(ctx, "USERPROFILE") + if ok { + environ["USERPROFILE"] = userProfile + } + // Include $PATH in set of environment variables to pass along. // This is necessary to ensure that our Terraform provider can use the // same auxiliary programs (e.g. `az`, or `gcloud`) as the CLI. @@ -113,8 +120,6 @@ func setTempDirEnvVars(ctx context.Context, environ map[string]string, b *bundle environ["TMP"] = v } else if v, ok := env.Lookup(ctx, "TEMP"); ok { environ["TEMP"] = v - } else if v, ok := env.Lookup(ctx, "USERPROFILE"); ok { - environ["USERPROFILE"] = v } else { tmpDir, err := b.CacheDir(ctx, "tmp") if err != nil { diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index a3a9e0e4..4b00e18e 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -163,36 +163,6 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) { }, env) } -func TestSetTempDirEnvVarsForWindowWithUserProfileSet(t *testing.T) { - if runtime.GOOS != "windows" { - t.SkipNow() - } - - b := &bundle.Bundle{ - Config: config.Root{ - Path: t.TempDir(), - Bundle: config.Bundle{ - Target: "whatever", - }, - }, - } - - // Set environment variables - unsetEnv(t, "TMP") - unsetEnv(t, "TEMP") - t.Setenv("USERPROFILE", "c:\\foo\\c") - - // compute env - env := make(map[string]string, 0) - err := setTempDirEnvVars(context.Background(), env, b) - require.NoError(t, err) - - // assert that we pass through the user profile - assert.Equal(t, map[string]string{ - "USERPROFILE": "c:\\foo\\c", - }, env) -} - func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) { if runtime.GOOS != "windows" { t.SkipNow() @@ -284,9 +254,18 @@ func TestInheritEnvVars(t *testing.T) { require.NoError(t, err) - require.Equal(t, map[string]string{ - "HOME": "/home/testuser", - "PATH": "/foo:/bar", - "TF_CLI_CONFIG_FILE": "/tmp/config.tfrc", - }, env) + require.Equal(t, env["HOME"], "/home/testuser") + require.Equal(t, env["PATH"], "/foo:/bar") + require.Equal(t, env["TF_CLI_CONFIG_FILE"], "/tmp/config.tfrc") +} + +func TestSetUserProfileFromInheritEnvVars(t *testing.T) { + t.Setenv("USERPROFILE", "c:\\foo\\c") + + env := make(map[string]string, 0) + err := inheritEnvVars(context.Background(), env) + require.NoError(t, err) + + assert.Contains(t, env, "USERPROFILE") + assert.Equal(t, env["USERPROFILE"], "c:\\foo\\c") } From d9fe2ab43d621b27b7ed3b4d527791e551968f92 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 22 Nov 2023 13:25:16 +0100 Subject: [PATCH 245/310] Improve error message when path is not a bundle template (#985) Adds better error message when input path is not a bundle template before: ``` shreyas.goenka@THW32HFW6T bricks % cli bundle init ~/bricks Error: open /Users/shreyas.goenka/bricks/databricks_template_schema.json: no 
such file or directory ``` after: ``` shreyas.goenka@THW32HFW6T bricks % cli bundle init ~/bricks Error: expected to find a template schema file at /Users/shreyas.goenka/bricks/databricks_template_schema.json ``` --- libs/template/materialize.go | 5 +++++ libs/template/materialize_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 libs/template/materialize_test.go diff --git a/libs/template/materialize.go b/libs/template/materialize.go index da0bc45d..811ef925 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -3,6 +3,7 @@ package template import ( "context" "embed" + "fmt" "io/fs" "os" "path" @@ -43,6 +44,10 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st schemaPath := filepath.Join(templateRoot, schemaFileName) helpers := loadHelpers(ctx) + if _, err := os.Stat(schemaPath); os.IsNotExist(err) { + return fmt.Errorf("not a bundle template: expected to find a template schema file at %s", schemaPath) + } + config, err := newConfig(ctx, schemaPath) if err != nil { return err diff --git a/libs/template/materialize_test.go b/libs/template/materialize_test.go new file mode 100644 index 00000000..b4be3fe9 --- /dev/null +++ b/libs/template/materialize_test.go @@ -0,0 +1,24 @@ +package template + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/databricks-sdk-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMaterializeForNonTemplateDirectory(t *testing.T) { + tmpDir := t.TempDir() + w, err := databricks.NewWorkspaceClient(&databricks.Config{}) + require.NoError(t, err) + ctx := root.SetWorkspaceClient(context.Background(), w) + + // Try to materialize a non-template directory. + err = Materialize(ctx, "", tmpDir, "") + assert.EqualError(t, err, fmt.Sprintf("not a bundle template: expected to find a template schema file at %s", filepath.Join(tmpDir, schemaFileName))) +} From 07c4c90772701885e5c10de7c580c443323cb33d Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 23 Nov 2023 10:04:54 +0100 Subject: [PATCH 246/310] Tolerate missing .databrickscfg file during `databricks auth login` (#1003) ## Changes `databricks configure` creates a new .databrickscfg if one doesn't already exist, but `databricks auth login` fails in this case. Because `databricks auth login` anyways writes out the config file, we gracefully handle this error and continue. ## Tests Unit test. ``` $ ls ~/.databrickscfg* /Users/miles/.databrickscfg.bak $ ./cli auth login Databricks Profile Name: test Databricks Host: https:// Profile test was successfully saved $ ls ~/.databrickscfg* /Users/miles/.databrickscfg /Users/miles/.databrickscfg.bak $ cat ~/.databrickscfg ; The profile defined in the DEFAULT section is to be used as a fallback when no profile is explicitly specified. 
[DEFAULT] [test] host = https:// auth_type = databricks-cli ``` --- cmd/auth/login.go | 4 +++- cmd/auth/login_test.go | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 cmd/auth/login_test.go diff --git a/cmd/auth/login.go b/cmd/auth/login.go index 8c6d52fc..bbc88c12 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -2,6 +2,7 @@ package auth import ( "context" + "errors" "fmt" "time" @@ -131,7 +132,8 @@ func setHost(ctx context.Context, profileName string, persistentAuth *auth.Persi _, profiles, err := databrickscfg.LoadProfiles(ctx, func(p databrickscfg.Profile) bool { return p.Name == profileName }) - if err != nil { + // Tolerate ErrNoConfiguration here, as we will write out a configuration as part of the login flow. + if !errors.Is(err, databrickscfg.ErrNoConfiguration) { return err } if persistentAuth.Host == "" { diff --git a/cmd/auth/login_test.go b/cmd/auth/login_test.go new file mode 100644 index 00000000..9b834bd0 --- /dev/null +++ b/cmd/auth/login_test.go @@ -0,0 +1,17 @@ +package auth + +import ( + "context" + "testing" + + "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/env" + "github.com/stretchr/testify/assert" +) + +func TestSetHostDoesNotFailWithNoDatabrickscfg(t *testing.T) { + ctx := context.Background() + ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./imaginary-file/databrickscfg") + err := setHost(ctx, "foo", &auth.PersistentAuth{Host: "test"}, []string{}) + assert.NoError(t, err) +} From d985601d3051657c40541e0448c1574e2cb353f7 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 23 Nov 2023 20:56:48 +0100 Subject: [PATCH 247/310] Add `--configure-cluster` flag to configure command (#1005) ## Changes This breaks out the flags into a separate struct to make it easier to pass around. If specified, the flag calls into the `cfgpicker` to prompt for a cluster to associated with the profile. ## Tests Existing tests pass; added one for host validation. --------- Co-authored-by: Miles Yucht --- cmd/configure/configure.go | 137 +++++++++++++++---------------------- cmd/configure/flags.go | 25 +++++++ cmd/configure/host.go | 20 ++++++ cmd/configure/host_test.go | 29 ++++++++ 4 files changed, 130 insertions(+), 81 deletions(-) create mode 100644 cmd/configure/flags.go create mode 100644 cmd/configure/host.go create mode 100644 cmd/configure/host_test.go diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 55ede538..1c4d2e6b 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -1,70 +1,23 @@ package configure import ( - "context" "fmt" - "net/url" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/cfgpickers" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" ) -func validateHost(s string) error { - u, err := url.Parse(s) - if err != nil { - return err - } - if u.Host == "" || u.Scheme != "https" { - return fmt.Errorf("must start with https://") - } - if u.Path != "" && u.Path != "/" { - return fmt.Errorf("must use empty path") - } - return nil -} - -func configureFromFlags(cmd *cobra.Command, ctx context.Context, cfg *config.Config) error { - // Configure profile name if set. - profile, err := cmd.Flags().GetString("profile") - if err != nil { - return fmt.Errorf("read --profile flag: %w", err) - } - if profile != "" { - cfg.Profile = profile - } - - // Configure host if set. 
- host, err := cmd.Flags().GetString("host") - if err != nil { - return fmt.Errorf("read --host flag: %w", err) - } - if host != "" { - cfg.Host = host - } - - // Validate host if set. - if cfg.Host != "" { - err = validateHost(cfg.Host) - if err != nil { - return err - } - } - - return nil -} - -func configureInteractive(cmd *cobra.Command, ctx context.Context, cfg *config.Config) error { - err := configureFromFlags(cmd, ctx, cfg) - if err != nil { - return err - } +func configureInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config.Config) error { + ctx := cmd.Context() // Ask user to specify the host if not already set. if cfg.Host == "" { prompt := cmdio.Prompt(ctx) - prompt.Label = "Databricks Host" + prompt.Label = "Databricks host" prompt.Default = "https://" prompt.AllowEdit = true prompt.Validate = validateHost @@ -78,7 +31,7 @@ func configureInteractive(cmd *cobra.Command, ctx context.Context, cfg *config.C // Ask user to specify the token is not already set. if cfg.Token == "" { prompt := cmdio.Prompt(ctx) - prompt.Label = "Personal Access Token" + prompt.Label = "Personal access token" prompt.Mask = '*' out, err := prompt.Run() if err != nil { @@ -87,19 +40,32 @@ func configureInteractive(cmd *cobra.Command, ctx context.Context, cfg *config.C cfg.Token = out } + // Ask user to specify a cluster if not already set. + if flags.ConfigureCluster && cfg.ClusterID == "" { + w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg)) + if err != nil { + return err + } + clusterID, err := cfgpickers.AskForCluster(cmd.Context(), w) + if err != nil { + return err + } + cfg.ClusterID = clusterID + } + return nil } -func configureNonInteractive(cmd *cobra.Command, ctx context.Context, cfg *config.Config) error { - err := configureFromFlags(cmd, ctx, cfg) - if err != nil { - return err - } - +func configureNonInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config.Config) error { if cfg.Host == "" { return fmt.Errorf("host must be set in non-interactive mode") } + // Check presence of cluster ID before reading token to fail fast. + if flags.ConfigureCluster && cfg.ClusterID == "" { + return fmt.Errorf("cluster ID must be set in non-interactive mode") + } + // Read token from stdin if not already set. if cfg.Token == "" { _, err := fmt.Fscanf(cmd.InOrStdin(), "%s\n", &cfg.Token) @@ -117,21 +83,16 @@ func newConfigureCommand() *cobra.Command { Short: "Configure authentication", Long: `Configure authentication. - This command adds a profile to your ~/.databrickscfg file. - You can write to a different file by setting the DATABRICKS_CONFIG_FILE environment variable. +This command adds a profile to your ~/.databrickscfg file. +You can write to a different file by setting the DATABRICKS_CONFIG_FILE environment variable. - If this command is invoked in non-interactive mode, it will read the token from stdin. - The host must be specified with the --host flag. +If this command is invoked in non-interactive mode, it will read the token from stdin. +The host must be specified with the --host flag or the DATABRICKS_HOST environment variable. `, } - cmd.Flags().String("host", "", "Databricks workspace host.") - cmd.Flags().String("profile", "DEFAULT", "Name for the connection profile to configure.") - - // Include token flag for compatibility with the legacy CLI. - // It doesn't actually do anything because we always use PATs. 
- cmd.Flags().Bool("token", true, "Configure using Databricks Personal Access Token") - cmd.Flags().MarkHidden("token") + var flags configureFlags + flags.Register(cmd) cmd.RunE = func(cmd *cobra.Command, args []string) error { var cfg config.Config @@ -142,15 +103,28 @@ func newConfigureCommand() *cobra.Command { return fmt.Errorf("unable to instantiate configuration from environment variables: %w", err) } - ctx := cmd.Context() - interactive := cmdio.IsInTTY(ctx) && cmdio.IsOutTTY(ctx) - var fn func(*cobra.Command, context.Context, *config.Config) error - if interactive { - fn = configureInteractive - } else { - fn = configureNonInteractive + // Populate configuration from flags (if set). + if flags.Host != "" { + cfg.Host = flags.Host + } + if flags.Profile != "" { + cfg.Profile = flags.Profile + } + + // Verify that the host is valid (if set). + if cfg.Host != "" { + err = validateHost(cfg.Host) + if err != nil { + return err + } + } + + ctx := cmd.Context() + if cmdio.IsInTTY(ctx) && cmdio.IsOutTTY(ctx) { + err = configureInteractive(cmd, &flags, &cfg) + } else { + err = configureNonInteractive(cmd, &flags, &cfg) } - err = fn(cmd, ctx, &cfg) if err != nil { return err } @@ -161,9 +135,10 @@ func newConfigureCommand() *cobra.Command { // Save profile to config file. return databrickscfg.SaveToProfile(ctx, &config.Config{ - Profile: cfg.Profile, - Host: cfg.Host, - Token: cfg.Token, + Profile: cfg.Profile, + Host: cfg.Host, + Token: cfg.Token, + ClusterID: cfg.ClusterID, }) } diff --git a/cmd/configure/flags.go b/cmd/configure/flags.go new file mode 100644 index 00000000..80e65026 --- /dev/null +++ b/cmd/configure/flags.go @@ -0,0 +1,25 @@ +package configure + +import ( + "github.com/spf13/cobra" +) + +type configureFlags struct { + Host string + Profile string + + // Flag to request a prompt for cluster configuration. + ConfigureCluster bool +} + +// Register flags with command. +func (f *configureFlags) Register(cmd *cobra.Command) { + cmd.Flags().StringVar(&f.Host, "host", "", "Databricks workspace host.") + cmd.Flags().StringVar(&f.Profile, "profile", "DEFAULT", "Name for the connection profile to configure.") + cmd.Flags().BoolVar(&f.ConfigureCluster, "configure-cluster", false, "Prompts to configure cluster") + + // Include token flag for compatibility with the legacy CLI. + // It doesn't actually do anything because we always use PATs. 
+ cmd.Flags().Bool("token", true, "Configure using Databricks Personal Access Token") + cmd.Flags().MarkHidden("token") +} diff --git a/cmd/configure/host.go b/cmd/configure/host.go new file mode 100644 index 00000000..781c1238 --- /dev/null +++ b/cmd/configure/host.go @@ -0,0 +1,20 @@ +package configure + +import ( + "fmt" + "net/url" +) + +func validateHost(s string) error { + u, err := url.Parse(s) + if err != nil { + return err + } + if u.Host == "" || u.Scheme != "https" { + return fmt.Errorf("must start with https://") + } + if u.Path != "" && u.Path != "/" { + return fmt.Errorf("must use empty path") + } + return nil +} diff --git a/cmd/configure/host_test.go b/cmd/configure/host_test.go new file mode 100644 index 00000000..a4af199d --- /dev/null +++ b/cmd/configure/host_test.go @@ -0,0 +1,29 @@ +package configure + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateHost(t *testing.T) { + var err error + + // Must start with https:// + err = validateHost("/path") + assert.ErrorContains(t, err, "must start with https://") + err = validateHost("http://host") + assert.ErrorContains(t, err, "must start with https://") + err = validateHost("ftp://host") + + // Must use empty path + assert.ErrorContains(t, err, "must start with https://") + err = validateHost("https://host/path") + assert.ErrorContains(t, err, "must use empty path") + + // Ignore query params + err = validateHost("https://host/?query") + assert.NoError(t, err) + err = validateHost("https://host/") + assert.NoError(t, err) +} From 618780300752c2190c76fb2c667244491e006691 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 24 Nov 2023 12:15:46 +0100 Subject: [PATCH 248/310] Correctly overwrite local state if remote state is newer (#1008) ## Changes A bug in the code that pulls the remote state could cause the local state to be empty instead of a copy of the remote state. This happened only if the local state was present and stale when compared to the remote version. We correctly checked for the state serial to see if the local state had to be replaced but didn't seek back on the remote state before writing it out. Because the staleness check would read the remote state in full, copying from the same reader would immediately yield an EOF. ## Tests * Unit tests for state pull and push mutators that rely on a mocked filer. * An integration test that deploys the same bundle from multiple paths, triggering the staleness logic. Both failed prior to the fix and now pass. 
--- NOTICE | 4 + bundle/deploy/terraform/filer.go | 14 ++ bundle/deploy/terraform/state_pull.go | 47 ++++-- bundle/deploy/terraform/state_pull_test.go | 128 ++++++++++++++++ bundle/deploy/terraform/state_push.go | 8 +- bundle/deploy/terraform/state_push_test.go | 63 ++++++++ bundle/deploy/terraform/state_test.go | 48 ++++++ bundle/deploy/terraform/util_test.go | 81 ++-------- go.mod | 5 +- go.sum | 2 + .../basic/databricks_template_schema.json | 16 ++ .../basic/template/databricks.yml.tmpl | 18 +++ .../bundles/basic/template/hello_world.py | 1 + internal/bundle/local_state_staleness_test.go | 70 +++++++++ internal/mocks/README.md | 7 + internal/mocks/libs/filer/filer_mock.go | 139 ++++++++++++++++++ libs/filer/filer.go | 6 +- 17 files changed, 572 insertions(+), 85 deletions(-) create mode 100644 bundle/deploy/terraform/filer.go create mode 100644 bundle/deploy/terraform/state_pull_test.go create mode 100644 bundle/deploy/terraform/state_push_test.go create mode 100644 bundle/deploy/terraform/state_test.go create mode 100644 internal/bundle/bundles/basic/databricks_template_schema.json create mode 100644 internal/bundle/bundles/basic/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/basic/template/hello_world.py create mode 100644 internal/bundle/local_state_staleness_test.go create mode 100644 internal/mocks/README.md create mode 100644 internal/mocks/libs/filer/filer_mock.go diff --git a/NOTICE b/NOTICE index bce870f1..7c7eb7db 100644 --- a/NOTICE +++ b/NOTICE @@ -16,6 +16,10 @@ go-ini/ini - https://github.com/go-ini/ini Copyright ini authors License - https://github.com/go-ini/ini/blob/main/LICENSE +uber-go/mock - https://go.uber.org/mock +Copyright Google Inc. +License - https://github.com/uber-go/mock/blob/main/LICENSE + —-- This software contains code from the following open source projects, licensed under the MPL 2.0 license: diff --git a/bundle/deploy/terraform/filer.go b/bundle/deploy/terraform/filer.go new file mode 100644 index 00000000..b1fa5a1b --- /dev/null +++ b/bundle/deploy/terraform/filer.go @@ -0,0 +1,14 @@ +package terraform + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/filer" +) + +// filerFunc is a function that returns a filer.Filer. +type filerFunc func(b *bundle.Bundle) (filer.Filer, error) + +// stateFiler returns a filer.Filer that can be used to read/write state files. +func stateFiler(b *bundle.Bundle) (filer.Filer, error) { + return filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) +} diff --git a/bundle/deploy/terraform/state_pull.go b/bundle/deploy/terraform/state_pull.go index 6dd12ccf..14e8ecf1 100644 --- a/bundle/deploy/terraform/state_pull.go +++ b/bundle/deploy/terraform/state_pull.go @@ -1,6 +1,7 @@ package terraform import ( + "bytes" "context" "errors" "io" @@ -13,14 +14,38 @@ import ( "github.com/databricks/cli/libs/log" ) -type statePull struct{} +type statePull struct { + filerFunc +} func (l *statePull) Name() string { return "terraform:state-pull" } +func (l *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buffer, error) { + // Download state file from filer to local cache directory. + remote, err := f.Read(ctx, TerraformStateFileName) + if err != nil { + // On first deploy this state file doesn't yet exist. 
+ if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + return nil, err + } + + defer remote.Close() + + var buf bytes.Buffer + _, err = io.Copy(&buf, remote) + if err != nil { + return nil, err + } + + return &buf, nil +} + func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { - f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) + f, err := l.filerFunc(b) if err != nil { return err } @@ -32,15 +57,15 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { // Download state file from filer to local cache directory. log.Infof(ctx, "Opening remote state file") - remote, err := f.Read(ctx, TerraformStateFileName) + remote, err := l.remoteState(ctx, f) if err != nil { - // On first deploy this state file doesn't yet exist. - if errors.Is(err, fs.ErrNotExist) { - log.Infof(ctx, "Remote state file does not exist") - return nil - } + log.Infof(ctx, "Unable to open remote state file: %s", err) return err } + if remote == nil { + log.Infof(ctx, "Remote state file does not exist") + return nil + } // Expect the state file to live under dir. local, err := os.OpenFile(filepath.Join(dir, TerraformStateFileName), os.O_CREATE|os.O_RDWR, 0600) @@ -49,7 +74,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { } defer local.Close() - if !IsLocalStateStale(local, remote) { + if !IsLocalStateStale(local, bytes.NewReader(remote.Bytes())) { log.Infof(ctx, "Local state is the same or newer, ignoring remote state") return nil } @@ -60,7 +85,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { // Write file to disk. log.Infof(ctx, "Writing remote state file to local cache directory") - _, err = io.Copy(local, remote) + _, err = io.Copy(local, bytes.NewReader(remote.Bytes())) if err != nil { return err } @@ -69,5 +94,5 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { } func StatePull() bundle.Mutator { - return &statePull{} + return &statePull{stateFiler} } diff --git a/bundle/deploy/terraform/state_pull_test.go b/bundle/deploy/terraform/state_pull_test.go new file mode 100644 index 00000000..60eb5d90 --- /dev/null +++ b/bundle/deploy/terraform/state_pull_test.go @@ -0,0 +1,128 @@ +package terraform + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/fs" + "os" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + mock "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func mockStateFilerForPull(t *testing.T, contents map[string]int, merr error) filer.Filer { + buf, err := json.Marshal(contents) + require.NoError(t, err) + + ctrl := gomock.NewController(t) + mock := mock.NewMockFiler(ctrl) + mock. + EXPECT(). + Read(gomock.Any(), gomock.Eq(TerraformStateFileName)). + Return(io.NopCloser(bytes.NewReader(buf)), merr). 
+ Times(1) + return mock +} + +func statePullTestBundle(t *testing.T) *bundle.Bundle { + return &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Target: "default", + }, + Path: t.TempDir(), + }, + } +} + +func TestStatePullLocalMissingRemoteMissing(t *testing.T) { + m := &statePull{ + identityFiler(mockStateFilerForPull(t, nil, os.ErrNotExist)), + } + + ctx := context.Background() + b := statePullTestBundle(t) + err := bundle.Apply(ctx, b, m) + assert.NoError(t, err) + + // Confirm that no local state file has been written. + _, err = os.Stat(localStateFile(t, ctx, b)) + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestStatePullLocalMissingRemotePresent(t *testing.T) { + m := &statePull{ + identityFiler(mockStateFilerForPull(t, map[string]int{"serial": 5}, nil)), + } + + ctx := context.Background() + b := statePullTestBundle(t) + err := bundle.Apply(ctx, b, m) + assert.NoError(t, err) + + // Confirm that the local state file has been updated. + localState := readLocalState(t, ctx, b) + assert.Equal(t, map[string]int{"serial": 5}, localState) +} + +func TestStatePullLocalStale(t *testing.T) { + m := &statePull{ + identityFiler(mockStateFilerForPull(t, map[string]int{"serial": 5}, nil)), + } + + ctx := context.Background() + b := statePullTestBundle(t) + + // Write a stale local state file. + writeLocalState(t, ctx, b, map[string]int{"serial": 4}) + err := bundle.Apply(ctx, b, m) + assert.NoError(t, err) + + // Confirm that the local state file has been updated. + localState := readLocalState(t, ctx, b) + assert.Equal(t, map[string]int{"serial": 5}, localState) +} + +func TestStatePullLocalEqual(t *testing.T) { + m := &statePull{ + identityFiler(mockStateFilerForPull(t, map[string]int{"serial": 5, "some_other_key": 123}, nil)), + } + + ctx := context.Background() + b := statePullTestBundle(t) + + // Write a local state file with the same serial as the remote. + writeLocalState(t, ctx, b, map[string]int{"serial": 5}) + err := bundle.Apply(ctx, b, m) + assert.NoError(t, err) + + // Confirm that the local state file has not been updated. + localState := readLocalState(t, ctx, b) + assert.Equal(t, map[string]int{"serial": 5}, localState) +} + +func TestStatePullLocalNewer(t *testing.T) { + m := &statePull{ + identityFiler(mockStateFilerForPull(t, map[string]int{"serial": 5, "some_other_key": 123}, nil)), + } + + ctx := context.Background() + b := statePullTestBundle(t) + + // Write a local state file with a newer serial as the remote. + writeLocalState(t, ctx, b, map[string]int{"serial": 6}) + err := bundle.Apply(ctx, b, m) + assert.NoError(t, err) + + // Confirm that the local state file has not been updated. 
+ localState := readLocalState(t, ctx, b) + assert.Equal(t, map[string]int{"serial": 6}, localState) +} diff --git a/bundle/deploy/terraform/state_push.go b/bundle/deploy/terraform/state_push.go index ae1d8b8b..30a43596 100644 --- a/bundle/deploy/terraform/state_push.go +++ b/bundle/deploy/terraform/state_push.go @@ -10,14 +10,16 @@ import ( "github.com/databricks/cli/libs/log" ) -type statePush struct{} +type statePush struct { + filerFunc +} func (l *statePush) Name() string { return "terraform:state-push" } func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { - f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) + f, err := l.filerFunc(b) if err != nil { return err } @@ -45,5 +47,5 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { } func StatePush() bundle.Mutator { - return &statePush{} + return &statePush{stateFiler} } diff --git a/bundle/deploy/terraform/state_push_test.go b/bundle/deploy/terraform/state_push_test.go new file mode 100644 index 00000000..4167b3cb --- /dev/null +++ b/bundle/deploy/terraform/state_push_test.go @@ -0,0 +1,63 @@ +package terraform + +import ( + "context" + "encoding/json" + "io" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + mock "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" +) + +func mockStateFilerForPush(t *testing.T, fn func(body io.Reader)) filer.Filer { + ctrl := gomock.NewController(t) + mock := mock.NewMockFiler(ctrl) + mock. + EXPECT(). + Write(gomock.Any(), gomock.Any(), gomock.Any(), filer.CreateParentDirectories, filer.OverwriteIfExists). + Do(func(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) error { + fn(reader) + return nil + }). + Return(nil). + Times(1) + return mock +} + +func statePushTestBundle(t *testing.T) *bundle.Bundle { + return &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Target: "default", + }, + Path: t.TempDir(), + }, + } +} + +func TestStatePush(t *testing.T) { + mock := mockStateFilerForPush(t, func(body io.Reader) { + dec := json.NewDecoder(body) + var contents map[string]int + err := dec.Decode(&contents) + assert.NoError(t, err) + assert.Equal(t, map[string]int{"serial": 4}, contents) + }) + + m := &statePush{ + identityFiler(mock), + } + + ctx := context.Background() + b := statePushTestBundle(t) + + // Write a stale local state file. + writeLocalState(t, ctx, b, map[string]int{"serial": 4}) + err := bundle.Apply(ctx, b, m) + assert.NoError(t, err) +} diff --git a/bundle/deploy/terraform/state_test.go b/bundle/deploy/terraform/state_test.go new file mode 100644 index 00000000..ee15b953 --- /dev/null +++ b/bundle/deploy/terraform/state_test.go @@ -0,0 +1,48 @@ +package terraform + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/require" +) + +// identityFiler returns a filerFunc that returns the specified filer. 
+func identityFiler(f filer.Filer) filerFunc { + return func(_ *bundle.Bundle) (filer.Filer, error) { + return f, nil + } +} + +func localStateFile(t *testing.T, ctx context.Context, b *bundle.Bundle) string { + dir, err := Dir(ctx, b) + require.NoError(t, err) + return filepath.Join(dir, TerraformStateFileName) +} + +func readLocalState(t *testing.T, ctx context.Context, b *bundle.Bundle) map[string]int { + f, err := os.Open(localStateFile(t, ctx, b)) + require.NoError(t, err) + defer f.Close() + + var contents map[string]int + dec := json.NewDecoder(f) + err = dec.Decode(&contents) + require.NoError(t, err) + return contents +} + +func writeLocalState(t *testing.T, ctx context.Context, b *bundle.Bundle, contents map[string]int) { + f, err := os.Create(localStateFile(t, ctx, b)) + require.NoError(t, err) + defer f.Close() + + enc := json.NewEncoder(f) + err = enc.Encode(contents) + require.NoError(t, err) +} diff --git a/bundle/deploy/terraform/util_test.go b/bundle/deploy/terraform/util_test.go index 1ddfbab3..4f2cf291 100644 --- a/bundle/deploy/terraform/util_test.go +++ b/bundle/deploy/terraform/util_test.go @@ -2,92 +2,39 @@ package terraform import ( "fmt" - "io" + "strings" "testing" "testing/iotest" "github.com/stretchr/testify/assert" ) -type mockedReader struct { - content string -} - -func (r *mockedReader) Read(p []byte) (n int, err error) { - content := []byte(r.content) - n = copy(p, content) - return n, io.EOF -} - func TestLocalStateIsNewer(t *testing.T) { - local := &mockedReader{content: ` -{ - "serial": 5 -} -`} - remote := &mockedReader{content: ` -{ - "serial": 4 -} -`} - - stale := IsLocalStateStale(local, remote) - - assert.False(t, stale) + local := strings.NewReader(`{"serial": 5}`) + remote := strings.NewReader(`{"serial": 4}`) + assert.False(t, IsLocalStateStale(local, remote)) } func TestLocalStateIsOlder(t *testing.T) { - local := &mockedReader{content: ` -{ - "serial": 5 -} -`} - remote := &mockedReader{content: ` -{ - "serial": 6 -} -`} - - stale := IsLocalStateStale(local, remote) - assert.True(t, stale) + local := strings.NewReader(`{"serial": 5}`) + remote := strings.NewReader(`{"serial": 6}`) + assert.True(t, IsLocalStateStale(local, remote)) } func TestLocalStateIsTheSame(t *testing.T) { - local := &mockedReader{content: ` -{ - "serial": 5 -} -`} - remote := &mockedReader{content: ` -{ - "serial": 5 -} -`} - - stale := IsLocalStateStale(local, remote) - assert.False(t, stale) + local := strings.NewReader(`{"serial": 5}`) + remote := strings.NewReader(`{"serial": 5}`) + assert.False(t, IsLocalStateStale(local, remote)) } func TestLocalStateMarkStaleWhenFailsToLoad(t *testing.T) { local := iotest.ErrReader(fmt.Errorf("Random error")) - remote := &mockedReader{content: ` -{ - "serial": 5 -} -`} - - stale := IsLocalStateStale(local, remote) - assert.True(t, stale) + remote := strings.NewReader(`{"serial": 5}`) + assert.True(t, IsLocalStateStale(local, remote)) } func TestLocalStateMarkNonStaleWhenRemoteFailsToLoad(t *testing.T) { - local := &mockedReader{content: ` -{ - "serial": 5 -} -`} + local := strings.NewReader(`{"serial": 5}`) remote := iotest.ErrReader(fmt.Errorf("Random error")) - - stale := IsLocalStateStale(local, remote) - assert.False(t, stale) + assert.False(t, IsLocalStateStale(local, remote)) } diff --git a/go.mod b/go.mod index 7cef4cd4..f703d8b0 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,10 @@ require ( gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) -require gopkg.in/yaml.v3 v3.0.1 +require ( + go.uber.org/mock v0.3.0 + gopkg.in/yaml.v3 v3.0.1 
+) require ( cloud.google.com/go/compute v1.23.1 // indirect diff --git a/go.sum b/go.sum index 25409bd6..cff48c8f 100644 --- a/go.sum +++ b/go.sum @@ -158,6 +158,8 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= +go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= diff --git a/internal/bundle/bundles/basic/databricks_template_schema.json b/internal/bundle/bundles/basic/databricks_template_schema.json new file mode 100644 index 00000000..c1c5cf12 --- /dev/null +++ b/internal/bundle/bundles/basic/databricks_template_schema.json @@ -0,0 +1,16 @@ +{ + "properties": { + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" + } + } +} diff --git a/internal/bundle/bundles/basic/template/databricks.yml.tmpl b/internal/bundle/bundles/basic/template/databricks.yml.tmpl new file mode 100644 index 00000000..a88cbd30 --- /dev/null +++ b/internal/bundle/bundles/basic/template/databricks.yml.tmpl @@ -0,0 +1,18 @@ +bundle: + name: basic + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + +resources: + jobs: + foo: + name: test-job-basic-{{.unique_id}} + tasks: + - task_key: my_notebook_task + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + spark_python_task: + python_file: ./hello_world.py diff --git a/internal/bundle/bundles/basic/template/hello_world.py b/internal/bundle/bundles/basic/template/hello_world.py new file mode 100644 index 00000000..f301245e --- /dev/null +++ b/internal/bundle/bundles/basic/template/hello_world.py @@ -0,0 +1 @@ +print("Hello World!") diff --git a/internal/bundle/local_state_staleness_test.go b/internal/bundle/local_state_staleness_test.go new file mode 100644 index 00000000..06cfe0e0 --- /dev/null +++ b/internal/bundle/local_state_staleness_test.go @@ -0,0 +1,70 @@ +package bundle + +import ( + "context" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccLocalStateStaleness(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + // The approach for this test is as follows: + // 1) First deploy of bundle instance A + // 2) First deploy of bundle instance B + // 3) Second deploy of bundle instance A + // Because of deploy (2), the locally cached state of bundle instance A should be stale. 
+ // Then for deploy (3), it must use the remote state over the stale local state. + + nodeTypeId := internal.GetNodeTypeId(env) + uniqueId := uuid.New().String() + initialize := func() string { + root, err := initTestTemplate(t, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": "13.2.x-snapshot-scala2.12", + }) + require.NoError(t, err) + + t.Cleanup(func() { + err = destroyBundle(t, root) + require.NoError(t, err) + }) + + return root + } + + bundleA := initialize() + bundleB := initialize() + + // 1) Deploy bundle A + err = deployBundle(t, bundleA) + require.NoError(t, err) + + // 2) Deploy bundle B + err = deployBundle(t, bundleB) + require.NoError(t, err) + + // 3) Deploy bundle A again + err = deployBundle(t, bundleA) + require.NoError(t, err) + + // Assert that there is only a single job in the workspace corresponding to this bundle. + iter := w.Jobs.List(context.Background(), jobs.ListJobsRequest{ + Name: "test-job-basic-" + uniqueId, + }) + jobs, err := listing.ToSlice(context.Background(), iter) + require.NoError(t, err) + assert.Len(t, jobs, 1) +} diff --git a/internal/mocks/README.md b/internal/mocks/README.md new file mode 100644 index 00000000..231bbfaa --- /dev/null +++ b/internal/mocks/README.md @@ -0,0 +1,7 @@ +# Interface mocking + +Use this directory to store mocks for interfaces in this repository. + +Please use the same package structure for the mocks as the interface it is mocking. + +See https://github.com/uber-go/mock for more information on how to generate mocks. diff --git a/internal/mocks/libs/filer/filer_mock.go b/internal/mocks/libs/filer/filer_mock.go new file mode 100644 index 00000000..ef00976a --- /dev/null +++ b/internal/mocks/libs/filer/filer_mock.go @@ -0,0 +1,139 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/databricks/cli/libs/filer (interfaces: Filer) +// +// Generated by this command: +// +// mockgen -destination filer_mock.go github.com/databricks/cli/libs/filer Filer +// +// Package mock_filer is a generated GoMock package. +package mock_filer + +import ( + context "context" + io "io" + fs "io/fs" + reflect "reflect" + + filer "github.com/databricks/cli/libs/filer" + gomock "go.uber.org/mock/gomock" +) + +// MockFiler is a mock of Filer interface. +type MockFiler struct { + ctrl *gomock.Controller + recorder *MockFilerMockRecorder +} + +// MockFilerMockRecorder is the mock recorder for MockFiler. +type MockFilerMockRecorder struct { + mock *MockFiler +} + +// NewMockFiler creates a new mock instance. +func NewMockFiler(ctrl *gomock.Controller) *MockFiler { + mock := &MockFiler{ctrl: ctrl} + mock.recorder = &MockFilerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFiler) EXPECT() *MockFilerMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockFiler) Delete(arg0 context.Context, arg1 string, arg2 ...filer.DeleteMode) error { + m.ctrl.T.Helper() + varargs := []any{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Delete", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockFilerMockRecorder) Delete(arg0, arg1 any, arg2 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockFiler)(nil).Delete), varargs...) 
+} + +// Mkdir mocks base method. +func (m *MockFiler) Mkdir(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Mkdir", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Mkdir indicates an expected call of Mkdir. +func (mr *MockFilerMockRecorder) Mkdir(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mkdir", reflect.TypeOf((*MockFiler)(nil).Mkdir), arg0, arg1) +} + +// Read mocks base method. +func (m *MockFiler) Read(arg0 context.Context, arg1 string) (io.ReadCloser, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Read", arg0, arg1) + ret0, _ := ret[0].(io.ReadCloser) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Read indicates an expected call of Read. +func (mr *MockFilerMockRecorder) Read(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockFiler)(nil).Read), arg0, arg1) +} + +// ReadDir mocks base method. +func (m *MockFiler) ReadDir(arg0 context.Context, arg1 string) ([]fs.DirEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadDir", arg0, arg1) + ret0, _ := ret[0].([]fs.DirEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadDir indicates an expected call of ReadDir. +func (mr *MockFilerMockRecorder) ReadDir(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDir", reflect.TypeOf((*MockFiler)(nil).ReadDir), arg0, arg1) +} + +// Stat mocks base method. +func (m *MockFiler) Stat(arg0 context.Context, arg1 string) (fs.FileInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stat", arg0, arg1) + ret0, _ := ret[0].(fs.FileInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stat indicates an expected call of Stat. +func (mr *MockFilerMockRecorder) Stat(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockFiler)(nil).Stat), arg0, arg1) +} + +// Write mocks base method. +func (m *MockFiler) Write(arg0 context.Context, arg1 string, arg2 io.Reader, arg3 ...filer.WriteMode) error { + m.ctrl.T.Helper() + varargs := []any{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Write", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an expected call of Write. +func (mr *MockFilerMockRecorder) Write(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockFiler)(nil).Write), varargs...) +} diff --git a/libs/filer/filer.go b/libs/filer/filer.go index 8267dc34..c1c747c5 100644 --- a/libs/filer/filer.go +++ b/libs/filer/filer.go @@ -10,14 +10,14 @@ import ( type WriteMode int const ( - OverwriteIfExists WriteMode = iota - CreateParentDirectories = iota << 1 + OverwriteIfExists WriteMode = 1 << iota + CreateParentDirectories ) type DeleteMode int const ( - DeleteRecursively DeleteMode = iota + DeleteRecursively DeleteMode = 1 << iota ) type FileAlreadyExistsError struct { From ef97e249ec1ff6907798a6525905df4f9892d3a7 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 24 Nov 2023 14:21:47 +0100 Subject: [PATCH 249/310] Add function to check if `config.Value` is valid (#1009) ## Changes Small function broken out from other work in progress. 
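For illustration, a small usage sketch of the new helper; the constructor call mirrors the unit test in the diff below.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/config"
)

func main() {
	// The zero value carries KindInvalid and reports false.
	var zero config.Value
	fmt.Println(zero.IsValid()) // false

	// A value constructed with an actual kind reports true.
	v := config.NewValue(1, config.Location{})
	fmt.Println(v.IsValid()) // true
}
```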
--- libs/config/value.go | 4 ++++ libs/config/value_test.go | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/libs/config/value.go b/libs/config/value.go index c77f8147..fe0ced9b 100644 --- a/libs/config/value.go +++ b/libs/config/value.go @@ -51,6 +51,10 @@ func (v Value) Location() Location { return v.l } +func (v Value) IsValid() bool { + return v.k != KindInvalid +} + func (v Value) AsAny() any { switch v.k { case KindInvalid: diff --git a/libs/config/value_test.go b/libs/config/value_test.go index cb8ef16a..6c8befc7 100644 --- a/libs/config/value_test.go +++ b/libs/config/value_test.go @@ -35,3 +35,10 @@ func TestValueAsMap(t *testing.T) { assert.True(t, ok) assert.Len(t, m, 1) } + +func TestValueIsValid(t *testing.T) { + var zeroValue config.Value + assert.False(t, zeroValue.IsValid()) + var intValue = config.NewValue(1, config.Location{}) + assert.True(t, intValue.IsValid()) +} From f5f57b6bf92c5500ac6575efbd2551fff6d5519d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 27 Nov 2023 11:06:29 +0100 Subject: [PATCH 250/310] Populate struct field with `config.Value` instance if possible (#1010) ## Changes If a struct has a field of type `config.Value`, then we set it to the source value while converting a `config.Value` instance to a struct as part of a call to `convert.ToTyped`. This is convenient when dealing with deeply nested structs where functions on inner structs need access to the metadata provided by their corresponding `config.Value` (e.g. where they were defined). ## Tests Added unit tests pass. --- libs/config/convert/struct_info.go | 18 +++++++++++++++ libs/config/convert/struct_info_test.go | 30 +++++++++++++++++++++++++ libs/config/convert/to_typed.go | 6 +++++ libs/config/convert/to_typed_test.go | 18 +++++++++++++++ 4 files changed, 72 insertions(+) diff --git a/libs/config/convert/struct_info.go b/libs/config/convert/struct_info.go index 2457b3c2..80cfabb6 100644 --- a/libs/config/convert/struct_info.go +++ b/libs/config/convert/struct_info.go @@ -4,6 +4,8 @@ import ( "reflect" "strings" "sync" + + "github.com/databricks/cli/libs/config" ) // structInfo holds the type information we need to efficiently @@ -11,6 +13,10 @@ import ( type structInfo struct { // Fields maps the JSON-name of the field to the field's index for use with [FieldByIndex]. Fields map[string][]int + + // ValueField maps to the field with a [config.Value]. + // The underlying type is expected to only have one of these. + ValueField []int } // structInfoCache caches type information. @@ -68,6 +74,15 @@ func buildStructInfo(typ reflect.Type) structInfo { continue } + // If this field has type [config.Value], we populate it with the source [config.Value] from [ToTyped]. + if sf.IsExported() && sf.Type == configValueType { + if out.ValueField != nil { + panic("multiple config.Value fields") + } + out.ValueField = append(prefix, sf.Index...) + continue + } + name, _, _ := strings.Cut(sf.Tag.Get("json"), ",") if name == "" || name == "-" { continue @@ -113,3 +128,6 @@ func (s *structInfo) FieldValues(v reflect.Value) map[string]reflect.Value { return out } + +// Type of [config.Value]. 
+var configValueType = reflect.TypeOf((*config.Value)(nil)).Elem() diff --git a/libs/config/convert/struct_info_test.go b/libs/config/convert/struct_info_test.go index 2e31adac..685679ae 100644 --- a/libs/config/convert/struct_info_test.go +++ b/libs/config/convert/struct_info_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + "github.com/databricks/cli/libs/config" "github.com/stretchr/testify/assert" ) @@ -194,3 +195,32 @@ func TestStructInfoFieldValuesAnonymousByPointer(t *testing.T) { assert.Empty(t, fv) }) } + +func TestStructInfoValueFieldAbsent(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + assert.Nil(t, si.ValueField) +} + +func TestStructInfoValueFieldPresent(t *testing.T) { + type Tmp struct { + Foo config.Value + } + + si := getStructInfo(reflect.TypeOf(Tmp{})) + assert.NotNil(t, si.ValueField) +} + +func TestStructInfoValueFieldMultiple(t *testing.T) { + type Tmp struct { + Foo config.Value + Bar config.Value + } + + assert.Panics(t, func() { + getStructInfo(reflect.TypeOf(Tmp{})) + }) +} diff --git a/libs/config/convert/to_typed.go b/libs/config/convert/to_typed.go index ca09fce4..8c43d974 100644 --- a/libs/config/convert/to_typed.go +++ b/libs/config/convert/to_typed.go @@ -83,6 +83,12 @@ func toTypedStruct(dst reflect.Value, src config.Value) error { } } + // Populate field(s) for [config.Value], if any. + if info.ValueField != nil { + vv := dst.FieldByIndex(info.ValueField) + vv.Set(reflect.ValueOf(src)) + } + return nil case config.KindNil: dst.SetZero() diff --git a/libs/config/convert/to_typed_test.go b/libs/config/convert/to_typed_test.go index 26e17dcc..2845bdda 100644 --- a/libs/config/convert/to_typed_test.go +++ b/libs/config/convert/to_typed_test.go @@ -133,6 +133,24 @@ func TestToTypedStructNilOverwrite(t *testing.T) { assert.Equal(t, Tmp{}, out) } +func TestToTypedStructWithValueField(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + + ConfigValue config.Value + } + + var out Tmp + v := config.V(map[string]config.Value{ + "foo": config.V("bar"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, "bar", out.Foo) + assert.Equal(t, v, out.ConfigValue) +} + func TestToTypedMap(t *testing.T) { var out = map[string]string{} From 96e9545cf08b8feb46decc5ab3f6b4b0fd7220cd Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 27 Nov 2023 11:42:39 +0100 Subject: [PATCH 251/310] Automate the generation of bundle schema descriptions (#1007) ## Changes This PR makes changes required to automatically update the bundle docs during the CLI release process. We rely on `post_generate` scripts that are executed after code generation with CWD as the CLI repo root. The new `output-file` flag is introduced because stdout redirect does not work here and would otherwise require changes to our release automation CLI (deco CLI) ## Tests Manually. Regenerated the CLI and the descriptions were indeed generated for the CLI from the provided openapi spec. 
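The flag works by swapping the command's output writer before anything is rendered. A minimal cobra sketch of the pattern follows; the command wiring is simplified and illustrative, not the actual `bundle schema` implementation.

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var outputFile string
	cmd := &cobra.Command{
		Use: "schema",
		RunE: func(cmd *cobra.Command, args []string) error {
			if outputFile != "" {
				f, err := os.OpenFile(outputFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
				if err != nil {
					return err
				}
				defer f.Close()
				// Redirect the command's output writer to the file.
				cmd.SetOut(f)
			}
			// Goes to stdout unless SetOut redirected it above.
			_, err := cmd.OutOrStdout().Write([]byte("{}\n"))
			return err
		},
	}
	cmd.Flags().StringVar(&outputFile, "output-file", "", "File path to write the schema to.")
	_ = cmd.Execute()
}
```

This is what makes the flag usable from contexts where shell redirection isn't available, such as the `post_generate` hook above.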
--- .codegen.json | 5 ++++- cmd/bundle/schema.go | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/.codegen.json b/.codegen.json index da4f3dd6..7e688a8c 100644 --- a/.codegen.json +++ b/.codegen.json @@ -8,6 +8,9 @@ ".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go" }, "toolchain": { - "required": ["go"] + "required": ["go"], + "post_generate": [ + "go run main.go bundle schema --only-docs --output-file ./bundle/schema/docs/bundle_descriptions.json" + ] } } diff --git a/cmd/bundle/schema.go b/cmd/bundle/schema.go index 8b2c0177..ec817037 100644 --- a/cmd/bundle/schema.go +++ b/cmd/bundle/schema.go @@ -2,6 +2,7 @@ package bundle import ( "encoding/json" + "os" "reflect" "github.com/databricks/cli/bundle/config" @@ -16,11 +17,18 @@ func newSchemaCommand() *cobra.Command { } var openapi string + var outputFile string var onlyDocs bool cmd.Flags().StringVar(&openapi, "openapi", "", "path to a databricks openapi spec") cmd.Flags().BoolVar(&onlyDocs, "only-docs", false, "only generate descriptions for the schema") + cmd.Flags().StringVar(&outputFile, "output-file", "", "File path to write the schema to. If not specified, the schema will be written to stdout.") cmd.RunE = func(cmd *cobra.Command, args []string) error { + // If no openapi spec is provided, try to use the environment variable. + // This environment variable is set during CLI code generation. + if openapi == "" { + openapi = os.Getenv("DATABRICKS_OPENAPI_SPEC") + } docs, err := schema.BundleDocs(openapi) if err != nil { return err @@ -39,6 +47,16 @@ func newSchemaCommand() *cobra.Command { return err } } + + // If outputFile is provided, write to that file. + if outputFile != "" { + f, err := os.OpenFile(outputFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + cmd.SetOut(f) + } cmd.OutOrStdout().Write(result) return nil } From dd1d540429150a848425835df615ac38cc4914b2 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 28 Nov 2023 10:04:06 +0100 Subject: [PATCH 252/310] Add mlops-stacks to the default `databricks bundle init` prompt (#988) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This makes mlops-stacks more discoverable and improves the UX of initializing the mlops-stacks template. ## Tests Manually Dropdown UI: ``` shreyas.goenka@THW32HFW6T projects % cli bundle init Template to use: ▸ default-python mlops-stacks ``` Help message: ``` shreyas.goenka@THW32HFW6T bricks % cli bundle init -h Initialize using a bundle template. TEMPLATE_PATH optionally specifies which template to use. It can be one of the following: - default-python: The default Python template - mlops-stacks: The Databricks MLOps Stacks template.
More information can be found at: https://github.com/databricks/mlops-stacks ``` --- cmd/bundle/init.go | 66 ++++++++++++++++++++++++++++++++++------- cmd/bundle/init_test.go | 15 ++++++++++ 2 files changed, 71 insertions(+), 10 deletions(-) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index ccdc6859..ac6f49de 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -2,8 +2,10 @@ package bundle import ( "errors" + "fmt" "os" "path/filepath" + "slices" "strings" "github.com/databricks/cli/cmd/root" @@ -18,9 +20,52 @@ var gitUrlPrefixes = []string{ "git@", } -var aliasedTemplates = map[string]string{ - "mlops-stack": "https://github.com/databricks/mlops-stacks", - "mlops-stacks": "https://github.com/databricks/mlops-stacks", +type nativeTemplate struct { + name string + gitUrl string + description string + aliases []string +} + +var nativeTemplates = []nativeTemplate{ + { + name: "default-python", + description: "The default Python template", + }, + { + name: "mlops-stacks", + gitUrl: "https://github.com/databricks/mlops-stacks", + description: "The Databricks MLOps Stacks template (https://github.com/databricks/mlops-stacks)", + aliases: []string{"mlops-stack"}, + }, +} + +func nativeTemplateDescriptions() string { + var lines []string + for _, template := range nativeTemplates { + lines = append(lines, fmt.Sprintf("- %s: %s", template.name, template.description)) + } + return strings.Join(lines, "\n") +} + +func nativeTemplateOptions() []string { + names := make([]string, 0, len(nativeTemplates)) + for _, template := range nativeTemplates { + names = append(names, template.name) + } + return names +} + +func getUrlForNativeTemplate(name string) string { + for _, template := range nativeTemplates { + if template.name == name { + return template.gitUrl + } + if slices.Contains(template.aliases, name) { + return template.gitUrl + } + } + return "" } func isRepoUrl(url string) bool { @@ -47,14 +92,14 @@ func newInitCommand() *cobra.Command { Use: "init [TEMPLATE_PATH]", Short: "Initialize using a bundle template", Args: cobra.MaximumNArgs(1), - Long: `Initialize using a bundle template. + Long: fmt.Sprintf(`Initialize using a bundle template. TEMPLATE_PATH optionally specifies which template to use. It can be one of the following: -- 'default-python' for the default Python template +%s - a local file system path with a template directory - a Git repository URL, e.g. https://github.com/my/repository -See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more information on templates.`, +See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more information on templates.`, nativeTemplateDescriptions()), } var configFile string @@ -89,15 +134,16 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf if !cmdio.IsOutTTY(ctx) || !cmdio.IsInTTY(ctx) { return errors.New("please specify a template") } - templatePath, err = cmdio.Ask(ctx, "Template to use", "default-python") + templatePath, err = cmdio.AskSelect(ctx, "Template to use", nativeTemplateOptions()) if err != nil { return err } } - // Expand templatePath if it's an alias for a known template - if _, ok := aliasedTemplates[templatePath]; ok { - templatePath = aliasedTemplates[templatePath] + // Expand templatePath to a git URL if it's an alias for a known native template + // and we know its git URL.
+ if gitUrl := getUrlForNativeTemplate(templatePath); gitUrl != "" { + templatePath = gitUrl } if !isRepoUrl(templatePath) { diff --git a/cmd/bundle/init_test.go b/cmd/bundle/init_test.go index 4a795160..db4446bb 100644 --- a/cmd/bundle/init_test.go +++ b/cmd/bundle/init_test.go @@ -25,3 +25,18 @@ func TestBundleInitRepoName(t *testing.T) { assert.Equal(t, "invalid-url", repoName("invalid-url")) assert.Equal(t, "www.github.com", repoName("https://www.github.com")) } + +func TestNativeTemplateOptions(t *testing.T) { + assert.Equal(t, []string{"default-python", "mlops-stacks"}, nativeTemplateOptions()) +} + +func TestNativeTemplateDescriptions(t *testing.T) { + assert.Equal(t, "- default-python: The default Python template\n- mlops-stacks: The Databricks MLOps Stacks template (https://github.com/databricks/mlops-stacks)", nativeTemplateDescriptions()) +} + +func TestGetUrlForNativeTemplate(t *testing.T) { + assert.Equal(t, "https://github.com/databricks/mlops-stacks", getUrlForNativeTemplate("mlops-stacks")) + assert.Equal(t, "https://github.com/databricks/mlops-stacks", getUrlForNativeTemplate("mlops-stack")) + assert.Equal(t, "", getUrlForNativeTemplate("default-python")) + assert.Equal(t, "", getUrlForNativeTemplate("invalid")) +} From 1932da0a87ff67a79ebc708dfd769a0b3c7ecacd Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Tue, 28 Nov 2023 11:50:16 +0100 Subject: [PATCH 253/310] Update cli version in the vscode extension during release (#1014) Similar to how we do it for setup-cli and homebrew-tap repos. The PR on the extension side: https://github.com/databricks/databricks-vscode/pull/948 --- .github/workflows/release.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a7901dae..ea9e4690 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -93,3 +93,27 @@ jobs: linux_arm64_sha: artifacts.get('linux_arm64') } }); + + create-vscode-extension-update-pr: + needs: goreleaser + runs-on: ubuntu-latest + steps: + - name: Set VERSION variable from tag + run: | + VERSION=${{ github.ref_name }} + echo "VERSION=${VERSION:1}" >> $GITHUB_ENV + + - name: Update CLI version in the VSCode extension + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.DECO_GITHUB_TOKEN }} + script: | + await github.rest.actions.createWorkflowDispatch({ + owner: 'databricks', + repo: 'databricks-vscode', + workflow_id: 'update-cli-version.yml', + ref: 'main', + inputs: { + version: "${{ env.VERSION }}", + } + }); From b5f34a118127e49e46f4614cb1fa2bcfe7e699a2 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 28 Nov 2023 17:08:27 +0100 Subject: [PATCH 254/310] Removed unused `ToHttpsUrl` method and corresponding library (#1017) ## Changes Removed unused ToHttpsUrl method and corresponding library --- go.mod | 1 - go.sum | 2 -- libs/git/utils.go | 28 ---------------------------- libs/git/utils_test.go | 22 ---------------------- 4 files changed, 53 deletions(-) delete mode 100644 libs/git/utils.go delete mode 100644 libs/git/utils_test.go diff --git a/go.mod b/go.mod index f703d8b0..b70341cd 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,6 @@ require ( github.com/spf13/cobra v1.8.0 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.8.4 // MIT - github.com/whilp/git-urls v1.0.0 // MIT golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/mod v0.14.0 golang.org/x/oauth2 v0.14.0 diff --git a/go.sum b/go.sum index cff48c8f..8f0f4157 
100644 --- a/go.sum +++ b/go.sum @@ -149,8 +149,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU= -github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= diff --git a/libs/git/utils.go b/libs/git/utils.go deleted file mode 100644 index 1d38da3a..00000000 --- a/libs/git/utils.go +++ /dev/null @@ -1,28 +0,0 @@ -package git - -import ( - "strings" - - giturls "github.com/whilp/git-urls" -) - -// Return an origin URL as an HTTPS URL. -// The transformations in this function are not guaranteed to work for all -// Git providers. They are only guaranteed to work for GitHub. -func ToHttpsUrl(url string) (string, error) { - origin, err := giturls.Parse(url) - if err != nil { - return "", err - } - // If this repository is checked out over SSH - if origin.Scheme != "https" { - origin.Scheme = "https" - } - // Basic auth is not applicable for an HTTPS URL. - if origin.User != nil { - origin.User = nil - } - // Remove `.git` suffix, if present. - origin.Path = strings.TrimSuffix(origin.Path, ".git") - return origin.String(), nil -} diff --git a/libs/git/utils_test.go b/libs/git/utils_test.go deleted file mode 100644 index 2a77cae1..00000000 --- a/libs/git/utils_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package git - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestToHttpsUrlForSsh(t *testing.T) { - for _, e := range []struct { - url string - expected string - }{ - {"user@foo.com:org/repo-name.git", "https://foo.com/org/repo-name"}, - {"git@github.com:databricks/cli.git", "https://github.com/databricks/cli"}, - {"https://github.com/databricks/cli.git", "https://github.com/databricks/cli"}, - } { - url, err := ToHttpsUrl(e.url) - assert.NoError(t, err) - assert.Equal(t, e.expected, url) - } -} From 5f88af54fd942f22563a5e7a0e41d964b02eaac0 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 29 Nov 2023 10:53:07 +0100 Subject: [PATCH 255/310] Revert automation for bundle schema documentation generation (#1018) ## Changes Introduced in #1007 but doesn't work well yet. This will be automated again as part of #1012. --- .codegen.json | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.codegen.json b/.codegen.json index 7e688a8c..da4f3dd6 100644 --- a/.codegen.json +++ b/.codegen.json @@ -8,9 +8,6 @@ ".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go" }, "toolchain": { - "required": ["go"], - "post_generate": [ - "go run main.go bundle schema --only-docs --output-file ./bundle/schema/docs/bundle_descriptions.json" - ] + "required": ["go"] } } From 5431174302a9a4461726ec9475c6c221890a0dd5 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 29 Nov 2023 11:40:12 +0100 Subject: [PATCH 256/310] Do not add wheel content hash in uploaded Python wheel path (#1015) ## Changes Removed hash from the upload path since it's not useful anyway. 
The main reason for that change was to make it work on all-purpose clusters. But in order to make it work, the wheel version needs to be increased anyway. So having only the hash in the path is useless. Note: using the --build-number (build tag) flag does not help with re-installing libraries on all-purpose clusters. The reason is that `pip` ignores the build tag when upgrading the library and only looks at the wheel version. The build tag is only used for sorting the versions, and the one with the higher build tag takes priority when installed. It only works if no library is installed. See https://github.com/pypa/pip/blob/a15dd75d98884c94a77d349b800c7c755d8c34e4/src/pip/_internal/index/package_finder.py#L522-L556 https://github.com/pypa/pip/issues/4781 Thus, the only way to reinstall the library on an all-purpose cluster is to increase the wheel version manually or use automatic version generation, e.g. ``` setup( version=datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S"), ... ) ``` ## Tests Integration tests passed. --- bundle/artifacts/artifacts.go | 38 +++++++++++------------------- bundle/artifacts/whl/autodetect.go | 6 ++--- bundle/artifacts/whl/build.go | 8 +++---- bundle/artifacts/whl/infer.go | 15 ++++++++++++ internal/bundle/artifacts_test.go | 4 ++-- 5 files changed, 38 insertions(+), 33 deletions(-) diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index e703668e..dd261d3b 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -3,7 +3,6 @@ package artifacts import ( "bytes" "context" - "crypto/sha256" "errors" "fmt" "os" @@ -62,13 +61,13 @@ func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) error { return fmt.Errorf("artifact doesn't exist: %s", m.name) } - cmdio.LogString(ctx, fmt.Sprintf("artifacts.Build(%s): Building...", m.name)) + cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name)) out, err := artifact.Build(ctx) if err != nil { - return fmt.Errorf("artifacts.Build(%s): %w, output: %s", m.name, err, out) + return fmt.Errorf("build for %s failed, error: %w, output: %s", m.name, err, out) } - cmdio.LogString(ctx, fmt.Sprintf("artifacts.Build(%s): Build succeeded", m.name)) + cmdio.LogString(ctx, "Build succeeded") return nil } @@ -108,7 +107,7 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error { err = uploadArtifact(ctx, artifact, uploadPath, client) if err != nil { - return fmt.Errorf("artifacts.Upload(%s): %w", m.name, err) + return fmt.Errorf("upload for %s failed, error: %w", m.name, err) } return nil @@ -119,15 +118,14 @@ func uploadArtifact(ctx context.Context, a *config.Artifact, uploadPath string, f := &a.Files[i] if f.NeedsUpload() { filename := filepath.Base(f.Source) - cmdio.LogString(ctx, fmt.Sprintf("artifacts.Upload(%s): Uploading...", filename)) + cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename)) - remotePath, err := uploadArtifactFile(ctx, f.Source, uploadPath, client) + err := uploadArtifactFile(ctx, f.Source, client) if err != nil { return err } - cmdio.LogString(ctx, fmt.Sprintf("artifacts.Upload(%s): Upload succeeded", filename)) - - f.RemotePath = remotePath + cmdio.LogString(ctx, "Upload succeeded") + f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) } } @@ -136,27 +134,19 @@ func uploadArtifact(ctx context.Context, a *config.Artifact, uploadPath string, } // Function to upload artifact file to Workspace -func uploadArtifactFile(ctx context.Context, file string, uploadPath string, client filer.Filer) (string, error) { +func uploadArtifactFile(ctx context.Context, file
string, client filer.Filer) error { raw, err := os.ReadFile(file) if err != nil { - return "", fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err)) + return fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err)) } - fileHash := sha256.Sum256(raw) - relPath := path.Join(fmt.Sprintf("%x", fileHash), filepath.Base(file)) - remotePath := path.Join(uploadPath, relPath) - - err = client.Mkdir(ctx, path.Dir(relPath)) + filename := filepath.Base(file) + err = client.Write(ctx, filename, bytes.NewReader(raw), filer.OverwriteIfExists, filer.CreateParentDirectories) if err != nil { - return "", fmt.Errorf("unable to import %s: %w", remotePath, err) + return fmt.Errorf("unable to import %s: %w", filename, err) } - err = client.Write(ctx, relPath, bytes.NewReader(raw), filer.OverwriteIfExists, filer.CreateParentDirectories) - if err != nil { - return "", fmt.Errorf("unable to import %s: %w", remotePath, err) - } - - return remotePath, nil + return nil } func getUploadBasePath(b *bundle.Bundle) (string, error) { diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index 29031e86..7c1c59d4 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -32,17 +32,17 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect") return nil } - cmdio.LogString(ctx, "artifacts.whl.AutoDetect: Detecting Python wheel project...") + cmdio.LogString(ctx, "Detecting Python wheel project...") // checking if there is setup.py in the bundle root setupPy := filepath.Join(b.Config.Path, "setup.py") _, err := os.Stat(setupPy) if err != nil { - cmdio.LogString(ctx, "artifacts.whl.AutoDetect: No Python wheel project found at bundle root folder") + cmdio.LogString(ctx, "No Python wheel project found at bundle root folder") return nil } - cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.AutoDetect: Found Python wheel project at %s", b.Config.Path)) + cmdio.LogString(ctx, fmt.Sprintf("Found Python wheel project at %s", b.Config.Path)) module := extractModuleName(setupPy) if b.Config.Artifacts == nil { diff --git a/bundle/artifacts/whl/build.go b/bundle/artifacts/whl/build.go index 6ebc925f..c1e7e8fa 100644 --- a/bundle/artifacts/whl/build.go +++ b/bundle/artifacts/whl/build.go @@ -32,7 +32,7 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { return fmt.Errorf("artifact doesn't exist: %s", m.name) } - cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.Build(%s): Building...", m.name)) + cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name)) dir := artifact.Path @@ -42,13 +42,13 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { out, err := artifact.Build(ctx) if err != nil { - return fmt.Errorf("artifacts.whl.Build(%s): Failed %w, output: %s", m.name, err, out) + return fmt.Errorf("build failed %s, error: %w, output: %s", m.name, err, out) } - cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.Build(%s): Build succeeded", m.name)) + cmdio.LogString(ctx, "Build succeeded") wheels := python.FindFilesWithSuffixInPath(distPath, ".whl") if len(wheels) == 0 { - return fmt.Errorf("artifacts.whl.Build(%s): cannot find built wheel in %s", m.name, dir) + return fmt.Errorf("cannot find built wheel in %s for package %s", dir, m.name) } for _, wheel := range wheels { artifact.Files = append(artifact.Files, config.ArtifactFile{ diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index 1c0e9857..dedecc30 
100644 --- a/bundle/artifacts/whl/infer.go +++ b/bundle/artifacts/whl/infer.go @@ -18,6 +18,21 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error { if err != nil { return err } + + // Note: using the --build-number (build tag) flag does not help with re-installing + // libraries on all-purpose clusters. The reason is that `pip` ignores the build tag + // when upgrading the library and only looks at the wheel version. + // The build tag is only used for sorting the versions, and the one with the higher build tag takes priority when installed. + // It only works if no library is installed + // See https://github.com/pypa/pip/blob/a15dd75d98884c94a77d349b800c7c755d8c34e4/src/pip/_internal/index/package_finder.py#L522-L556 + // https://github.com/pypa/pip/issues/4781 + // + // Thus, the only way to reinstall the library on an all-purpose cluster is to increase the wheel version manually or + // use automatic version generation, e.g. + // setup( + // version=datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S"), + // ... + //) artifact.BuildCommand = fmt.Sprintf("%s setup.py bdist_wheel", py) return nil diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 689a4b4b..71f91fde 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -64,6 +64,6 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { err := bundle.Apply(context.Background(), b, artifacts.BasicUpload("test")) require.NoError(t, err) - require.Regexp(t, regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/[a-z0-9]+/test\.whl`)), artifact.Files[0].RemotePath) - require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/[a-z0-9]+/test\.whl`)), artifact.Files[0].Libraries[0].Whl) + require.Regexp(t, regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), artifact.Files[0].RemotePath) + require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), artifact.Files[0].Libraries[0].Whl) } From 833746cbdd7f9adc9c075307504a44003c2a5f61 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 29 Nov 2023 14:20:13 +0100 Subject: [PATCH 257/310] Do not replace pipeline libraries if there are no matches for pattern (#1021) ## Changes If the glob call for a defined pipeline library returns no matches, leave the entry as is. The next mutators in the chain will detect that the file is missing, and the resulting error will be more user friendly.
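For illustration, a minimal standalone sketch of this fallback (the function and names are illustrative; the actual change lives in the `expandPipelineGlobPaths` mutator shown below, which additionally rewrites matches relative to the bundle directory):

```
package main

import (
	"fmt"
	"path/filepath"
)

// expandGlobs expands each pattern, but keeps a pattern as-is when it matches
// nothing, so a later validation step can report the missing path instead of
// the entry silently disappearing.
func expandGlobs(patterns []string) ([]string, error) {
	var out []string
	for _, p := range patterns {
		matches, err := filepath.Glob(p)
		if err != nil {
			return nil, err
		}
		if len(matches) == 0 {
			out = append(out, p)
			continue
		}
		out = append(out, matches...)
	}
	return out, nil
}

func main() {
	expanded, err := expandGlobs([]string{"./dlt/*", "./non-existent"})
	if err != nil {
		panic(err)
	}
	fmt.Println(expanded) // "./non-existent" survives for a clearer error downstream.
}
```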
Before the change ``` Starting resource deployment Error: terraform apply: exit status 1 Error: cannot create pipeline: libraries must contain at least one element ``` After ``` Error: notebook ./non-existent not found ``` ## Tests Added regression unit tests --- .../mutator/expand_pipeline_glob_paths.go | 5 +++ .../expand_pipeline_glob_paths_test.go | 8 +++- .../bundle/pipeline_glob_paths/databricks.yml | 12 ++++++ .../dlt/nyc_taxi_loader.py | 3 ++ .../tests/bundle/pipeline_glob_paths_test.go | 40 +++++++++++++++++++ 5 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 bundle/tests/bundle/pipeline_glob_paths/databricks.yml create mode 100644 bundle/tests/bundle/pipeline_glob_paths/dlt/nyc_taxi_loader.py create mode 100644 bundle/tests/bundle/pipeline_glob_paths_test.go diff --git a/bundle/config/mutator/expand_pipeline_glob_paths.go b/bundle/config/mutator/expand_pipeline_glob_paths.go index 5fa203a0..cb147778 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths.go @@ -38,6 +38,11 @@ func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) err return err } + if len(matches) == 0 { + expandedLibraries = append(expandedLibraries, *library) + continue + } + for _, match := range matches { m, err := filepath.Rel(dir, match) if err != nil { diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index 48cd52a0..ad86865a 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -85,6 +85,11 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { Path: "/Repos/somerepo/test.ipynb", }, }, + { + Notebook: &pipelines.NotebookLibrary{ + Path: "./non-existent.ipynb", + }, + }, }, }, }, @@ -98,7 +103,7 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { require.NoError(t, err) libraries := b.Config.Resources.Pipelines["pipeline"].Libraries - require.Len(t, libraries, 10) + require.Len(t, libraries, 11) // Making sure glob patterns are expanded correctly require.True(t, containsNotebook(libraries, filepath.Join("test", "test2.ipynb"))) @@ -117,6 +122,7 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { // Making sure other libraries are not replaced require.True(t, containsJar(libraries, "./*.jar")) require.True(t, containsMaven(libraries, "org.jsoup:jsoup:1.7.2")) + require.True(t, containsNotebook(libraries, "./non-existent.ipynb")) } func containsNotebook(libraries []pipelines.PipelineLibrary, path string) bool { diff --git a/bundle/tests/bundle/pipeline_glob_paths/databricks.yml b/bundle/tests/bundle/pipeline_glob_paths/databricks.yml new file mode 100644 index 00000000..2e69691c --- /dev/null +++ b/bundle/tests/bundle/pipeline_glob_paths/databricks.yml @@ -0,0 +1,12 @@ +bundle: + name: pipeline_glob_paths + +resources: + pipelines: + nyc_taxi_pipeline: + name: "nyc taxi loader" + libraries: + - notebook: + path: ./dlt/* + - notebook: + path: ./non-existent diff --git a/bundle/tests/bundle/pipeline_glob_paths/dlt/nyc_taxi_loader.py b/bundle/tests/bundle/pipeline_glob_paths/dlt/nyc_taxi_loader.py new file mode 100644 index 00000000..83181c70 --- /dev/null +++ b/bundle/tests/bundle/pipeline_glob_paths/dlt/nyc_taxi_loader.py @@ -0,0 +1,3 @@ +# Databricks notebook source + +print("Hello from notebook!") diff --git a/bundle/tests/bundle/pipeline_glob_paths_test.go b/bundle/tests/bundle/pipeline_glob_paths_test.go new file mode 100644 index 
00000000..539ffc9d --- /dev/null +++ b/bundle/tests/bundle/pipeline_glob_paths_test.go @@ -0,0 +1,40 @@ +package bundle + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/require" +) + +func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { + ctx := context.Background() + ctx = root.SetWorkspaceClient(ctx, nil) + + b, err := bundle.Load(ctx, "./pipeline_glob_paths") + require.NoError(t, err) + + err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + require.NoError(t, err) + b.Config.Bundle.Target = "default" + + b.Config.Workspace.CurrentUser = &config.User{User: &iam.User{UserName: "user@domain.com"}} + b.WorkspaceClient() + + m := phases.Initialize() + err = bundle.Apply(ctx, b, m) + require.Error(t, err) + require.ErrorContains(t, err, "notebook ./non-existent not found") + + require.Equal( + t, + b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Libraries[0].Notebook.Path, + "/Users/user@domain.com/.bundle/pipeline_glob_paths/default/files/dlt/nyc_taxi_loader", + ) +} From deb062c48996c90b39bc12fa76824e66a532b69f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 29 Nov 2023 14:29:17 +0100 Subject: [PATCH 258/310] Fix bug where the account or workspace client could be `nil` (#1020) ## Changes We didn't return the error upon creating a workspace or account client. If there is an error, it must always propagate up the stack. The result of this bug was that we were setting a `nil` account or workspace client, which in turn caused SIGSEGVs. Fixes #913. ## Tests Manually confirmed this fixes the linked issue. The CLI now correctly returns an error when the client cannot be constructed. The issue was reproducible using a `.databrickscfg` with a single, incorrectly configured profile. --- cmd/root/auth.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 350cbc65..99e91043 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -65,7 +65,7 @@ func accountClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt return nil, err } } - return a, nil + return a, err } func MustAccountClient(cmd *cobra.Command, args []string) error { @@ -133,7 +133,7 @@ func workspaceClientOrPrompt(ctx context.Context, cfg *config.Config, allowPromp return nil, err } } - return w, nil + return w, err } func MustWorkspaceClient(cmd *cobra.Command, args []string) error { From 0cd3bb072dcf76e8a7e14f7377b7928e9e41c208 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 29 Nov 2023 14:29:31 +0100 Subject: [PATCH 259/310] Bump Go SDK to v0.26.0 (#1019) ## Changes Bump Go SDK to v0.26.0. Changelog at https://github.com/databricks/databricks-sdk-go/releases/tag/v0.26.0. ## Tests Integration tests pass. 
--- .codegen/_openapi_sha | 2 +- cmd/account/groups/groups.go | 4 +- .../network-connectivity.go | 124 ++++++++++++++++++ .../service-principals/service-principals.go | 4 +- cmd/account/users/users.go | 4 +- cmd/workspace/catalogs/catalogs.go | 1 + .../cluster-policies/cluster-policies.go | 8 +- cmd/workspace/groups/groups.go | 4 +- cmd/workspace/metastores/metastores.go | 87 ------------ cmd/workspace/schemas/schemas.go | 1 + .../service-principals/service-principals.go | 4 +- cmd/workspace/settings/settings.go | 50 ++++--- cmd/workspace/users/users.go | 4 +- go.mod | 20 +-- go.sum | 40 +++--- 15 files changed, 205 insertions(+), 152 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 7c42f6dc..4343d612 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -e7b127cb07af8dd4d8c61c7cc045c8910cdbb02a \ No newline at end of file +22f09783eb8a84d52026f856be3b2068f9498db3 \ No newline at end of file diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 53bafc41..826d7700 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -283,12 +283,12 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) - cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) + cmd.Flags().Int64Var(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) - cmd.Flags().IntVar(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) + cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" cmd.Short = `List group details.` diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index 3bc1e74e..e603dd89 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -480,4 +480,128 @@ func init() { }) } +// start list-network-connectivity-configurations command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listNetworkConnectivityConfigurationsOverrides []func( + *cobra.Command, + *settings.ListNetworkConnectivityConfigurationsRequest, +) + +func newListNetworkConnectivityConfigurations() *cobra.Command { + cmd := &cobra.Command{} + + var listNetworkConnectivityConfigurationsReq settings.ListNetworkConnectivityConfigurationsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listNetworkConnectivityConfigurationsReq.PageToken, "page-token", listNetworkConnectivityConfigurationsReq.PageToken, `Pagination token to go to next page based on previous query.`) + + cmd.Use = "list-network-connectivity-configurations" + cmd.Short = `List network connectivity configurations.` + cmd.Long = `List network connectivity configurations. + + Gets an array of network connectivity configurations.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.NetworkConnectivity.ListNetworkConnectivityConfigurationsAll(ctx, listNetworkConnectivityConfigurationsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listNetworkConnectivityConfigurationsOverrides { + fn(cmd, &listNetworkConnectivityConfigurationsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newListNetworkConnectivityConfigurations()) + }) +} + +// start list-private-endpoint-rules command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listPrivateEndpointRulesOverrides []func( + *cobra.Command, + *settings.ListPrivateEndpointRulesRequest, +) + +func newListPrivateEndpointRules() *cobra.Command { + cmd := &cobra.Command{} + + var listPrivateEndpointRulesReq settings.ListPrivateEndpointRulesRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listPrivateEndpointRulesReq.PageToken, "page-token", listPrivateEndpointRulesReq.PageToken, `Pagination token to go to next page based on previous query.`) + + cmd.Use = "list-private-endpoint-rules NETWORK_CONNECTIVITY_CONFIG_ID" + cmd.Short = `List private endpoint rules.` + cmd.Long = `List private endpoint rules. + + Gets an array of private endpoint rules.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + listPrivateEndpointRulesReq.NetworkConnectivityConfigId = args[0] + + response, err := a.NetworkConnectivity.ListPrivateEndpointRulesAll(ctx, listPrivateEndpointRulesReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listPrivateEndpointRulesOverrides { + fn(cmd, &listPrivateEndpointRulesReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newListPrivateEndpointRules()) + }) +} + // end service NetworkConnectivity diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index 4ad57d4e..864cd287 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -282,12 +282,12 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) - cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) + cmd.Flags().Int64Var(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) - cmd.Flags().IntVar(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) + cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" cmd.Short = `List service principals.` diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 730f3fc1..05b27d8b 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -298,12 +298,12 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) - cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) + cmd.Flags().Int64Var(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) - cmd.Flags().IntVar(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) + cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" cmd.Short = `List users.` diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 7846c0e0..e9e48fde 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -327,6 +327,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) 
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) + cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it.`) cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of catalog.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of catalog.`) diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 80608718..59939a49 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -74,11 +74,11 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.Definition, "definition", createReq.Definition, `Policy definition document expressed in Databricks Cluster Policy Definition Language.`) + cmd.Flags().StringVar(&createReq.Definition, "definition", createReq.Definition, `Policy definition document expressed in [Databricks Cluster Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html).`) cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `Additional human-readable description of the cluster policy.`) // TODO: array: libraries cmd.Flags().Int64Var(&createReq.MaxClustersPerUser, "max-clusters-per-user", createReq.MaxClustersPerUser, `Max number of clusters per user that can be active using this policy.`) - cmd.Flags().StringVar(&createReq.PolicyFamilyDefinitionOverrides, "policy-family-definition-overrides", createReq.PolicyFamilyDefinitionOverrides, `Policy definition JSON document expressed in Databricks Policy Definition Language.`) + cmd.Flags().StringVar(&createReq.PolicyFamilyDefinitionOverrides, "policy-family-definition-overrides", createReq.PolicyFamilyDefinitionOverrides, `Policy definition JSON document expressed in [Databricks Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html).`) cmd.Flags().StringVar(&createReq.PolicyFamilyId, "policy-family-id", createReq.PolicyFamilyId, `ID of the policy family.`) cmd.Use = "create NAME" @@ -242,11 +242,11 @@ func newEdit() *cobra.Command { // TODO: short flags cmd.Flags().Var(&editJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&editReq.Definition, "definition", editReq.Definition, `Policy definition document expressed in Databricks Cluster Policy Definition Language.`) + cmd.Flags().StringVar(&editReq.Definition, "definition", editReq.Definition, `Policy definition document expressed in [Databricks Cluster Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html).`) cmd.Flags().StringVar(&editReq.Description, "description", editReq.Description, `Additional human-readable description of the cluster policy.`) // TODO: array: libraries cmd.Flags().Int64Var(&editReq.MaxClustersPerUser, "max-clusters-per-user", editReq.MaxClustersPerUser, `Max number of clusters per user that can be active using this policy.`) - 
cmd.Flags().StringVar(&editReq.PolicyFamilyDefinitionOverrides, "policy-family-definition-overrides", editReq.PolicyFamilyDefinitionOverrides, `Policy definition JSON document expressed in Databricks Policy Definition Language.`) + cmd.Flags().StringVar(&editReq.PolicyFamilyDefinitionOverrides, "policy-family-definition-overrides", editReq.PolicyFamilyDefinitionOverrides, `Policy definition JSON document expressed in [Databricks Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html).`) cmd.Flags().StringVar(&editReq.PolicyFamilyId, "policy-family-id", editReq.PolicyFamilyId, `ID of the policy family.`) cmd.Use = "edit POLICY_ID NAME" diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index d4765235..ac7f7ba4 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -283,12 +283,12 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) - cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) + cmd.Flags().Int64Var(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) - cmd.Flags().IntVar(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) + cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" cmd.Short = `List group details.` diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index ef473df2..85b1b286 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -342,93 +342,6 @@ func init() { }) } -// start enable-optimization command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var enableOptimizationOverrides []func( - *cobra.Command, - *catalog.UpdatePredictiveOptimization, -) - -func newEnableOptimization() *cobra.Command { - cmd := &cobra.Command{} - - var enableOptimizationReq catalog.UpdatePredictiveOptimization - var enableOptimizationJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&enableOptimizationJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Use = "enable-optimization METASTORE_ID ENABLE" - cmd.Short = `Toggle predictive optimization on the metastore.` - cmd.Long = `Toggle predictive optimization on the metastore. - - Enables or disables predictive optimization on the metastore.` - - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'metastore_id', 'enable' in your JSON input") - } - return nil - } - check := cobra.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - err = enableOptimizationJson.Unmarshal(&enableOptimizationReq) - if err != nil { - return err - } - } - if !cmd.Flags().Changed("json") { - enableOptimizationReq.MetastoreId = args[0] - } - if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &enableOptimizationReq.Enable) - if err != nil { - return fmt.Errorf("invalid ENABLE: %s", args[1]) - } - } - - response, err := w.Metastores.EnableOptimization(ctx, enableOptimizationReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range enableOptimizationOverrides { - fn(cmd, &enableOptimizationReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEnableOptimization()) - }) -} - // start get command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index 8b42281a..3313bfdb 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -359,6 +359,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) + cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of schema, relative to parent catalog.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`) // TODO: map via StringToStringVar: properties diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index 134e8c1f..4068698b 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -282,12 +282,12 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) - cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) + cmd.Flags().Int64Var(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, 
`Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) - cmd.Flags().IntVar(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) + cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" cmd.Short = `List service principals.` diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 3ef9a7e0..c1d669de 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -16,16 +16,24 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "settings", - Short: `// TODO(yuyuan.tang) to add the description for the setting.`, - Long: `// TODO(yuyuan.tang) to add the description for the setting`, + Use: "settings", + Short: `The default namespace setting API allows users to configure the default namespace for a Databricks workspace.`, + Long: `The default namespace setting API allows users to configure the default + namespace for a Databricks workspace. + + Through this API, users can retrieve, set, or modify the default namespace + used when queries do not reference a fully qualified three-level name. For + example, if you use the API to set 'retail_prod' as the default catalog, then + a query 'SELECT * FROM myTable' would reference the object + 'retail_prod.default.myTable' (the schema 'default' is always assumed). + + This setting requires a restart of clusters and SQL warehouses to take effect. + Additionally, the default namespace only applies when using Unity + Catalog-enabled compute.`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Apply optional overrides to this command. @@ -53,10 +61,14 @@ func newDeleteDefaultWorkspaceNamespace() *cobra.Command { // TODO: short flags cmd.Use = "delete-default-workspace-namespace ETAG" - cmd.Short = `Delete the default namespace.` - cmd.Long = `Delete the default namespace. + cmd.Short = `Delete the default namespace setting.` + cmd.Long = `Delete the default namespace setting. - Deletes the default namespace.` + Deletes the default namespace setting for the workspace. A fresh etag needs to + be provided in DELETE requests (as a query parameter). The etag can be + retrieved by making a GET request before the DELETE request. If the setting is + updated/deleted concurrently, DELETE will fail with 409 and the request will + need to be retried by using the fresh etag in the 409 response.` cmd.Annotations = make(map[string]string) @@ -114,10 +126,10 @@ func newReadDefaultWorkspaceNamespace() *cobra.Command { // TODO: short flags cmd.Use = "read-default-workspace-namespace ETAG" - cmd.Short = `Get the default namespace.` - cmd.Long = `Get the default namespace. + cmd.Short = `Get the default namespace setting.` + cmd.Long = `Get the default namespace setting. 
- Gets the default namespace.` + Gets the default namespace setting.` cmd.Annotations = make(map[string]string) @@ -176,19 +188,21 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updateDefaultWorkspaceNamespaceJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&updateDefaultWorkspaceNamespaceReq.AllowMissing, "allow-missing", updateDefaultWorkspaceNamespaceReq.AllowMissing, `This should always be set to true for Settings RPCs.`) - cmd.Flags().StringVar(&updateDefaultWorkspaceNamespaceReq.FieldMask, "field-mask", updateDefaultWorkspaceNamespaceReq.FieldMask, `Field mask required to be passed into the PATCH request.`) + cmd.Flags().BoolVar(&updateDefaultWorkspaceNamespaceReq.AllowMissing, "allow-missing", updateDefaultWorkspaceNamespaceReq.AllowMissing, `This should always be set to true for Settings API.`) + cmd.Flags().StringVar(&updateDefaultWorkspaceNamespaceReq.FieldMask, "field-mask", updateDefaultWorkspaceNamespaceReq.FieldMask, `Field mask is required to be passed into the PATCH request.`) // TODO: complex arg: setting cmd.Use = "update-default-workspace-namespace" - cmd.Short = `Updates the default namespace setting.` - cmd.Long = `Updates the default namespace setting. + cmd.Short = `Update the default namespace setting.` + cmd.Long = `Update the default namespace setting. Updates the default namespace setting for the workspace. A fresh etag needs to - be provided in PATCH requests (as part the setting field). The etag can be + be provided in PATCH requests (as part of the setting field). The etag can be retrieved by making a GET request before the PATCH request. Note that if the setting does not exist, GET will return a NOT_FOUND error and the etag will be - present in the error response, which should be set in the PATCH request.` + present in the error response, which should be set in the PATCH request. 
If + the setting is updated concurrently, PATCH will fail with 409 and the request + will need to be retried by using the fresh etag in the 409 response.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index daa95df3..b44237cf 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -395,12 +395,12 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.Attributes, "attributes", listReq.Attributes, `Comma-separated list of attributes to return in response.`) - cmd.Flags().IntVar(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) + cmd.Flags().Int64Var(&listReq.Count, "count", listReq.Count, `Desired number of results per page.`) cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) - cmd.Flags().IntVar(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) + cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" cmd.Short = `List users.` diff --git a/go.mod b/go.mod index b70341cd..8d09c3df 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.25.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.26.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.4.0 // BSD-3-Clause @@ -23,9 +23,9 @@ require ( github.com/stretchr/testify v1.8.4 // MIT golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/mod v0.14.0 - golang.org/x/oauth2 v0.14.0 + golang.org/x/oauth2 v0.15.0 golang.org/x/sync v0.5.0 - golang.org/x/term v0.14.0 + golang.org/x/term v0.15.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) @@ -36,7 +36,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.23.1 // indirect + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -54,13 +54,13 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.15.0 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/sys v0.14.0 // indirect - golang.org/x/time v0.4.0 // indirect - google.golang.org/api v0.150.0 // indirect + golang.org/x/crypto v0.16.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/api v0.152.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect google.golang.org/grpc v1.59.0 // indirect google.golang.org/protobuf v1.31.0 // indirect 
gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 8f0f4157..4b4ade21 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= -cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -31,8 +31,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.25.0 h1:qEpYHQ18HHqLIsIXXHhixakTtt6Q0tT3m34xws6BuZ8= -github.com/databricks/databricks-sdk-go v0.25.0/go.mod h1:s3/f2T8UGyKkcMywIyporj/Kb/lsiWkiksT/C84Swrs= +github.com/databricks/databricks-sdk-go v0.26.0 h1:RItNgdWm+5kWYSzgtflWFp5T+OvIEVNxPnPbPYsXaaY= +github.com/databricks/databricks-sdk-go v0.26.0/go.mod h1:cyFYsqaDiIdaKPdNAuh+YsMUL1k9Lt02JB/72+zgCxg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -163,8 +163,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= @@ -187,11 +187,11 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod 
h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= -golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -216,15 +216,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -234,8 +234,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY= -golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -248,8 +248,8 @@ golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools 
v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE= -google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= +google.golang.org/api v0.152.0 h1:t0r1vPnfMc260S2Ci+en7kfCZaLOPs5KI0sVV/6jZrY= +google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -257,8 +257,8 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From 94a9fe4385d7d72412bc52e42b98e9d6100af96a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 29 Nov 2023 14:50:17 +0100 Subject: [PATCH 260/310] No need to fetch repository history when running tests (#1022) Test runs don't need access to the repository history and only need the commit being tested. --- .github/workflows/push.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 617238c2..24856381 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -30,9 +30,6 @@ jobs: - name: Checkout repository and submodules uses: actions/checkout@v4 - - name: Unshallow - run: git fetch --prune --unshallow - - name: Setup Go uses: actions/setup-go@v4 with: From 3338cfc4550a0602626d7af7d51e05d6caccdfb2 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 29 Nov 2023 15:06:51 +0100 Subject: [PATCH 261/310] Discontinue 32-bit Windows build (#1024) ## Changes Build failure for 32-bit Windows binary due to integer overflow in the SDK. We don't test 32-bit anywhere. I propose we stop publishing these builds until we receive evidence they are still useful. 
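To illustrate the failure class (a minimal sketch with a hypothetical constant, not the actual SDK code): on `GOARCH=386`, Go's `int` is 32 bits wide, so a value that needs 64 bits fails to compile when assigned to an `int`.

```go
package main

import "fmt"

// maxResults is an untyped constant that exceeds the 32-bit int range.
// On 64-bit targets this program compiles and prints the value; with
// GOARCH=386 the compiler rejects it: "constant 4294967296 overflows int".
const maxResults = 1 << 32

func main() {
	var n int = maxResults // compile-time overflow on 32-bit platforms
	fmt.Println(n)
}
```

The `IntVar` to `Int64Var` flag changes earlier in this series are plausibly the same pattern surfacing on the CLI side, as the SDK widened fields to 64 bits.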
## Tests n/a --- .goreleaser.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index ef5846d2..0cf87a9c 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -35,12 +35,6 @@ builds: goarch: - amd64 - arm64 - - '386' - ignore: - - goos: darwin - goarch: '386' - - goos: linux - goarch: '386' binary: databricks archives: - format: zip From 09991da5349a89a3cfaca54ce9350154823a5016 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 29 Nov 2023 15:19:20 +0100 Subject: [PATCH 262/310] Release v0.210.0 (#1023) This release includes the new `databricks labs` command to install, manage, and run Databricks Labs projects. CLI: * Add `--debug` as shortcut for `--log-level debug` ([#964](https://github.com/databricks/cli/pull/964)). * Improved usability of `databricks auth login ... --configure-cluster` ([#956](https://github.com/databricks/cli/pull/956)). * Make `databricks configure` save only explicit fields ([#973](https://github.com/databricks/cli/pull/973)). * Add `databricks labs` command group ([#914](https://github.com/databricks/cli/pull/914)). * Tolerate missing .databrickscfg file during `databricks auth login` ([#1003](https://github.com/databricks/cli/pull/1003)). * Add `--configure-cluster` flag to configure command ([#1005](https://github.com/databricks/cli/pull/1005)). * Fix bug where the account or workspace client could be `nil` ([#1020](https://github.com/databricks/cli/pull/1020)). Bundles: * Do not allow empty descriptions for bundle template inputs ([#967](https://github.com/databricks/cli/pull/967)). * Added support for top-level permissions ([#928](https://github.com/databricks/cli/pull/928)). * Allow jobs to be manually unpaused in development mode ([#885](https://github.com/databricks/cli/pull/885)). * Fix template initialization from current working directory ([#976](https://github.com/databricks/cli/pull/976)). * Add `--tag` and `--branch` options to bundle init command ([#975](https://github.com/databricks/cli/pull/975)). * Work around DLT issue with `$PYTHONPATH` not being set correctly ([#999](https://github.com/databricks/cli/pull/999)). * Enable `spark_jar_task` with local JAR libraries ([#993](https://github.com/databricks/cli/pull/993)). * Pass `USERPROFILE` environment variable to Terraform ([#1001](https://github.com/databricks/cli/pull/1001)). * Improve error message when path is not a bundle template ([#985](https://github.com/databricks/cli/pull/985)). * Correctly overwrite local state if remote state is newer ([#1008](https://github.com/databricks/cli/pull/1008)). * Add mlops-stacks to the default `databricks bundle init` prompt ([#988](https://github.com/databricks/cli/pull/988)). * Do not add wheel content hash in uploaded Python wheel path ([#1015](https://github.com/databricks/cli/pull/1015)). * Do not replace pipeline libraries if there are no matches for pattern ([#1021](https://github.com/databricks/cli/pull/1021)). Internal: * Update CLI version in the VS Code extension during release ([#1014](https://github.com/databricks/cli/pull/1014)). API Changes: * Changed `databricks functions create` command. * Changed `databricks metastores create` command with new required argument order. * Removed `databricks metastores enable-optimization` command. * Removed `databricks account o-auth-enrollment` command group. * Removed `databricks apps delete` command. * Removed `databricks apps get` command. * Added `databricks apps delete-app` command. * Added `databricks apps get-app` command. 
* Added `databricks apps get-app-deployment-status` command.
* Added `databricks apps get-apps` command.
* Added `databricks apps get-events` command.
* Added `databricks account network-connectivity` command group.

OpenAPI commit 22f09783eb8a84d52026f856be3b2068f9498db3 (2023-11-23)

Dependency updates:
* Bump golang.org/x/term from 0.13.0 to 0.14.0 ([#981](https://github.com/databricks/cli/pull/981)).
* Bump github.com/hashicorp/terraform-json from 0.17.1 to 0.18.0 ([#979](https://github.com/databricks/cli/pull/979)).
* Bump golang.org/x/oauth2 from 0.13.0 to 0.14.0 ([#982](https://github.com/databricks/cli/pull/982)).
* Bump github.com/databricks/databricks-sdk-go from 0.24.0 to 0.25.0 ([#980](https://github.com/databricks/cli/pull/980)).
* Bump github.com/databricks/databricks-sdk-go from 0.25.0 to 0.26.0 ([#1019](https://github.com/databricks/cli/pull/1019)).

---
 CHANGELOG.md | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fba60a06..7a40523c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,59 @@
 # Version changelog
 
+## 0.210.0
+
+This release includes the new `databricks labs` command to install, manage, and run Databricks Labs projects.
+
+CLI:
+ * Add `--debug` as shortcut for `--log-level debug` ([#964](https://github.com/databricks/cli/pull/964)).
+ * Improved usability of `databricks auth login ... --configure-cluster` ([#956](https://github.com/databricks/cli/pull/956)).
+ * Make `databricks configure` save only explicit fields ([#973](https://github.com/databricks/cli/pull/973)).
+ * Add `databricks labs` command group ([#914](https://github.com/databricks/cli/pull/914)).
+ * Tolerate missing .databrickscfg file during `databricks auth login` ([#1003](https://github.com/databricks/cli/pull/1003)).
+ * Add `--configure-cluster` flag to configure command ([#1005](https://github.com/databricks/cli/pull/1005)).
+ * Fix bug where the account or workspace client could be `nil` ([#1020](https://github.com/databricks/cli/pull/1020)).
+
+Bundles:
+ * Do not allow empty descriptions for bundle template inputs ([#967](https://github.com/databricks/cli/pull/967)).
+ * Added support for top-level permissions ([#928](https://github.com/databricks/cli/pull/928)).
+ * Allow jobs to be manually unpaused in development mode ([#885](https://github.com/databricks/cli/pull/885)).
+ * Fix template initialization from current working directory ([#976](https://github.com/databricks/cli/pull/976)).
+ * Add `--tag` and `--branch` options to bundle init command ([#975](https://github.com/databricks/cli/pull/975)).
+ * Work around DLT issue with `$PYTHONPATH` not being set correctly ([#999](https://github.com/databricks/cli/pull/999)).
+ * Enable `spark_jar_task` with local JAR libraries ([#993](https://github.com/databricks/cli/pull/993)).
+ * Pass `USERPROFILE` environment variable to Terraform ([#1001](https://github.com/databricks/cli/pull/1001)).
+ * Improve error message when path is not a bundle template ([#985](https://github.com/databricks/cli/pull/985)).
+ * Correctly overwrite local state if remote state is newer ([#1008](https://github.com/databricks/cli/pull/1008)).
+ * Add mlops-stacks to the default `databricks bundle init` prompt ([#988](https://github.com/databricks/cli/pull/988)).
+ * Do not add wheel content hash in uploaded Python wheel path ([#1015](https://github.com/databricks/cli/pull/1015)).
+ * Do not replace pipeline libraries if there are no matches for pattern ([#1021](https://github.com/databricks/cli/pull/1021)). + +Internal: + * Update CLI version in the VS Code extension during release ([#1014](https://github.com/databricks/cli/pull/1014)). + +API Changes: + * Changed `databricks functions create` command. + * Changed `databricks metastores create` command with new required argument order. + * Removed `databricks metastores enable-optimization` command. + * Removed `databricks account o-auth-enrollment` command group. + * Removed `databricks apps delete` command. + * Removed `databricks apps get` command. + * Added `databricks apps delete-app` command. + * Added `databricks apps get-app` command. + * Added `databricks apps get-app-deployment-status` command. + * Added `databricks apps get-apps` command. + * Added `databricks apps get-events` command. + * Added `databricks account network-connectivity` command group. + +OpenAPI commit 22f09783eb8a84d52026f856be3b2068f9498db3 (2023-11-23) + +Dependency updates: + * Bump golang.org/x/term from 0.13.0 to 0.14.0 ([#981](https://github.com/databricks/cli/pull/981)). + * Bump github.com/hashicorp/terraform-json from 0.17.1 to 0.18.0 ([#979](https://github.com/databricks/cli/pull/979)). + * Bump golang.org/x/oauth2 from 0.13.0 to 0.14.0 ([#982](https://github.com/databricks/cli/pull/982)). + * Bump github.com/databricks/databricks-sdk-go from 0.24.0 to 0.25.0 ([#980](https://github.com/databricks/cli/pull/980)). + * Bump github.com/databricks/databricks-sdk-go from 0.25.0 to 0.26.0 ([#1019](https://github.com/databricks/cli/pull/1019)). + ## 0.209.1 CLI: From f2969e91bda4ca12bf7d44fbf06031543abe9b0e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 29 Nov 2023 16:24:01 +0100 Subject: [PATCH 263/310] Use `fetch-tags` option in release workflows (#1025) ## Changes The manual unshallow step is superfluous and can be done as part of the `actions/checkout` step. Companion to #1022. ## Tests Manual trigger of the snapshot build workflow. --- .github/workflows/release-snapshot.yml | 10 +++++----- .github/workflows/release.yml | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index c3398a2b..51d3ad16 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -14,9 +14,9 @@ jobs: steps: - name: Checkout repository and submodules uses: actions/checkout@v4 - - - name: Unshallow - run: git fetch --prune --unshallow + with: + fetch-depth: 0 + fetch-tags: true - name: Setup Go uses: actions/setup-go@v4 @@ -56,7 +56,7 @@ jobs: - name: Update snapshot tag # Snapshot release may only be updated for commits to the main branch. - # if: github.ref == 'refs/heads/main' + if: github.ref == 'refs/heads/main' run: | git tag snapshot @@ -65,7 +65,7 @@ jobs: - name: Update snapshot release # Snapshot release may only be updated for commits to the main branch. 
- # if: github.ref == 'refs/heads/main' + if: github.ref == 'refs/heads/main' uses: softprops/action-gh-release@v1 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ea9e4690..519dcf43 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,9 +15,9 @@ jobs: steps: - name: Checkout repository and submodules uses: actions/checkout@v4 - - - name: Unshallow - run: git fetch --prune --unshallow + with: + fetch-depth: 0 + fetch-tags: true - name: Setup Go uses: actions/setup-go@v4 From 4d8d8257462da5f7bb648234b4169210a7407089 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 29 Nov 2023 17:32:42 +0100 Subject: [PATCH 264/310] Fixed panic when job has trigger and in development mode (#1026) ## Changes Fixed panic when job has trigger and in development mode --- bundle/config/mutator/process_target_mode.go | 4 ++-- .../mutator/process_target_mode_test.go | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index f9d2795a..9fdb82a1 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -54,10 +54,10 @@ func transformDevelopmentMode(b *bundle.Bundle) error { if r.Jobs[i].Schedule != nil && r.Jobs[i].Schedule.PauseStatus != jobs.PauseStatusUnpaused { r.Jobs[i].Schedule.PauseStatus = jobs.PauseStatusPaused } - if r.Jobs[i].Continuous != nil && r.Jobs[i].Schedule.PauseStatus != jobs.PauseStatusUnpaused { + if r.Jobs[i].Continuous != nil && r.Jobs[i].Continuous.PauseStatus != jobs.PauseStatusUnpaused { r.Jobs[i].Continuous.PauseStatus = jobs.PauseStatusPaused } - if r.Jobs[i].Trigger != nil && r.Jobs[i].Schedule.PauseStatus != jobs.PauseStatusUnpaused { + if r.Jobs[i].Trigger != nil && r.Jobs[i].Trigger.PauseStatus != jobs.PauseStatusUnpaused { r.Jobs[i].Trigger.PauseStatus = jobs.PauseStatusPaused } } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 0e360263..8feab191 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -62,6 +62,24 @@ func mockBundle(mode config.Mode) *bundle.Bundle { }, }, }, + "job3": { + JobSettings: &jobs.JobSettings{ + Name: "job3", + Trigger: &jobs.TriggerSettings{ + FileArrival: &jobs.FileArrivalTriggerConfiguration{ + Url: "test.com", + }, + }, + }, + }, + "job4": { + JobSettings: &jobs.JobSettings{ + Name: "job4", + Continuous: &jobs.Continuous{ + PauseStatus: jobs.PauseStatusPaused, + }, + }, + }, }, Pipelines: map[string]*resources.Pipeline{ "pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}}, @@ -205,6 +223,8 @@ func TestProcessTargetModeProduction(t *testing.T) { b.Config.Resources.Jobs["job1"].Permissions = permissions b.Config.Resources.Jobs["job1"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"} b.Config.Resources.Jobs["job2"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"} + b.Config.Resources.Jobs["job3"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"} + b.Config.Resources.Jobs["job4"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"} b.Config.Resources.Pipelines["pipeline1"].Permissions = permissions b.Config.Resources.Experiments["experiment1"].Permissions = permissions b.Config.Resources.Experiments["experiment2"].Permissions = permissions From 65458cbde6956c0b095796037697aa7f88bf878d Mon Sep 17 00:00:00 2001 From: Serge Smertin 
<259697+nfx@users.noreply.github.com>
Date: Wed, 29 Nov 2023 20:08:27 +0100
Subject: [PATCH 265/310] Fix `panic: $HOME is not set` (#1027)

This PR adds an error return to `env.UserHomeDir(ctx)`.

Fixes https://github.com/databricks/setup-cli/issues/73

---------

Co-authored-by: Pieter Noordhuis
---
 cmd/labs/clear_cache.go            | 10 ++--
 cmd/labs/labs.go                   |  4 +-
 cmd/labs/list.go                   |  5 +-
 cmd/labs/project/entrypoint.go     | 54 +++++++++++++++-------
 cmd/labs/project/fetcher.go        | 17 +++++--
 cmd/labs/project/helpers.go        | 11 +++--
 cmd/labs/project/installed.go      | 11 ++++-
 cmd/labs/project/installer.go      | 29 ++++++++----
 cmd/labs/project/installer_test.go |  8 ++--
 cmd/labs/project/project.go        | 74 +++++++++++++++---------------
 cmd/labs/project/proxy.go          |  6 +--
 cmd/labs/show.go                   | 10 ++--
 libs/databrickscfg/profiles.go     | 11 ++++-
 libs/env/context.go                | 11 +++--
 libs/env/context_test.go           |  3 +-
 15 files changed, 168 insertions(+), 96 deletions(-)

diff --git a/cmd/labs/clear_cache.go b/cmd/labs/clear_cache.go
index e2f531cf..e136c13c 100644
--- a/cmd/labs/clear_cache.go
+++ b/cmd/labs/clear_cache.go
@@ -19,13 +19,17 @@ func newClearCacheCommand() *cobra.Command {
 			if err != nil {
 				return err
 			}
-			_ = os.Remove(project.PathInLabs(ctx, "databrickslabs-repositories.json"))
+			cache, err := project.PathInLabs(ctx, "databrickslabs-repositories.json")
+			if err != nil {
+				return err
+			}
+			_ = os.Remove(cache)
 			logger := log.GetLogger(ctx)
 			for _, prj := range projects {
 				logger.Info("clearing labs project cache", slog.String("name", prj.Name))
-				_ = os.RemoveAll(prj.CacheDir(ctx))
+				_ = os.RemoveAll(prj.CacheDir())
 				// recreating empty cache folder for downstream apps to work normally
-				_ = prj.EnsureFoldersExist(ctx)
+				_ = prj.EnsureFoldersExist()
 			}
 			return nil
 		},
diff --git a/cmd/labs/labs.go b/cmd/labs/labs.go
index cccf8ac4..c8c8546a 100644
--- a/cmd/labs/labs.go
+++ b/cmd/labs/labs.go
@@ -4,6 +4,7 @@ import (
 	"context"
 
 	"github.com/databricks/cli/cmd/labs/project"
+	"github.com/databricks/cli/libs/log"
 	"github.com/spf13/cobra"
 )
 
@@ -30,7 +31,8 @@ func New(ctx context.Context) *cobra.Command {
 	)
 	all, err := project.Installed(ctx)
 	if err != nil {
-		panic(err)
+		log.Errorf(ctx, "Cannot retrieve installed labs: %s", err)
+		return cmd
 	}
 	for _, v := range all {
 		v.Register(cmd)
diff --git a/cmd/labs/list.go b/cmd/labs/list.go
index 07cc180c..b9624cbb 100644
--- a/cmd/labs/list.go
+++ b/cmd/labs/list.go
@@ -16,7 +16,10 @@ type labsMeta struct {
 }
 
 func allRepos(ctx context.Context) (github.Repositories, error) {
-	cacheDir := project.PathInLabs(ctx)
+	cacheDir, err := project.PathInLabs(ctx)
+	if err != nil {
+		return nil, err
+	}
 	cache := github.NewRepositoryCache("databrickslabs", cacheDir)
 	return cache.Load(ctx)
 }
diff --git a/cmd/labs/project/entrypoint.go b/cmd/labs/project/entrypoint.go
index fedd70a4..113bf321 100644
--- a/cmd/labs/project/entrypoint.go
+++ b/cmd/labs/project/entrypoint.go
@@ -54,15 +54,15 @@ func (e *Entrypoint) NeedsWarehouse() bool {
 
 func (e *Entrypoint) Prepare(cmd *cobra.Command) (map[string]string, error) {
 	ctx := cmd.Context()
-	libDir := e.EffectiveLibDir(ctx)
+	libDir := e.EffectiveLibDir()
 	environment := map[string]string{
 		"DATABRICKS_CLI_VERSION": build.GetInfo().Version,
-		"DATABRICKS_LABS_CACHE_DIR": e.CacheDir(ctx),
-		"DATABRICKS_LABS_CONFIG_DIR": e.ConfigDir(ctx),
-		"DATABRICKS_LABS_STATE_DIR": e.StateDir(ctx),
+		"DATABRICKS_LABS_CACHE_DIR": e.CacheDir(),
+		"DATABRICKS_LABS_CONFIG_DIR": e.ConfigDir(),
+		"DATABRICKS_LABS_STATE_DIR": e.StateDir(),
 		"DATABRICKS_LABS_LIB_DIR": libDir,
 	}
-	if e.IsPythonProject(ctx)
{ + if e.IsPythonProject() { e.preparePython(ctx, environment) } cfg, err := e.validLogin(cmd) @@ -112,7 +112,7 @@ func (e *Entrypoint) preparePython(ctx context.Context, environment map[string]s // Here we are also supporting the "src" layout for python projects. // // See https://docs.python.org/3/using/cmdline.html#envvar-PYTHONPATH - libDir := e.EffectiveLibDir(ctx) + libDir := e.EffectiveLibDir() // The intention for every install is to be sandboxed - not dependent on anything else than Python binary. // Having ability to override PYTHONPATH in the mix will break this assumption. Need strong evidence that // this is really needed. @@ -139,21 +139,28 @@ func (e *Entrypoint) joinPaths(paths ...string) string { return strings.Join(paths, string(os.PathListSeparator)) } -func (e *Entrypoint) envAwareConfig(ctx context.Context) *config.Config { +func (e *Entrypoint) envAwareConfig(ctx context.Context) (*config.Config, error) { + home, err := env.UserHomeDir(ctx) + if err != nil { + return nil, err + } return &config.Config{ - ConfigFile: filepath.Join(env.UserHomeDir(ctx), ".databrickscfg"), + ConfigFile: filepath.Join(home, ".databrickscfg"), Loaders: []config.Loader{ env.NewConfigLoader(ctx), config.ConfigAttributes, config.ConfigFile, }, - } + }, nil } -func (e *Entrypoint) envAwareConfigWithProfile(ctx context.Context, profile string) *config.Config { - cfg := e.envAwareConfig(ctx) +func (e *Entrypoint) envAwareConfigWithProfile(ctx context.Context, profile string) (*config.Config, error) { + cfg, err := e.envAwareConfig(ctx) + if err != nil { + return nil, err + } cfg.Profile = profile - return cfg + return cfg, nil } func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.Config, error) { @@ -164,11 +171,18 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C profileOverride := e.profileOverride(cmd) if profileOverride != "" { log.Infof(ctx, "Overriding login profile: %s", profileOverride) - return &loginConfig{}, e.envAwareConfigWithProfile(ctx, profileOverride), nil + cfg, err := e.envAwareConfigWithProfile(ctx, profileOverride) + if err != nil { + return nil, nil, err + } + return &loginConfig{}, cfg, nil } lc, err := e.loadLoginConfig(ctx) isNoLoginConfig := errors.Is(err, fs.ErrNotExist) - defaultConfig := e.envAwareConfig(ctx) + defaultConfig, err := e.envAwareConfig(ctx) + if err != nil { + return nil, nil, err + } if isNoLoginConfig && !e.IsBundleAware && e.isAuthConfigured(defaultConfig) { log.Debugf(ctx, "Login is configured via environment variables") return &loginConfig{}, defaultConfig, nil @@ -181,7 +195,11 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C } if e.IsAccountLevel { log.Debugf(ctx, "Using account-level login profile: %s", lc.AccountProfile) - return lc, e.envAwareConfigWithProfile(ctx, lc.AccountProfile), nil + cfg, err := e.envAwareConfigWithProfile(ctx, lc.AccountProfile) + if err != nil { + return nil, nil, err + } + return lc, cfg, nil } if e.IsBundleAware { err = root.TryConfigureBundle(cmd, []string{}) @@ -194,7 +212,11 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C } } log.Debugf(ctx, "Using workspace-level login profile: %s", lc.WorkspaceProfile) - return lc, e.envAwareConfigWithProfile(ctx, lc.WorkspaceProfile), nil + cfg, err := e.envAwareConfigWithProfile(ctx, lc.WorkspaceProfile) + if err != nil { + return nil, nil, err + } + return lc, cfg, nil } func (e *Entrypoint) validLogin(cmd *cobra.Command) (*config.Config, error) 
{ diff --git a/cmd/labs/project/fetcher.go b/cmd/labs/project/fetcher.go index b677bcd9..8f4fafde 100644 --- a/cmd/labs/project/fetcher.go +++ b/cmd/labs/project/fetcher.go @@ -29,7 +29,10 @@ func (d *devInstallation) Install(ctx context.Context) error { } _, err := d.Installer.validLogin(d.Command) if errors.Is(err, ErrNoLoginConfig) { - cfg := d.Installer.envAwareConfig(ctx) + cfg, err := d.Installer.envAwareConfig(ctx) + if err != nil { + return err + } lc := &loginConfig{Entrypoint: d.Installer.Entrypoint} _, err = lc.askWorkspace(ctx, cfg) if err != nil { @@ -39,7 +42,7 @@ func (d *devInstallation) Install(ctx context.Context) error { if err != nil { return fmt.Errorf("ask for account: %w", err) } - err = lc.EnsureFoldersExist(ctx) + err = lc.EnsureFoldersExist() if err != nil { return fmt.Errorf("folders: %w", err) } @@ -97,7 +100,10 @@ func NewUpgrader(cmd *cobra.Command, name string) (*installer, error) { if err != nil { return nil, fmt.Errorf("remote: %w", err) } - prj.folder = PathInLabs(cmd.Context(), name) + prj.folder, err = PathInLabs(cmd.Context(), name) + if err != nil { + return nil, err + } return &installer{ Project: prj, version: version, @@ -111,7 +117,10 @@ type fetcher struct { func (f *fetcher) checkReleasedVersions(cmd *cobra.Command, version string) (string, error) { ctx := cmd.Context() - cacheDir := PathInLabs(ctx, f.name, "cache") + cacheDir, err := PathInLabs(ctx, f.name, "cache") + if err != nil { + return "", err + } // `databricks labs isntall X` doesn't know which exact version to fetch, so first // we fetch all versions and then pick the latest one dynamically. versions, err := github.NewReleaseCache("databrickslabs", f.name, cacheDir).Load(ctx) diff --git a/cmd/labs/project/helpers.go b/cmd/labs/project/helpers.go index 9117d875..118c0ff0 100644 --- a/cmd/labs/project/helpers.go +++ b/cmd/labs/project/helpers.go @@ -12,10 +12,13 @@ import ( "github.com/databricks/cli/libs/env" ) -func PathInLabs(ctx context.Context, dirs ...string) string { - homdeDir := env.UserHomeDir(ctx) - prefix := []string{homdeDir, ".databricks", "labs"} - return filepath.Join(append(prefix, dirs...)...) 
+func PathInLabs(ctx context.Context, dirs ...string) (string, error) { + homeDir, err := env.UserHomeDir(ctx) + if err != nil { + return "", err + } + prefix := []string{homeDir, ".databricks", "labs"} + return filepath.Join(append(prefix, dirs...)...), nil } func tryLoadAndParseJSON[T any](jsonFile string) (*T, error) { diff --git a/cmd/labs/project/installed.go b/cmd/labs/project/installed.go index 77fee544..9a98a780 100644 --- a/cmd/labs/project/installed.go +++ b/cmd/labs/project/installed.go @@ -9,6 +9,7 @@ import ( "path/filepath" "github.com/databricks/cli/folders" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/log" ) @@ -26,7 +27,13 @@ func projectInDevMode(ctx context.Context) (*Project, error) { } func Installed(ctx context.Context) (projects []*Project, err error) { - labsDir, err := os.ReadDir(PathInLabs(ctx)) + root, err := PathInLabs(ctx) + if errors.Is(err, env.ErrNoHomeEnv) { + return nil, nil + } else if err != nil { + return nil, err + } + labsDir, err := os.ReadDir(root) if err != nil && !errors.Is(err, fs.ErrNotExist) { return nil, err } @@ -44,7 +51,7 @@ func Installed(ctx context.Context) (projects []*Project, err error) { if projectDev != nil && v.Name() == projectDev.Name { continue } - labsYml := PathInLabs(ctx, v.Name(), "lib", "labs.yml") + labsYml := filepath.Join(root, v.Name(), "lib", "labs.yml") prj, err := Load(ctx, labsYml) if errors.Is(err, fs.ErrNotExist) { continue diff --git a/cmd/labs/project/installer.go b/cmd/labs/project/installer.go index 2e09ed37..fa676819 100644 --- a/cmd/labs/project/installer.go +++ b/cmd/labs/project/installer.go @@ -55,7 +55,7 @@ func (h *hook) runHook(cmd *cobra.Command) error { if err != nil { return fmt.Errorf("prepare: %w", err) } - libDir := h.EffectiveLibDir(ctx) + libDir := h.EffectiveLibDir() args := []string{} if strings.HasSuffix(h.Script, ".py") { args = append(args, h.virtualEnvPython(ctx)) @@ -80,14 +80,20 @@ type installer struct { } func (i *installer) Install(ctx context.Context) error { - err := i.EnsureFoldersExist(ctx) + err := i.EnsureFoldersExist() if err != nil { return fmt.Errorf("folders: %w", err) } - i.folder = PathInLabs(ctx, i.Name) + i.folder, err = PathInLabs(ctx, i.Name) + if err != nil { + return err + } w, err := i.login(ctx) if err != nil && errors.Is(err, databrickscfg.ErrNoConfiguration) { - cfg := i.Installer.envAwareConfig(ctx) + cfg, err := i.Installer.envAwareConfig(ctx) + if err != nil { + return err + } w, err = databricks.NewWorkspaceClient((*databricks.Config)(cfg)) if err != nil { return fmt.Errorf("no ~/.databrickscfg: %w", err) @@ -138,7 +144,7 @@ func (i *installer) warningf(text string, v ...any) { } func (i *installer) cleanupLib(ctx context.Context) error { - libDir := i.LibDir(ctx) + libDir := i.LibDir() err := os.RemoveAll(libDir) if err != nil { return fmt.Errorf("remove all: %w", err) @@ -157,7 +163,10 @@ func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, err } cfg, err := i.metaEntrypoint(ctx).validLogin(i.cmd) if errors.Is(err, ErrNoLoginConfig) { - cfg = i.Installer.envAwareConfig(ctx) + cfg, err = i.Installer.envAwareConfig(ctx) + if err != nil { + return nil, err + } } else if err != nil { return nil, fmt.Errorf("valid: %w", err) } @@ -188,7 +197,7 @@ func (i *installer) downloadLibrary(ctx context.Context) error { if err != nil { return fmt.Errorf("cleanup: %w", err) } - libTarget := i.LibDir(ctx) + libTarget := i.LibDir() // we may support wheels, jars, and golang binaries. 
but those are not zipballs if i.IsZipball() { feedback <- fmt.Sprintf("Downloading and unpacking zipball for %s", i.version) @@ -254,10 +263,10 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr } func (i *installer) installPythonDependencies(ctx context.Context, spec string) error { - if !i.IsPythonProject(ctx) { + if !i.IsPythonProject() { return nil } - libDir := i.LibDir(ctx) + libDir := i.LibDir() log.Debugf(ctx, "Installing Python dependencies for: %s", libDir) // maybe we'll need to add call one of the two scripts: // - python3 -m ensurepip --default-pip @@ -281,6 +290,6 @@ func (i *installer) runInstallHook(ctx context.Context) error { if i.Installer.Script == "" { return nil } - log.Debugf(ctx, "Launching installer script %s in %s", i.Installer.Script, i.LibDir(ctx)) + log.Debugf(ctx, "Launching installer script %s in %s", i.Installer.Script, i.LibDir()) return i.Installer.runHook(i.cmd) } diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index b61026f2..60af43c6 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -105,7 +105,7 @@ func installerContext(t *testing.T, server *httptest.Server) context.Context { ctx = github.WithUserContentOverride(ctx, server.URL) ctx = env.WithUserHomeDir(ctx, t.TempDir()) // trick release cache to thing it went to github already - cachePath := project.PathInLabs(ctx, "blueprint", "cache") + cachePath, _ := project.PathInLabs(ctx, "blueprint", "cache") err := os.MkdirAll(cachePath, ownerRWXworldRX) require.NoError(t, err) bs := []byte(`{"refreshed_at": "2033-01-01T00:00:00.92857+02:00","data": [{"tag_name": "v0.3.15"}]}`) @@ -317,8 +317,8 @@ func TestInstallerWorksForDevelopment(t *testing.T) { // development installer assumes it's in the active virtualenv ctx = env.Set(ctx, "PYTHON_BIN", py) - - err = os.WriteFile(filepath.Join(env.UserHomeDir(ctx), ".databrickscfg"), []byte(fmt.Sprintf(` + home, _ := env.UserHomeDir(ctx) + err = os.WriteFile(filepath.Join(home, ".databrickscfg"), []byte(fmt.Sprintf(` [profile-one] host = %s token = ... 
@@ -399,7 +399,7 @@ func TestUpgraderWorksForReleases(t *testing.T) { py, _ = filepath.Abs(py) ctx = env.Set(ctx, "PYTHON_BIN", py) - cachePath := project.PathInLabs(ctx, "blueprint", "cache") + cachePath, _ := project.PathInLabs(ctx, "blueprint", "cache") bs := []byte(`{"refreshed_at": "2033-01-01T00:00:00.92857+02:00","data": [{"tag_name": "v0.4.0"}]}`) err := os.WriteFile(filepath.Join(cachePath, "databrickslabs-blueprint-releases.json"), bs, ownerRW) require.NoError(t, err) diff --git a/cmd/labs/project/project.go b/cmd/labs/project/project.go index 6adf9a3c..75f5e584 100644 --- a/cmd/labs/project/project.go +++ b/cmd/labs/project/project.go @@ -49,6 +49,11 @@ func readFromBytes(ctx context.Context, labsYmlRaw []byte) (*Project, error) { if project.Uninstaller != nil { project.Uninstaller.Entrypoint = e } + rootDir, err := PathInLabs(ctx, project.Name) + if err != nil { + return nil, err + } + project.rootDir = rootDir return &project, nil } @@ -63,7 +68,8 @@ type Project struct { MinPython string `yaml:"min_python"` Commands []*proxy `yaml:"commands,omitempty"` - folder string + folder string + rootDir string } func (p *Project) IsZipball() bool { @@ -108,22 +114,22 @@ func (p *Project) fileExists(name string) bool { return err == nil } -func (p *Project) projectFilePath(ctx context.Context, name string) string { - return filepath.Join(p.EffectiveLibDir(ctx), name) +func (p *Project) projectFilePath(name string) string { + return filepath.Join(p.EffectiveLibDir(), name) } -func (p *Project) IsPythonProject(ctx context.Context) bool { - if p.fileExists(p.projectFilePath(ctx, "setup.py")) { +func (p *Project) IsPythonProject() bool { + if p.fileExists(p.projectFilePath("setup.py")) { return true } - if p.fileExists(p.projectFilePath(ctx, "pyproject.toml")) { + if p.fileExists(p.projectFilePath("pyproject.toml")) { return true } return false } -func (p *Project) IsDeveloperMode(ctx context.Context) bool { - return p.folder != "" && !strings.HasPrefix(p.LibDir(ctx), p.folder) +func (p *Project) IsDeveloperMode() bool { + return p.folder != "" && !strings.HasPrefix(p.LibDir(), p.folder) } func (p *Project) HasFolder() bool { @@ -161,36 +167,32 @@ func (p *Project) Register(parent *cobra.Command) { } } -func (p *Project) rootDir(ctx context.Context) string { - return PathInLabs(ctx, p.Name) +func (p *Project) CacheDir() string { + return filepath.Join(p.rootDir, "cache") } -func (p *Project) CacheDir(ctx context.Context) string { - return filepath.Join(p.rootDir(ctx), "cache") +func (p *Project) ConfigDir() string { + return filepath.Join(p.rootDir, "config") } -func (p *Project) ConfigDir(ctx context.Context) string { - return filepath.Join(p.rootDir(ctx), "config") +func (p *Project) LibDir() string { + return filepath.Join(p.rootDir, "lib") } -func (p *Project) LibDir(ctx context.Context) string { - return filepath.Join(p.rootDir(ctx), "lib") -} - -func (p *Project) EffectiveLibDir(ctx context.Context) string { - if p.IsDeveloperMode(ctx) { +func (p *Project) EffectiveLibDir() string { + if p.IsDeveloperMode() { // developer is working on a local checkout, that is not inside of installed root return p.folder } - return p.LibDir(ctx) + return p.LibDir() } -func (p *Project) StateDir(ctx context.Context) string { - return filepath.Join(p.rootDir(ctx), "state") +func (p *Project) StateDir() string { + return filepath.Join(p.rootDir, "state") } -func (p *Project) EnsureFoldersExist(ctx context.Context) error { - dirs := []string{p.CacheDir(ctx), p.ConfigDir(ctx), p.LibDir(ctx), 
p.StateDir(ctx)} +func (p *Project) EnsureFoldersExist() error { + dirs := []string{p.CacheDir(), p.ConfigDir(), p.LibDir(), p.StateDir()} for _, v := range dirs { err := os.MkdirAll(v, ownerRWXworldRX) if err != nil { @@ -209,11 +211,11 @@ func (p *Project) Uninstall(cmd *cobra.Command) error { } ctx := cmd.Context() log.Infof(ctx, "Removing project: %s", p.Name) - return os.RemoveAll(p.rootDir(ctx)) + return os.RemoveAll(p.rootDir) } func (p *Project) virtualEnvPath(ctx context.Context) string { - if p.IsDeveloperMode(ctx) { + if p.IsDeveloperMode() { // When a virtual environment has been activated, the VIRTUAL_ENV environment variable // is set to the path of the environment. Since explicitly activating a virtual environment // is not required to use it, VIRTUAL_ENV cannot be relied upon to determine whether a virtual @@ -225,14 +227,14 @@ func (p *Project) virtualEnvPath(ctx context.Context) string { logger.Debugf(ctx, "(development mode) using active virtual environment from: %s", activatedVenv) return activatedVenv } - nonActivatedVenv, err := python.DetectVirtualEnvPath(p.EffectiveLibDir(ctx)) + nonActivatedVenv, err := python.DetectVirtualEnvPath(p.EffectiveLibDir()) if err == nil { logger.Debugf(ctx, "(development mode) using virtual environment from: %s", nonActivatedVenv) return nonActivatedVenv } } // by default, we pick Virtual Environment from DATABRICKS_LABS_STATE_DIR - return filepath.Join(p.StateDir(ctx), "venv") + return filepath.Join(p.StateDir(), "venv") } func (p *Project) virtualEnvPython(ctx context.Context) string { @@ -247,13 +249,13 @@ func (p *Project) virtualEnvPython(ctx context.Context) string { } func (p *Project) loginFile(ctx context.Context) string { - if p.IsDeveloperMode(ctx) { + if p.IsDeveloperMode() { // developers may not want to pollute the state in // ~/.databricks/labs/X/config while the version is not yet // released - return p.projectFilePath(ctx, ".databricks-login.json") + return p.projectFilePath(".databricks-login.json") } - return filepath.Join(p.ConfigDir(ctx), "login.json") + return filepath.Join(p.ConfigDir(), "login.json") } func (p *Project) loadLoginConfig(ctx context.Context) (*loginConfig, error) { @@ -268,11 +270,11 @@ func (p *Project) loadLoginConfig(ctx context.Context) (*loginConfig, error) { } func (p *Project) versionFile(ctx context.Context) string { - return filepath.Join(p.StateDir(ctx), "version.json") + return filepath.Join(p.StateDir(), "version.json") } func (p *Project) InstalledVersion(ctx context.Context) (*version, error) { - if p.IsDeveloperMode(ctx) { + if p.IsDeveloperMode() { return &version{ Version: "*", Date: time.Now(), @@ -300,12 +302,12 @@ func (p *Project) writeVersionFile(ctx context.Context, ver string) error { // giving users hints when they need to update their installations. 
func (p *Project) checkUpdates(cmd *cobra.Command) error { ctx := cmd.Context() - if p.IsDeveloperMode(ctx) { + if p.IsDeveloperMode() { // skipping update check for projects in developer mode, that // might not be installed yet return nil } - r := github.NewReleaseCache("databrickslabs", p.Name, p.CacheDir(ctx)) + r := github.NewReleaseCache("databrickslabs", p.Name, p.CacheDir()) versions, err := r.Load(ctx) if err != nil { return err diff --git a/cmd/labs/project/proxy.go b/cmd/labs/project/proxy.go index ae7df286..d872560a 100644 --- a/cmd/labs/project/proxy.go +++ b/cmd/labs/project/proxy.go @@ -60,7 +60,7 @@ func (cp *proxy) runE(cmd *cobra.Command, _ []string) error { cmd.OutOrStdout(), cmd.ErrOrStderr(), process.WithEnvs(envs)) - if errors.Is(err, fs.ErrNotExist) && cp.IsPythonProject(ctx) { + if errors.Is(err, fs.ErrNotExist) && cp.IsPythonProject() { msg := "cannot find Python %s. Please re-run: databricks labs install %s" return fmt.Errorf(msg, cp.MinPython, cp.Name) } @@ -113,9 +113,9 @@ func (cp *proxy) commandInput(cmd *cobra.Command) ([]string, error) { } args := []string{} ctx := cmd.Context() - if cp.IsPythonProject(ctx) { + if cp.IsPythonProject() { args = append(args, cp.virtualEnvPython(ctx)) - libDir := cp.EffectiveLibDir(cmd.Context()) + libDir := cp.EffectiveLibDir() entrypoint := filepath.Join(libDir, cp.Main) args = append(args, entrypoint) } diff --git a/cmd/labs/show.go b/cmd/labs/show.go index fc9d175c..1ae6498c 100644 --- a/cmd/labs/show.go +++ b/cmd/labs/show.go @@ -37,7 +37,7 @@ func newShowCommand() *cobra.Command { } name := args[0] for _, v := range installed { - isDev := name == "." && v.IsDeveloperMode(ctx) + isDev := name == "." && v.IsDeveloperMode() isMatch := name == v.Name if !(isDev || isMatch) { continue @@ -45,10 +45,10 @@ func newShowCommand() *cobra.Command { return cmdio.Render(ctx, map[string]any{ "name": v.Name, "description": v.Description, - "cache_dir": v.CacheDir(ctx), - "config_dir": v.ConfigDir(ctx), - "lib_dir": v.EffectiveLibDir(ctx), - "is_python": v.IsPythonProject(ctx), + "cache_dir": v.CacheDir(), + "config_dir": v.ConfigDir(), + "lib_dir": v.EffectiveLibDir(), + "is_python": v.IsPythonProject(), }) } return nil diff --git a/libs/databrickscfg/profiles.go b/libs/databrickscfg/profiles.go index 9f31eff6..c7bb2719 100644 --- a/libs/databrickscfg/profiles.go +++ b/libs/databrickscfg/profiles.go @@ -76,7 +76,10 @@ func GetPath(ctx context.Context) (string, error) { configFile = "~/.databrickscfg" } if strings.HasPrefix(configFile, "~") { - homedir := env.UserHomeDir(ctx) + homedir, err := env.UserHomeDir(ctx) + if err != nil { + return "", err + } configFile = filepath.Join(homedir, configFile[1:]) } return configFile, nil @@ -108,7 +111,11 @@ func LoadProfiles(ctx context.Context, fn ProfileMatchFunction) (file string, pr // Replace homedir with ~ if applicable. // This is to make the output more readable. 
file = filepath.Clean(f.Path()) - homedir := filepath.Clean(env.UserHomeDir(ctx)) + home, err := env.UserHomeDir(ctx) + if err != nil { + return "", nil, err + } + homedir := filepath.Clean(home) if strings.HasPrefix(file, homedir) { file = "~" + file[len(homedir):] } diff --git a/libs/env/context.go b/libs/env/context.go index 84518ad7..af4d1afa 100644 --- a/libs/env/context.go +++ b/libs/env/context.go @@ -2,7 +2,7 @@ package env import ( "context" - "fmt" + "errors" "os" "runtime" "strings" @@ -76,12 +76,15 @@ func WithUserHomeDir(ctx context.Context, value string) context.Context { return Set(ctx, homeEnvVar(), value) } -func UserHomeDir(ctx context.Context) string { +// ErrNoHomeEnv indicates the absence of $HOME env variable +var ErrNoHomeEnv = errors.New("$HOME is not set") + +func UserHomeDir(ctx context.Context) (string, error) { home := Get(ctx, homeEnvVar()) if home == "" { - panic(fmt.Errorf("$HOME is not set")) + return "", ErrNoHomeEnv } - return home + return home, nil } // All returns environment variables that are defined in both os.Environ diff --git a/libs/env/context_test.go b/libs/env/context_test.go index 5befe4ac..28a8d880 100644 --- a/libs/env/context_test.go +++ b/libs/env/context_test.go @@ -51,6 +51,7 @@ func TestContext(t *testing.T) { func TestHome(t *testing.T) { ctx := context.Background() ctx = WithUserHomeDir(ctx, "...") - home := UserHomeDir(ctx) + home, err := UserHomeDir(ctx) assert.Equal(t, "...", home) + assert.NoError(t, err) } From 4a228e6f1218f9c5ef8cc8c7e69a8d25d546bead Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 30 Nov 2023 10:51:52 +0100 Subject: [PATCH 266/310] Fix `databricks configure` if new profile is specified (#1030) ## Changes The code included the to-be-created profile in the configuration and that triggered the SDK to try and load it. Instead, we must use the specified host and token directly. ## Tests Manually. More integration test coverage tbd. --- cmd/configure/configure.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 1c4d2e6b..cfc44187 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -42,7 +42,11 @@ func configureInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config // Ask user to specify a cluster if not already set. if flags.ConfigureCluster && cfg.ClusterID == "" { - w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg)) + // Create workspace client with configuration without the profile name set. + w, err := databricks.NewWorkspaceClient(&databricks.Config{ + Host: cfg.Host, + Token: cfg.Token, + }) if err != nil { return err } From 10c9eca06f0ec1258a6c1e666583db7bc48874aa Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 30 Nov 2023 10:59:11 +0100 Subject: [PATCH 267/310] Filter out system clusters for `--configure-cluster` (#1031) ## Changes Only clusters with their source attribute equal to `UI` or `API` should be presented in the dropdown. ## Tests Unit test and manual confirmation. 
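For context, the rule above boils down to a small predicate on the cluster source. Below is a self-contained sketch; the `ClusterSource` type here is a stand-in assumption for the SDK's `compute.ClusterSource`, and the authoritative implementation is in the diff that follows.

```go
package main

import "fmt"

// ClusterSource stands in for the SDK's string-backed enum.
type ClusterSource string

// keepUserCluster keeps only clusters created through the UI or an API
// call; everything else (job runs, pipeline maintenance, unknown values)
// is treated as a system cluster and filtered out.
func keepUserCluster(source ClusterSource) bool {
	switch source {
	case "UI", "API":
		return true
	}
	return false
}

func main() {
	for _, s := range []ClusterSource{"UI", "API", "JOB", "PIPELINE"} {
		fmt.Printf("%-8s keep=%v\n", s, keepUserCluster(s))
	}
}
```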
--- cmd/configure/configure.go | 2 +- libs/databrickscfg/cfgpickers/clusters.go | 12 ++++++++++ .../databrickscfg/cfgpickers/clusters_test.go | 22 +++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index cfc44187..1e94ddae 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -50,7 +50,7 @@ func configureInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config if err != nil { return err } - clusterID, err := cfgpickers.AskForCluster(cmd.Context(), w) + clusterID, err := cfgpickers.AskForCluster(cmd.Context(), w, cfgpickers.WithoutSystemClusters()) if err != nil { return err } diff --git a/libs/databrickscfg/cfgpickers/clusters.go b/libs/databrickscfg/cfgpickers/clusters.go index ac037698..d955be35 100644 --- a/libs/databrickscfg/cfgpickers/clusters.go +++ b/libs/databrickscfg/cfgpickers/clusters.go @@ -118,6 +118,18 @@ func WithDatabricksConnect(minVersion string) func(*compute.ClusterDetails, *iam } } +// WithoutSystemClusters removes clusters created for system purposes (e.g. job runs, pipeline maintenance, etc.). +// It does this by keeping only clusters created through the UI or an API call. +func WithoutSystemClusters() func(*compute.ClusterDetails, *iam.User) bool { + return func(cluster *compute.ClusterDetails, me *iam.User) bool { + switch cluster.ClusterSource { + case compute.ClusterSourceApi, compute.ClusterSourceUi: + return true + } + return false + } +} + func loadInteractiveClusters(ctx context.Context, w *databricks.WorkspaceClient, filters []clusterFilter) ([]compatibleCluster, error) { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "Loading list of clusters to select from" diff --git a/libs/databrickscfg/cfgpickers/clusters_test.go b/libs/databrickscfg/cfgpickers/clusters_test.go index 362d6904..8afcd6d0 100644 --- a/libs/databrickscfg/cfgpickers/clusters_test.go +++ b/libs/databrickscfg/cfgpickers/clusters_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/databricks-sdk-go/qa" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -44,6 +45,27 @@ func TestIsCompatibleWithSnapshots(t *testing.T) { }, "14.0")) } +func TestWithoutSystemClusters(t *testing.T) { + fn := WithoutSystemClusters() + + // Sources to exclude. + for _, v := range []string{ + "JOB", + "PIPELINE", + "SOME_UNKNOWN_VALUE", + } { + assert.False(t, fn(&compute.ClusterDetails{ClusterSource: compute.ClusterSource(v)}, nil)) + } + + // Sources to include. + for _, v := range []string{ + "UI", + "API", + } { + assert.True(t, fn(&compute.ClusterDetails{ClusterSource: compute.ClusterSource(v)}, nil)) + } +} + func TestFirstCompatibleCluster(t *testing.T) { cfg, server := qa.HTTPFixtures{ { From 1a1f1b1b4d00e9c51b437c98e68c3fcf9ae587fc Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 30 Nov 2023 11:49:22 +0100 Subject: [PATCH 268/310] Release v0.210.1 (#1032) This is a bugfix release to address issues with v0.210.0. CLI: * Fix `panic: $HOME is not set` ([#1027](https://github.com/databricks/cli/pull/1027)). * Fix `databricks configure` if new profile is specified ([#1030](https://github.com/databricks/cli/pull/1030)). * Filter out system clusters for `--configure-cluster` ([#1031](https://github.com/databricks/cli/pull/1031)). 
Bundles:
 * Fixed panic when job has trigger and in development mode ([#1026](https://github.com/databricks/cli/pull/1026)).

Internal:
 * Use `fetch-tags` option in release workflows ([#1025](https://github.com/databricks/cli/pull/1025)).

---
 CHANGELOG.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7a40523c..2cc418d1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,22 @@
 # Version changelog
 
+## 0.210.1
+
+This is a bugfix release to address issues with v0.210.0.
+
+CLI:
+ * Fix `panic: $HOME is not set` ([#1027](https://github.com/databricks/cli/pull/1027)).
+ * Fix `databricks configure` if new profile is specified ([#1030](https://github.com/databricks/cli/pull/1030)).
+ * Filter out system clusters for `--configure-cluster` ([#1031](https://github.com/databricks/cli/pull/1031)).
+
+Bundles:
+ * Fixed panic when job has trigger and in development mode ([#1026](https://github.com/databricks/cli/pull/1026)).
+
+Internal:
+ * Use `fetch-tags` option in release workflows ([#1025](https://github.com/databricks/cli/pull/1025)).
+
+
+
 ## 0.210.0
 
 This release includes the new `databricks labs` command to install, manage, and run Databricks Labs projects.

From 677926b78b34c95f65d7d12cd8688a21d020eaaf Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Thu, 30 Nov 2023 15:28:01 +0100
Subject: [PATCH 269/310] Fix panic when bundle auth resolution fails (#1002)

## Changes
CLI would panic if an invalid bundle auth is set up when running CLI
commands. This PR removes the panic and shows the error message directly
instead.

## Tests
The CWD is a bundle with:
```
workspace:
  profile: DEFAULT
```

Before:
```
shreyas.goenka@THW32HFW6T bundle-playground % cli clusters list
panic: resolve: /Users/shreyas.goenka/.databrickscfg has no DEFAULT profile configured. Config: profile=DEFAULT

goroutine 1 [running]:
```

After:
```
shreyas.goenka@THW32HFW6T bundle-playground % cli clusters list
Error: cannot resolve bundle auth configuration: resolve: /Users/shreyas.goenka/.databrickscfg has no DEFAULT profile configured. Config: profile=DEFAULT
```

```
shreyas.goenka@THW32HFW6T bundle-playground % DATABRICKS_CONFIG_FILE=/dev/null cli bundle deploy
Error: cannot resolve bundle auth configuration: resolve: /dev/null has no DEFAULT profile configured. Config: profile=DEFAULT, config_file=/dev/null.
Env: DATABRICKS_CONFIG_FILE ``` --- bundle/bundle.go | 10 +++++++- .../mutator/initialize_workspace_client.go | 25 +++++++++++++++++++ bundle/phases/initialize.go | 1 + cmd/root/auth.go | 6 ++++- cmd/root/bundle_test.go | 4 +-- 5 files changed, 42 insertions(+), 4 deletions(-) create mode 100644 bundle/config/mutator/initialize_workspace_client.go diff --git a/bundle/bundle.go b/bundle/bundle.go index b4f5ee10..ef22d9a8 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -121,10 +121,18 @@ func TryLoad(ctx context.Context) (*Bundle, error) { return Load(ctx, root) } +func (b *Bundle) InitializeWorkspaceClient() (*databricks.WorkspaceClient, error) { + client, err := b.Config.Workspace.Client() + if err != nil { + return nil, fmt.Errorf("cannot resolve bundle auth configuration: %w", err) + } + return client, nil +} + func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient { b.clientOnce.Do(func() { var err error - b.client, err = b.Config.Workspace.Client() + b.client, err = b.InitializeWorkspaceClient() if err != nil { panic(err) } diff --git a/bundle/config/mutator/initialize_workspace_client.go b/bundle/config/mutator/initialize_workspace_client.go new file mode 100644 index 00000000..afc38d4d --- /dev/null +++ b/bundle/config/mutator/initialize_workspace_client.go @@ -0,0 +1,25 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" +) + +type initializeWorkspaceClient struct{} + +func InitializeWorkspaceClient() bundle.Mutator { + return &initializeWorkspaceClient{} +} + +func (m *initializeWorkspaceClient) Name() string { + return "InitializeWorkspaceClient" +} + +// Apply initializes the workspace client for the bundle. We do this here so +// downstream calls to b.WorkspaceClient() do not panic if there's an error in the +// auth configuration. 
+func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) error { + _, err := b.InitializeWorkspaceClient() + return err +} diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index fb9e7b24..6d84b0e1 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -19,6 +19,7 @@ func Initialize() bundle.Mutator { return newPhase( "initialize", []bundle.Mutator{ + mutator.InitializeWorkspaceClient(), mutator.PopulateCurrentUser(), mutator.SetRunAs(), mutator.DefineDefaultWorkspaceRoot(), diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 99e91043..33f80e1f 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -152,7 +152,11 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { return err } if b := bundle.GetOrNil(cmd.Context()); b != nil { - cfg = b.WorkspaceClient().Config + client, err := b.InitializeWorkspaceClient() + if err != nil { + return err + } + cfg = client.Config } } diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index d7bae2d1..a3dec491 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -95,7 +95,7 @@ func TestBundleConfigureWithMismatchedProfile(t *testing.T) { cmd.Flag("profile").Value.Set("PROFILE-1") b := setup(t, cmd, "https://x.com") - assert.PanicsWithError(t, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com", func() { + assert.PanicsWithError(t, "cannot resolve bundle auth configuration: config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com", func() { b.WorkspaceClient() }) } @@ -118,7 +118,7 @@ func TestBundleConfigureWithMismatchedProfileEnvVariable(t *testing.T) { cmd := emptyCommand(t) b := setup(t, cmd, "https://x.com") - assert.PanicsWithError(t, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com", func() { + assert.PanicsWithError(t, "cannot resolve bundle auth configuration: config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com", func() { b.WorkspaceClient() }) } From 1f1ed6db53fd5d7424e951e1fc1675b2553c0f62 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 30 Nov 2023 15:28:51 +0100 Subject: [PATCH 270/310] Add versioning for bundle templates (#972) ## Changes This PR adds versioning for bundle templates. Right now there's only logic for the maximum version of templates supported. At some point in the future if we make a breaking template change we can also include a minimum version of template supported by the CLI. ## Tests Unit tests. --- libs/jsonschema/extension.go | 4 ++++ libs/template/config.go | 6 ++++++ libs/template/config_test.go | 35 +++++++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go index ffb77bd8..9127a0d6 100644 --- a/libs/jsonschema/extension.go +++ b/libs/jsonschema/extension.go @@ -26,4 +26,8 @@ type Extension struct { // If the CLI version is less than this value, then validation for this // schema will fail. MinDatabricksCliVersion string `json:"min_databricks_cli_version,omitempty"` + + // Version of the schema. This is used to determine if the schema is + // compatible with the current CLI version. 
+	Version *int `json:"version,omitempty"`
 }
diff --git a/libs/template/config.go b/libs/template/config.go
index 51283e03..508e7736 100644
--- a/libs/template/config.go
+++ b/libs/template/config.go
@@ -9,6 +9,9 @@ import (
 	"golang.org/x/exp/maps"
 )
 
+// The latest template schema version supported by the CLI
+const latestSchemaVersion = 1
+
 type config struct {
 	ctx    context.Context
 	values map[string]any
@@ -49,6 +52,9 @@ func validateSchema(schema *jsonschema.Schema) error {
 			return fmt.Errorf("property type %s is not supported by bundle templates", v.Type)
 		}
 	}
+	if schema.Version != nil && *schema.Version > latestSchemaVersion {
+		return fmt.Errorf("template schema version %d is not supported by this version of the CLI. Please upgrade your CLI to the latest version", *schema.Version)
+	}
 	return nil
 }
 
diff --git a/libs/template/config_test.go b/libs/template/config_test.go
index 69e7054f..d76952dc 100644
--- a/libs/template/config_test.go
+++ b/libs/template/config_test.go
@@ -2,6 +2,7 @@ package template
 
 import (
 	"context"
+	"fmt"
 	"testing"
 
 	"github.com/databricks/cli/cmd/root"
@@ -150,6 +151,40 @@ func TestTemplateValidateSchema(t *testing.T) {
 	assert.EqualError(t, err, "property type array is not supported by bundle templates")
}
 
+func TestTemplateValidateSchemaVersion(t *testing.T) {
+	version := latestSchemaVersion
+	schema := jsonschema.Schema{
+		Extension: jsonschema.Extension{
+			Version: &version,
+		},
+	}
+	assert.NoError(t, validateSchema(&schema))
+
+	version = latestSchemaVersion + 1
+	schema = jsonschema.Schema{
+		Extension: jsonschema.Extension{
+			Version: &version,
+		},
+	}
+	assert.EqualError(t, validateSchema(&schema), fmt.Sprintf("template schema version %d is not supported by this version of the CLI. Please upgrade your CLI to the latest version", version))
+
+	version = 5000
+	schema = jsonschema.Schema{
+		Extension: jsonschema.Extension{
+			Version: &version,
+		},
+	}
+	assert.EqualError(t, validateSchema(&schema), "template schema version 5000 is not supported by this version of the CLI. Please upgrade your CLI to the latest version")
+
+	version = 0
+	schema = jsonschema.Schema{
+		Extension: jsonschema.Extension{
+			Version: &version,
+		},
+	}
+	assert.NoError(t, validateSchema(&schema))
+}
+
 func TestTemplateEnumValidation(t *testing.T) {
 	schema := jsonschema.Schema{
 		Properties: map[string]*jsonschema.Schema{

From bdef0f7b23360fe2531511e7369a039fa52cdcdd Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Thu, 30 Nov 2023 17:07:45 +0100
Subject: [PATCH 271/310] Add support for conditional prompting in bundle init
 (#971)

## Changes
This PR introduces the `skip_prompt_if` extension to the jsonschema library.
If the inputs provided by the user match the JSON schema, then the prompt for
that property is skipped.

Right now only constant checks are supported, but if more complicated
conditionals are required in the future, this can be extended to support
`allOf`, `oneOf`, `anyOf`, etc., allowing template authors to specify
conditionals of arbitrary complexity.

## Tests
Unit tests and manually.
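
For illustration (not part of this change), a template schema could combine
`skip_prompt_if` with `const` as in the following minimal sketch; the property
names `cloud_provider` and `root_bucket` are invented:

```json
{
  "properties": {
    "cloud_provider": {
      "type": "string",
      "description": "Cloud provider this bundle targets",
      "enum": ["aws", "azure"],
      "default": "aws"
    },
    "root_bucket": {
      "type": "string",
      "description": "S3 bucket to store bundle state in",
      "default": "my-bundle-bucket",
      "skip_prompt_if": {
        "properties": {
          "cloud_provider": {
            "const": "azure"
          }
        }
      }
    }
  }
}
```

When the user answers `azure` for `cloud_provider`, the prompt for
`root_bucket` is skipped and its default is used instead. Note that a property
carrying a `skip_prompt_if` clause must declare a default; this is enforced by
`validateSchemaSkippedPropertiesHaveDefaults` in the diff below.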
--- libs/jsonschema/extension.go | 4 + libs/jsonschema/schema.go | 33 ++++ libs/jsonschema/schema_test.go | 55 ++++++ .../schema-load-int/schema-invalid-const.json | 9 + .../schema-load-int/schema-valid.json | 4 + libs/template/config.go | 47 ++++- libs/template/config_test.go | 169 ++++++++++++++++++ 7 files changed, 318 insertions(+), 3 deletions(-) create mode 100644 libs/jsonschema/testdata/schema-load-int/schema-invalid-const.json diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go index 9127a0d6..3e32caf1 100644 --- a/libs/jsonschema/extension.go +++ b/libs/jsonschema/extension.go @@ -27,6 +27,10 @@ type Extension struct { // schema will fail. MinDatabricksCliVersion string `json:"min_databricks_cli_version,omitempty"` + // Skip prompting if this schema is satisfied by the configuration already present. In + // that case the default value of the property is used instead. + SkipPromptIf *Schema `json:"skip_prompt_if,omitempty"` + // Version of the schema. This is used to determine if the schema is // compatible with the current CLI version. Version *int `json:"version,omitempty"` diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 83213791..443e7af6 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -20,6 +20,10 @@ type Schema struct { // IDE. This is manually injected here using schema.Docs Description string `json:"description,omitempty"` + // Expected value for the JSON object. The object value must be equal to this + // field if it's specified in the schema. + Const any `json:"const,omitempty"` + // Schemas for the fields of an struct. The keys are the first json tag. // The values are the schema for the type of the field Properties map[string]*Schema `json:"properties,omitempty"` @@ -118,6 +122,18 @@ func (schema *Schema) validateSchemaDefaultValueTypes() error { return nil } +func (schema *Schema) validateConstValueTypes() error { + for name, property := range schema.Properties { + if property.Const == nil { + continue + } + if err := validateType(property.Const, property.Type); err != nil { + return fmt.Errorf("type validation for const value of property %s failed: %w", name, err) + } + } + return nil +} + // Validate enum field values for properties are consistent with types. 
func (schema *Schema) validateSchemaEnumValueTypes() error { for name, property := range schema.Properties { @@ -203,14 +219,25 @@ func (schema *Schema) validateSchemaMinimumCliVersion(currentVersion string) fun } } +func (schema *Schema) validateSchemaSkippedPropertiesHaveDefaults() error { + for name, property := range schema.Properties { + if property.SkipPromptIf != nil && property.Default == nil { + return fmt.Errorf("property %q has a skip_prompt_if clause but no default value", name) + } + } + return nil +} + func (schema *Schema) validate() error { for _, fn := range []func() error{ schema.validateSchemaPropertyTypes, schema.validateSchemaDefaultValueTypes, schema.validateSchemaEnumValueTypes, + schema.validateConstValueTypes, schema.validateSchemaDefaultValueIsInEnums, schema.validateSchemaPattern, schema.validateSchemaMinimumCliVersion("v" + build.GetInfo().Version), + schema.validateSchemaSkippedPropertiesHaveDefaults, } { err := fn() if err != nil { @@ -248,6 +275,12 @@ func Load(path string) (*Schema, error) { return nil, fmt.Errorf("failed to parse default value for property %s: %w", name, err) } } + if property.Const != nil { + property.Const, err = toInteger(property.Const) + if err != nil { + return nil, fmt.Errorf("failed to parse const value for property %s: %w", name, err) + } + } for i, enum := range property.Enum { property.Enum[i], err = toInteger(enum) if err != nil { diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go index a750f44a..cf1f1276 100644 --- a/libs/jsonschema/schema_test.go +++ b/libs/jsonschema/schema_test.go @@ -48,6 +48,7 @@ func TestSchemaLoadIntegers(t *testing.T) { assert.NoError(t, err) assert.Equal(t, int64(1), schema.Properties["abc"].Default) assert.Equal(t, []any{int64(1), int64(2), int64(3)}, schema.Properties["abc"].Enum) + assert.Equal(t, int64(5), schema.Properties["def"].Const) } func TestSchemaLoadIntegersWithInvalidDefault(t *testing.T) { @@ -60,6 +61,11 @@ func TestSchemaLoadIntegersWithInvalidEnums(t *testing.T) { assert.EqualError(t, err, "failed to parse enum value 2.4 at index 1 for property abc: expected integer value, got: 2.4") } +func TestSchemaLoadIntergersWithInvalidConst(t *testing.T) { + _, err := Load("./testdata/schema-load-int/schema-invalid-const.json") + assert.EqualError(t, err, "failed to parse const value for property def: expected integer value, got: 5.1") +} + func TestSchemaValidateDefaultType(t *testing.T) { invalidSchema := &Schema{ Properties: map[string]*Schema{ @@ -250,3 +256,52 @@ func TestValidateSchemaMinimumCliVersion(t *testing.T) { err = s.validateSchemaMinimumCliVersion("v0.0.0-dev")() assert.NoError(t, err) } + +func TestValidateSchemaConstTypes(t *testing.T) { + s := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Const: "abc", + }, + }, + } + err := s.validate() + assert.NoError(t, err) + + s = &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Const: 123, + }, + }, + } + err = s.validate() + assert.EqualError(t, err, "type validation for const value of property foo failed: expected type string, but value is 123") +} + +func TestValidateSchemaSkippedPropertiesHaveDefaults(t *testing.T) { + s := &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Extension: Extension{SkipPromptIf: &Schema{}}, + }, + }, + } + err := s.validate() + assert.EqualError(t, err, "property \"foo\" has a skip_prompt_if clause but no default value") + + s = &Schema{ + Properties: map[string]*Schema{ + "foo": { + Type: "string", + 
Default: "abc", + Extension: Extension{SkipPromptIf: &Schema{}}, + }, + }, + } + err = s.validate() + assert.NoError(t, err) +} diff --git a/libs/jsonschema/testdata/schema-load-int/schema-invalid-const.json b/libs/jsonschema/testdata/schema-load-int/schema-invalid-const.json new file mode 100644 index 00000000..9c1b3c0d --- /dev/null +++ b/libs/jsonschema/testdata/schema-load-int/schema-invalid-const.json @@ -0,0 +1,9 @@ +{ + "type": "object", + "properties": { + "def": { + "type": "integer", + "const": 5.1 + } + } +} diff --git a/libs/jsonschema/testdata/schema-load-int/schema-valid.json b/libs/jsonschema/testdata/schema-load-int/schema-valid.json index a1167a6c..425d7c5a 100644 --- a/libs/jsonschema/testdata/schema-load-int/schema-valid.json +++ b/libs/jsonschema/testdata/schema-load-int/schema-valid.json @@ -5,6 +5,10 @@ "type": "integer", "default": 1, "enum": [1,2,3] + }, + "def": { + "type": "integer", + "const": 5 } } } diff --git a/libs/template/config.go b/libs/template/config.go index 508e7736..2b4d19d1 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -105,20 +105,61 @@ func (c *config) assignDefaultValues(r *renderer) error { return nil } +func (c *config) skipPrompt(p jsonschema.Property, r *renderer) (bool, error) { + // Config already has a value assigned. We don't have to prompt for a user input. + if _, ok := c.values[p.Name]; ok { + return true, nil + } + + if p.Schema.SkipPromptIf == nil { + return false, nil + } + + // Check if conditions specified by template author for skipping the prompt + // are satisfied. If they are not, we have to prompt for a user input. + for name, property := range p.Schema.SkipPromptIf.Properties { + if v, ok := c.values[name]; ok && v == property.Const { + continue + } + return false, nil + } + + if p.Schema.Default == nil { + return false, fmt.Errorf("property %s has skip_prompt_if set but no default value", p.Name) + } + + // Assign default value to property if we are skipping it. + if p.Schema.Type != jsonschema.StringType { + c.values[p.Name] = p.Schema.Default + return true, nil + } + + // Execute the default value as a template and assign it to the property. + var err error + c.values[p.Name], err = r.executeTemplate(p.Schema.Default.(string)) + if err != nil { + return false, err + } + return true, nil +} + // Prompts user for values for properties that do not have a value set yet func (c *config) promptForValues(r *renderer) error { for _, p := range c.schema.OrderedProperties() { name := p.Name property := p.Schema - // Config already has a value assigned - if _, ok := c.values[name]; ok { + // Skip prompting if we can. 
+ skip, err := c.skipPrompt(p, r) + if err != nil { + return err + } + if skip { continue } // Compute default value to display by converting it to a string var defaultVal string - var err error if property.Default != nil { defaultValRaw, err := property.DefaultString() if err != nil { diff --git a/libs/template/config_test.go b/libs/template/config_test.go index d76952dc..c4968ee1 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "testing" + "text/template" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/jsonschema" @@ -229,3 +230,171 @@ func TestTemplateSchemaErrorsWithEmptyDescription(t *testing.T) { _, err := newConfig(context.Background(), "./testdata/config-test-schema/invalid-test-schema.json") assert.EqualError(t, err, "template property property-without-description is missing a description") } + +func testRenderer() *renderer { + return &renderer{ + config: map[string]any{ + "fruit": "apples", + }, + baseTemplate: template.New(""), + } +} + +func TestPromptIsSkippedWhenEmpty(t *testing.T) { + c := config{ + ctx: context.Background(), + values: make(map[string]any), + schema: &jsonschema.Schema{ + Properties: map[string]*jsonschema.Schema{ + "always-skip": { + Type: "string", + Default: "I like {{.fruit}}", + Extension: jsonschema.Extension{ + SkipPromptIf: &jsonschema.Schema{}, + }, + }, + }, + }, + } + + // We should always skip the prompt here. An empty JSON schema by definition + // matches all possible configurations. + skip, err := c.skipPrompt(jsonschema.Property{ + Name: "always-skip", + Schema: c.schema.Properties["always-skip"], + }, testRenderer()) + assert.NoError(t, err) + assert.True(t, skip) + assert.Equal(t, "I like apples", c.values["always-skip"]) +} + +func TestPromptSkipErrorsWithEmptyDefault(t *testing.T) { + c := config{ + ctx: context.Background(), + values: make(map[string]any), + schema: &jsonschema.Schema{ + Properties: map[string]*jsonschema.Schema{ + "no-default": { + Type: "string", + Extension: jsonschema.Extension{ + SkipPromptIf: &jsonschema.Schema{}, + }, + }, + }, + }, + } + + _, err := c.skipPrompt(jsonschema.Property{ + Name: "no-default", + Schema: c.schema.Properties["no-default"], + }, testRenderer()) + assert.EqualError(t, err, "property no-default has skip_prompt_if set but no default value") +} + +func TestPromptIsSkippedIfValueIsAssigned(t *testing.T) { + c := config{ + ctx: context.Background(), + values: make(map[string]any), + schema: &jsonschema.Schema{ + Properties: map[string]*jsonschema.Schema{ + "already-assigned": { + Type: "string", + Default: "some-default-value", + }, + }, + }, + } + + c.values["already-assigned"] = "some-value" + skip, err := c.skipPrompt(jsonschema.Property{ + Name: "already-assigned", + Schema: c.schema.Properties["already-assigned"], + }, testRenderer()) + assert.NoError(t, err) + assert.True(t, skip) + assert.Equal(t, "some-value", c.values["already-assigned"]) +} + +func TestPromptIsSkipped(t *testing.T) { + c := config{ + ctx: context.Background(), + values: make(map[string]any), + schema: &jsonschema.Schema{ + Properties: map[string]*jsonschema.Schema{ + "abc": { + Type: "string", + }, + "def": { + Type: "integer", + }, + "xyz": { + Type: "string", + Default: "hello-world", + Extension: jsonschema.Extension{ + SkipPromptIf: &jsonschema.Schema{ + Properties: map[string]*jsonschema.Schema{ + "abc": { + Const: "foobar", + }, + "def": { + Const: 123, + }, + }, + }, + }, + }, + }, + }, + } + + // No skip condition 
defined. Prompt should not be skipped. + skip, err := c.skipPrompt(jsonschema.Property{ + Name: "abc", + Schema: c.schema.Properties["abc"], + }, testRenderer()) + assert.NoError(t, err) + assert.False(t, skip) + + // No values assigned to config. Prompt should not be skipped. + skip, err = c.skipPrompt(jsonschema.Property{ + Name: "xyz", + Schema: c.schema.Properties["xyz"], + }, testRenderer()) + assert.NoError(t, err) + assert.False(t, skip) + assert.NotContains(t, c.values, "xyz") + + // Values do not match skip condition. Prompt should not be skipped. + c.values["abc"] = "foo" + c.values["def"] = 123 + skip, err = c.skipPrompt(jsonschema.Property{ + Name: "xyz", + Schema: c.schema.Properties["xyz"], + }, testRenderer()) + assert.NoError(t, err) + assert.False(t, skip) + assert.NotContains(t, c.values, "xyz") + + // Values do not match skip condition. Prompt should not be skipped. + c.values["abc"] = "foobar" + c.values["def"] = 1234 + skip, err = c.skipPrompt(jsonschema.Property{ + Name: "xyz", + Schema: c.schema.Properties["xyz"], + }, testRenderer()) + assert.NoError(t, err) + assert.False(t, skip) + assert.NotContains(t, c.values, "xyz") + + // Values match skip condition. Prompt should be skipped. Default value should + // be assigned to "xyz". + c.values["abc"] = "foobar" + c.values["def"] = 123 + skip, err = c.skipPrompt(jsonschema.Property{ + Name: "xyz", + Schema: c.schema.Properties["xyz"], + }, testRenderer()) + assert.NoError(t, err) + assert.True(t, skip) + assert.Equal(t, "hello-world", c.values["xyz"]) +} From 76840176e33ba0c10c7e5de7356226f00174f12b Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 30 Nov 2023 17:22:23 +0100 Subject: [PATCH 272/310] Add documentation for positional args in commands generated from the Databricks OpenAPI specification (#1033) ## Changes This PR adds documentation for positional arguments in commands that are generated from the openapi spec. 
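
For example, with this change the long help text generated for
`databricks account budgets get` (see the regenerated
`cmd/account/budgets/budgets.go` below) reads roughly:

```
Get budget and its status.

Gets the budget specified by its UUID, including noncumulative status for each
day that the budget is configured to include.

Arguments:
  BUDGET_ID: Budget ID
```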
Note: the changes to `.gitattributes` will be revert / properly fixed in https://github.com/databricks/cli/pull/1012 --- .codegen/service.go.tmpl | 19 +- .gitattributes | 3 - cmd/account/access-control/access-control.go | 18 +- cmd/account/billable-usage/billable-usage.go | 8 +- cmd/account/budgets/budgets.go | 10 +- cmd/account/credentials/credentials.go | 10 +- .../custom-app-integration.go | 15 +- .../encryption-keys/encryption-keys.go | 10 +- cmd/account/groups/groups.go | 20 +- .../ip-access-lists/ip-access-lists.go | 10 +- cmd/account/log-delivery/log-delivery.go | 15 +- .../metastore-assignments.go | 28 ++- cmd/account/metastores/metastores.go | 15 +- .../network-connectivity.go | 47 +++- cmd/account/network-policy/network-policy.go | 22 +- cmd/account/networks/networks.go | 15 +- cmd/account/private-access/private-access.go | 25 +- .../published-app-integration.go | 15 +- .../service-principal-secrets.go | 16 +- .../service-principals/service-principals.go | 20 +- cmd/account/settings/settings.go | 22 +- .../storage-credentials.go | 28 ++- cmd/account/storage/storage.go | 10 +- cmd/account/users/users.go | 21 +- cmd/account/vpc-endpoints/vpc-endpoints.go | 15 +- .../workspace-assignment.go | 16 +- cmd/account/workspaces/workspaces.go | 20 +- cmd/workspace/apps/apps.go | 20 +- .../artifact-allowlists.go | 5 +- cmd/workspace/catalogs/catalogs.go | 20 +- cmd/workspace/clean-rooms/clean-rooms.go | 15 +- .../cluster-policies/cluster-policies.go | 43 +++- cmd/workspace/clusters/clusters.go | 86 +++++-- cmd/workspace/connections/connections.go | 10 +- cmd/workspace/experiments/experiments.go | 124 ++++++++-- .../external-locations/external-locations.go | 22 +- cmd/workspace/functions/functions.go | 24 +- .../git-credentials/git-credentials.go | 22 +- .../global-init-scripts.go | 23 +- cmd/workspace/grants/grants.go | 18 +- cmd/workspace/groups/groups.go | 20 +- .../instance-pools/instance-pools.go | 53 +++- .../instance-profiles/instance-profiles.go | 17 +- .../ip-access-lists/ip-access-lists.go | 10 +- cmd/workspace/jobs/jobs.go | 72 ++++-- cmd/workspace/libraries/libraries.go | 5 +- cmd/workspace/metastores/metastores.go | 38 ++- .../model-registry/model-registry.go | 231 +++++++++++++++--- .../model-versions/model-versions.go | 30 ++- cmd/workspace/permissions/permissions.go | 24 +- cmd/workspace/pipelines/pipelines.go | 36 ++- cmd/workspace/providers/providers.go | 26 +- .../recipient-activation.go | 10 +- cmd/workspace/recipients/recipients.go | 35 ++- .../registered-models/registered-models.go | 35 ++- cmd/workspace/repos/repos.go | 43 +++- cmd/workspace/schemas/schemas.go | 26 +- cmd/workspace/secrets/secrets.go | 51 +++- .../service-principals/service-principals.go | 20 +- .../serving-endpoints/serving-endpoints.go | 63 ++++- cmd/workspace/settings/settings.go | 22 +- cmd/workspace/shares/shares.go | 30 ++- .../storage-credentials.go | 20 +- .../system-schemas/system-schemas.go | 17 +- .../table-constraints/table-constraints.go | 9 +- cmd/workspace/tables/tables.go | 26 +- .../token-management/token-management.go | 16 +- cmd/workspace/tokens/tokens.go | 5 +- cmd/workspace/users/users.go | 21 +- cmd/workspace/volumes/volumes.go | 29 ++- cmd/workspace/warehouses/warehouses.go | 45 +++- .../workspace-bindings/workspace-bindings.go | 22 +- cmd/workspace/workspace/workspace.go | 58 ++++- 73 files changed, 1714 insertions(+), 356 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 5feb0c87..b5916cbe 100644 --- a/.codegen/service.go.tmpl +++ 
b/.codegen/service.go.tmpl @@ -126,11 +126,28 @@ func new{{.PascalName}}() *cobra.Command { {{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}} {{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt .Request) (eq 1 (len .Request.RequiredRequestBodyFields)) -}} + {{- $atleastOneArgumentWithDescription := false -}} + {{- if $hasPosArgs -}} + {{- range .Request.RequiredFields -}} + {{- if .HasComment -}} + {{- $atleastOneArgumentWithDescription = true -}} + {{- break -}} + {{- end -}} + {{- end -}} + {{- end -}} cmd.Use = "{{.KebabName}}{{if $hasPosArgs}}{{range .Request.RequiredFields}} {{.ConstantName}}{{end}}{{end}}" {{- if .Description }} cmd.Short = `{{.Summary | without "`"}}` - cmd.Long = `{{.Comment " " 80 | without "`"}}` + cmd.Long = `{{.Comment " " 80 | without "`"}} + {{- if $atleastOneArgumentWithDescription }} + + Arguments: + {{- range .Request.RequiredFields }} + {{ .ConstantName }}: {{.Comment " " 80 | without "`"}} + {{- end -}} + {{- end -}} + ` {{- end }} {{- if .IsPrivatePreview }} diff --git a/.gitattributes b/.gitattributes index 0a8a7191..ddd698a0 100755 --- a/.gitattributes +++ b/.gitattributes @@ -83,6 +83,3 @@ cmd/workspace/warehouses/warehouses.go linguist-generated=true cmd/workspace/workspace-bindings/workspace-bindings.go linguist-generated=true cmd/workspace/workspace-conf/workspace-conf.go linguist-generated=true cmd/workspace/workspace/workspace.go linguist-generated=true - -# Hide diff for Go structs generated from databricks terraform provider schema -bundle/internal/tf/schema/*.go linguist-generated=true diff --git a/cmd/account/access-control/access-control.go b/cmd/account/access-control/access-control.go index 01c076fb..36b69d01 100755 --- a/cmd/account/access-control/access-control.go +++ b/cmd/account/access-control/access-control.go @@ -59,7 +59,10 @@ func newGetAssignableRolesForResource() *cobra.Command { Gets all the roles that can be granted on an account level resource. A role is grantable if the rule set on the resource can contain an access rule of the - role.` + role. + + Arguments: + RESOURCE: The resource name for which assignable roles will be listed.` cmd.Annotations = make(map[string]string) @@ -122,7 +125,18 @@ func newGetRuleSet() *cobra.Command { Get a rule set by its name. A rule set is always attached to a resource and contains a list of access rules on the said resource. Currently only a default - rule set for each resource is supported.` + rule set for each resource is supported. + + Arguments: + NAME: The ruleset name associated with the request. + ETAG: Etag used for versioning. The response is at least as fresh as the eTag + provided. Etag is used for optimistic concurrency control as a way to help + prevent simultaneous updates of a rule set from overwriting each other. It + is strongly suggested that systems make use of the etag in the read -> + modify -> write pattern to perform rule set updates in order to avoid race + conditions that is get an etag from a GET rule set request, and pass it + with the PUT update request to identify the rule set version you are + updating.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/billable-usage/billable-usage.go b/cmd/account/billable-usage/billable-usage.go index 5e9b33f8..ec9b7a63 100755 --- a/cmd/account/billable-usage/billable-usage.go +++ b/cmd/account/billable-usage/billable-usage.go @@ -64,7 +64,13 @@ func newDownload() *cobra.Command { this API may hit a timeout after a few minutes. 
If you experience this, try to mitigate by calling the API with narrower date ranges. - [CSV file schema]: https://docs.databricks.com/administration-guide/account-settings/usage-analysis.html#schema` + [CSV file schema]: https://docs.databricks.com/administration-guide/account-settings/usage-analysis.html#schema + + Arguments: + START_MONTH: Format: YYYY-MM. First month to return billable usage logs for. This + field is required. + END_MONTH: Format: YYYY-MM. Last month to return billable usage logs for. This + field is required.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index 1a0c7a0a..d5ffe663 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -124,7 +124,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete budget.` cmd.Long = `Delete budget. - Deletes the budget specified by its UUID.` + Deletes the budget specified by its UUID. + + Arguments: + BUDGET_ID: Budget ID` cmd.Annotations = make(map[string]string) @@ -198,7 +201,10 @@ func newGet() *cobra.Command { cmd.Long = `Get budget and its status. Gets the budget specified by its UUID, including noncumulative status for each - day that the budget is configured to include.` + day that the budget is configured to include. + + Arguments: + BUDGET_ID: Budget ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/credentials/credentials.go b/cmd/account/credentials/credentials.go index 99204bfb..72fcd70b 100755 --- a/cmd/account/credentials/credentials.go +++ b/cmd/account/credentials/credentials.go @@ -139,7 +139,10 @@ func newDelete() *cobra.Command { Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any - workspace.` + workspace. + + Arguments: + CREDENTIALS_ID: Databricks Account API credential configuration ID` cmd.Annotations = make(map[string]string) @@ -213,7 +216,10 @@ func newGet() *cobra.Command { cmd.Long = `Get credential configuration. Gets a Databricks credential configuration object for an account, both - specified by ID.` + specified by ID. + + Arguments: + CREDENTIALS_ID: Databricks Account API credential configuration ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index e7b56df7..e6d216df 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -130,7 +130,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete Custom OAuth App Integration. Delete an existing Custom OAuth App Integration. You can retrieve the custom - oauth app integration via :method:CustomAppIntegration/get.` + oauth app integration via :method:CustomAppIntegration/get. + + Arguments: + INTEGRATION_ID: The oauth app integration ID.` cmd.Annotations = make(map[string]string) @@ -191,7 +194,10 @@ func newGet() *cobra.Command { cmd.Short = `Get OAuth Custom App Integration.` cmd.Long = `Get OAuth Custom App Integration. - Gets the Custom OAuth App Integration for the given integration id.` + Gets the Custom OAuth App Integration for the given integration id. + + Arguments: + INTEGRATION_ID: The oauth app integration ID.` cmd.Annotations = make(map[string]string) @@ -307,7 +313,10 @@ func newUpdate() *cobra.Command { cmd.Long = `Updates Custom OAuth App Integration. 
Updates an existing custom OAuth App Integration. You can retrieve the custom - oauth app integration via :method:CustomAppIntegration/get.` + oauth app integration via :method:CustomAppIntegration/get. + + Arguments: + INTEGRATION_ID: The oauth app integration ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/encryption-keys/encryption-keys.go b/cmd/account/encryption-keys/encryption-keys.go index 2172c49f..3977f583 100755 --- a/cmd/account/encryption-keys/encryption-keys.go +++ b/cmd/account/encryption-keys/encryption-keys.go @@ -155,7 +155,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete encryption key configuration. Deletes a customer-managed key configuration object for an account. You cannot - delete a configuration that is associated with a running workspace.` + delete a configuration that is associated with a running workspace. + + Arguments: + CUSTOMER_MANAGED_KEY_ID: Databricks encryption key configuration ID.` cmd.Annotations = make(map[string]string) @@ -230,7 +233,10 @@ func newGet() *cobra.Command { types, subscription types, and AWS regions. This operation is available only if your account is on the E2 version of the - platform.",` + platform.", + + Arguments: + CUSTOMER_MANAGED_KEY_ID: Databricks encryption key configuration ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 826d7700..42333c18 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -140,7 +140,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a group.` cmd.Long = `Delete a group. - Deletes a group from the Databricks account.` + Deletes a group from the Databricks account. + + Arguments: + ID: Unique ID for a group in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -213,7 +216,10 @@ func newGet() *cobra.Command { cmd.Short = `Get group details.` cmd.Long = `Get group details. - Gets the information for a specific group in the Databricks account.` + Gets the information for a specific group in the Databricks account. + + Arguments: + ID: Unique ID for a group in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -358,7 +364,10 @@ func newPatch() *cobra.Command { cmd.Short = `Update group details.` cmd.Long = `Update group details. - Partially updates the details of a group.` + Partially updates the details of a group. + + Arguments: + ID: Unique ID for a group in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -449,7 +458,10 @@ func newUpdate() *cobra.Command { cmd.Short = `Replace a group.` cmd.Long = `Replace a group. - Updates the details of a group by replacing the entire group entity.` + Updates the details of a group by replacing the entire group entity. + + Arguments: + ID: Databricks group ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/ip-access-lists/ip-access-lists.go b/cmd/account/ip-access-lists/ip-access-lists.go index 328883ac..5d95614a 100755 --- a/cmd/account/ip-access-lists/ip-access-lists.go +++ b/cmd/account/ip-access-lists/ip-access-lists.go @@ -154,7 +154,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete access list.` cmd.Long = `Delete access list. - Deletes an IP access list, specified by its list ID.` + Deletes an IP access list, specified by its list ID. 
+ + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list.` cmd.Annotations = make(map[string]string) @@ -227,7 +230,10 @@ func newGet() *cobra.Command { cmd.Short = `Get IP access list.` cmd.Long = `Get IP access list. - Gets an IP access list, specified by its list ID.` + Gets an IP access list, specified by its list ID. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index fdc5e386..782d71ac 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -208,7 +208,10 @@ func newGet() *cobra.Command { cmd.Long = `Get log delivery configuration. Gets a Databricks log delivery configuration object for an account, both - specified by ID.` + specified by ID. + + Arguments: + LOG_DELIVERY_CONFIGURATION_ID: Databricks log delivery configuration ID` cmd.Annotations = make(map[string]string) @@ -351,7 +354,15 @@ func newPatchStatus() *cobra.Command { configurations is not supported, so disable log delivery configurations that are no longer needed. Note that you can't re-enable a delivery configuration if this would violate the delivery configuration limits described under - [Create log delivery](:method:LogDelivery/Create).` + [Create log delivery](:method:LogDelivery/Create). + + Arguments: + LOG_DELIVERY_CONFIGURATION_ID: Databricks log delivery configuration ID + STATUS: Status of log delivery configuration. Set to ENABLED (enabled) or + DISABLED (disabled). Defaults to ENABLED. You can [enable or disable + the configuration](#operation/patch-log-delivery-config-status) later. + Deletion of a configuration is not supported, so disable a log delivery + configuration that is no longer needed.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index 00979f45..619bde50 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -59,7 +59,11 @@ func newCreate() *cobra.Command { cmd.Short = `Assigns a workspace to a metastore.` cmd.Long = `Assigns a workspace to a metastore. - Creates an assignment to a metastore for a workspace` + Creates an assignment to a metastore for a workspace + + Arguments: + WORKSPACE_ID: Workspace ID. + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) @@ -131,7 +135,11 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a metastore assignment. Deletes a metastore assignment to a workspace, leaving the workspace with no - metastore.` + metastore. + + Arguments: + WORKSPACE_ID: Workspace ID. + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) @@ -199,7 +207,10 @@ func newGet() *cobra.Command { Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is assigned a metastore, the mappig will be returned. If no metastore is assigned to the workspace, the assignment will not be found and a - 404 returned.` + 404 returned. + + Arguments: + WORKSPACE_ID: Workspace ID.` cmd.Annotations = make(map[string]string) @@ -264,7 +275,10 @@ func newList() *cobra.Command { cmd.Long = `Get all workspaces assigned to a metastore. Gets a list of all Databricks workspace IDs that have been assigned to given - metastore.` + metastore. 
+ + Arguments: + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) @@ -330,7 +344,11 @@ func newUpdate() *cobra.Command { cmd.Long = `Updates a metastore assignment to a workspaces. Updates an assignment to a metastore for a workspace. Currently, only the - default catalog may be updated.` + default catalog may be updated. + + Arguments: + WORKSPACE_ID: Workspace ID. + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index 30be31ba..797bef5e 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -126,7 +126,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a metastore.` cmd.Long = `Delete a metastore. - Deletes a Unity Catalog metastore for an account, both specified by ID.` + Deletes a Unity Catalog metastore for an account, both specified by ID. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) @@ -187,7 +190,10 @@ func newGet() *cobra.Command { cmd.Short = `Get a metastore.` cmd.Long = `Get a metastore. - Gets a Unity Catalog metastore from an account, both specified by ID.` + Gets a Unity Catalog metastore from an account, both specified by ID. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) @@ -300,7 +306,10 @@ func newUpdate() *cobra.Command { cmd.Short = `Update a metastore.` cmd.Long = `Update a metastore. - Updates an existing Unity Catalog metastore.` + Updates an existing Unity Catalog metastore. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index e603dd89..27ab3174 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -77,7 +77,16 @@ func newCreateNetworkConnectivityConfiguration() *cobra.Command { multiple workspaces from the same Azure region within the same Databricks account. See [configure serverless secure connectivity]. - [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security` + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security + + Arguments: + NAME: The name of the network connectivity configuration. The name can contain + alphanumeric characters, hyphens, and underscores. The length must be + between 3 and 30 characters. The name must match the regular expression + ^[0-9a-zA-Z-_]{3,30}$. + REGION: The Azure region for this network connectivity configuration. Only + workspaces in the same Azure region can be attached to this network + connectivity configuration.` cmd.Annotations = make(map[string]string) @@ -167,7 +176,14 @@ func newCreatePrivateEndpointRule() *cobra.Command { private endpoint created, make a GET request on the new private endpoint rule. See [serverless private link]. 
- [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link` + [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectvity Configuration ID. + RESOURCE_ID: The Azure resource ID of the target resource. + GROUP_ID: The sub-resource type (group ID) of the target resource. Note that to + connect to workspace root storage (root DBFS), you need two endpoints, one + for blob and one for dfs.` cmd.Annotations = make(map[string]string) @@ -250,7 +266,10 @@ func newDeleteNetworkConnectivityConfiguration() *cobra.Command { cmd.Short = `Delete a network connectivity configuration.` cmd.Long = `Delete a network connectivity configuration. - Deletes a network connectivity configuration.` + Deletes a network connectivity configuration. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectvity Configuration ID.` cmd.Annotations = make(map[string]string) @@ -315,7 +334,11 @@ func newDeletePrivateEndpointRule() *cobra.Command { deactivated and will be purged after seven days of deactivation. When a private endpoint is in deactivated state, deactivated field is set to true and the private endpoint is not available to your serverless compute - resources.` + resources. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectvity Configuration ID. + PRIVATE_ENDPOINT_RULE_ID: Your private endpoint rule ID.` cmd.Annotations = make(map[string]string) @@ -377,7 +400,10 @@ func newGetNetworkConnectivityConfiguration() *cobra.Command { cmd.Short = `Get a network connectivity configuration.` cmd.Long = `Get a network connectivity configuration. - Gets a network connectivity configuration.` + Gets a network connectivity configuration. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectvity Configuration ID.` cmd.Annotations = make(map[string]string) @@ -438,7 +464,11 @@ func newGetPrivateEndpointRule() *cobra.Command { cmd.Short = `Get a private endpoint rule.` cmd.Long = `Get a private endpoint rule. - Gets the private endpoint rule.` + Gets the private endpoint rule. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectvity Configuration ID. + PRIVATE_ENDPOINT_RULE_ID: Your private endpoint rule ID.` cmd.Annotations = make(map[string]string) @@ -563,7 +593,10 @@ func newListPrivateEndpointRules() *cobra.Command { cmd.Short = `List private endpoint rules.` cmd.Long = `List private endpoint rules. - Gets an array of private endpoint rules.` + Gets an array of private endpoint rules. + + Arguments: + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectvity Configuration ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/network-policy/network-policy.go b/cmd/account/network-policy/network-policy.go index 23a9605c..c89b53ee 100755 --- a/cmd/account/network-policy/network-policy.go +++ b/cmd/account/network-policy/network-policy.go @@ -63,7 +63,16 @@ func newDeleteAccountNetworkPolicy() *cobra.Command { cmd.Short = `Delete Account Network Policy.` cmd.Long = `Delete Account Network Policy. - Reverts back all the account network policies back to default.` + Reverts back all the account network policies back to default. + + Arguments: + ETAG: etag used for versioning. The response is at least as fresh as the eTag + provided. 
This is used for optimistic concurrency control as a way to help + prevent simultaneous writes of a setting overwriting each other. It is + strongly suggested that systems make use of the etag in the read -> delete + pattern to perform setting deletions in order to avoid race conditions. + That is, get an etag from a GET request, and pass it with the DELETE + request to identify the rule set version you are deleting.` cmd.Annotations = make(map[string]string) @@ -124,7 +133,16 @@ func newReadAccountNetworkPolicy() *cobra.Command { cmd.Short = `Get Account Network Policy.` cmd.Long = `Get Account Network Policy. - Gets the value of Account level Network Policy.` + Gets the value of Account level Network Policy. + + Arguments: + ETAG: etag used for versioning. The response is at least as fresh as the eTag + provided. This is used for optimistic concurrency control as a way to help + prevent simultaneous writes of a setting overwriting each other. It is + strongly suggested that systems make use of the etag in the read -> delete + pattern to perform setting deletions in order to avoid race conditions. + That is, get an etag from a GET request, and pass it with the DELETE + request to identify the rule set version you are deleting.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/networks/networks.go b/cmd/account/networks/networks.go index 1aa2520f..15586bdc 100755 --- a/cmd/account/networks/networks.go +++ b/cmd/account/networks/networks.go @@ -66,7 +66,10 @@ func newCreate() *cobra.Command { Creates a Databricks network configuration that represents an VPC and its resources. The VPC will be used for new Databricks clusters. This requires a - pre-existing VPC and subnets.` + pre-existing VPC and subnets. + + Arguments: + NETWORK_NAME: The human-readable name of the network configuration.` cmd.Annotations = make(map[string]string) @@ -147,7 +150,10 @@ func newDelete() *cobra.Command { workspace. This operation is available only if your account is on the E2 version of the - platform.` + platform. + + Arguments: + NETWORK_ID: Databricks Account API network configuration ID.` cmd.Annotations = make(map[string]string) @@ -221,7 +227,10 @@ func newGet() *cobra.Command { cmd.Long = `Get a network configuration. Gets a Databricks network configuration, which represents a cloud VPC and its - resources.` + resources. + + Arguments: + NETWORK_ID: Databricks Account API network configuration ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 4aff4192..2f3a1a7e 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -75,7 +75,12 @@ func newCreate() *cobra.Command { PrivateLink]. [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html` + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + PRIVATE_ACCESS_SETTINGS_NAME: The human-readable name of the private access settings object. + REGION: The cloud region for workspaces associated with this private access + settings object.` cmd.Annotations = make(map[string]string) @@ -161,7 +166,10 @@ func newDelete() *cobra.Command { PrivateLink]. 
[AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html` + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID.` cmd.Annotations = make(map[string]string) @@ -241,7 +249,10 @@ func newGet() *cobra.Command { PrivateLink]. [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html` + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID.` cmd.Annotations = make(map[string]string) @@ -389,7 +400,13 @@ func newReplace() *cobra.Command { PrivateLink]. [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html` + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID. + PRIVATE_ACCESS_SETTINGS_NAME: The human-readable name of the private access settings object. + REGION: The cloud region for workspaces associated with this private access + settings object.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index 9fe4e6e8..d3209c67 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -130,7 +130,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete Published OAuth App Integration. Delete an existing Published OAuth App Integration. You can retrieve the - published oauth app integration via :method:PublishedAppIntegration/get.` + published oauth app integration via :method:PublishedAppIntegration/get. + + Arguments: + INTEGRATION_ID: The oauth app integration ID.` cmd.Annotations = make(map[string]string) @@ -191,7 +194,10 @@ func newGet() *cobra.Command { cmd.Short = `Get OAuth Published App Integration.` cmd.Long = `Get OAuth Published App Integration. - Gets the Published OAuth App Integration for the given integration id.` + Gets the Published OAuth App Integration for the given integration id. + + Arguments: + INTEGRATION_ID: The oauth app integration ID.` cmd.Annotations = make(map[string]string) @@ -306,7 +312,10 @@ func newUpdate() *cobra.Command { cmd.Long = `Updates Published OAuth App Integration. Updates an existing published OAuth App Integration. You can retrieve the - published oauth app integration via :method:PublishedAppIntegration/get.` + published oauth app integration via :method:PublishedAppIntegration/get. 
+ + Arguments: + INTEGRATION_ID: The oauth app integration ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index a28f75fa..19d6a491 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -66,7 +66,10 @@ func newCreate() *cobra.Command { cmd.Short = `Create service principal secret.` cmd.Long = `Create service principal secret. - Create a secret for the given service principal.` + Create a secret for the given service principal. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal ID.` cmd.Annotations = make(map[string]string) @@ -130,7 +133,11 @@ func newDelete() *cobra.Command { cmd.Short = `Delete service principal secret.` cmd.Long = `Delete service principal secret. - Delete a secret from the given service principal.` + Delete a secret from the given service principal. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal ID. + SECRET_ID: The secret ID.` cmd.Annotations = make(map[string]string) @@ -197,7 +204,10 @@ func newList() *cobra.Command { List all secrets associated with the given service principal. This operation only returns information about the secrets themselves and does not include the - secret values.` + secret values. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index 864cd287..79098217 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -138,7 +138,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a service principal.` cmd.Long = `Delete a service principal. - Delete a single service principal in the Databricks account.` + Delete a single service principal in the Databricks account. + + Arguments: + ID: Unique ID for a service principal in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -212,7 +215,10 @@ func newGet() *cobra.Command { cmd.Long = `Get service principal details. Gets the details for a single service principal define in the Databricks - account.` + account. + + Arguments: + ID: Unique ID for a service principal in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -358,7 +364,10 @@ func newPatch() *cobra.Command { cmd.Long = `Update service principal details. Partially updates the details of a single service principal in the Databricks - account.` + account. + + Arguments: + ID: Unique ID for a service principal in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -451,7 +460,10 @@ func newUpdate() *cobra.Command { Updates the details of a single service principal. - This action replaces the existing service principal with the same name.` + This action replaces the existing service principal with the same name. + + Arguments: + ID: Databricks service principal ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/settings/settings.go b/cmd/account/settings/settings.go index 09794488..e22b9950 100755 --- a/cmd/account/settings/settings.go +++ b/cmd/account/settings/settings.go @@ -64,7 +64,16 @@ func newDeletePersonalComputeSetting() *cobra.Command { cmd.Short = `Delete Personal Compute setting.` cmd.Long = `Delete Personal Compute setting. 
- Reverts back the Personal Compute setting value to default (ON)` + Reverts back the Personal Compute setting value to default (ON) + + Arguments: + ETAG: etag used for versioning. The response is at least as fresh as the eTag + provided. This is used for optimistic concurrency control as a way to help + prevent simultaneous writes of a setting overwriting each other. It is + strongly suggested that systems make use of the etag in the read -> delete + pattern to perform setting deletions in order to avoid race conditions. + That is, get an etag from a GET request, and pass it with the DELETE + request to identify the rule set version you are deleting.` cmd.Annotations = make(map[string]string) @@ -125,7 +134,16 @@ func newReadPersonalComputeSetting() *cobra.Command { cmd.Short = `Get Personal Compute setting.` cmd.Long = `Get Personal Compute setting. - Gets the value of the Personal Compute setting.` + Gets the value of the Personal Compute setting. + + Arguments: + ETAG: etag used for versioning. The response is at least as fresh as the eTag + provided. This is used for optimistic concurrency control as a way to help + prevent simultaneous writes of a setting overwriting each other. It is + strongly suggested that systems make use of the etag in the read -> delete + pattern to perform setting deletions in order to avoid race conditions. + That is, get an etag from a GET request, and pass it with the DELETE + request to identify the rule set version you are deleting.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index 670bb26d..35b865c7 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -63,7 +63,10 @@ func newCreate() *cobra.Command { credentials * **GcpServiceAcountKey** for GCP credentials. The caller must be a metastore admin and have the - **CREATE_STORAGE_CREDENTIAL** privilege on the metastore.` + **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) @@ -133,7 +136,11 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a storage credential. Deletes a storage credential from the metastore. The caller must be an owner - of the storage credential.` + of the storage credential. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + STORAGE_CREDENTIAL_NAME: Name of the storage credential.` cmd.Annotations = make(map[string]string) @@ -197,7 +204,11 @@ func newGet() *cobra.Command { Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have a level of privilege on - the storage credential.` + the storage credential. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + STORAGE_CREDENTIAL_NAME: Name of the storage credential.` cmd.Annotations = make(map[string]string) @@ -260,7 +271,10 @@ func newList() *cobra.Command { cmd.Long = `Get all storage credentials assigned to a metastore. Gets a list of all storage credentials that have been assigned to given - metastore.` + metastore. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID` cmd.Annotations = make(map[string]string) @@ -327,7 +341,11 @@ func newUpdate() *cobra.Command { Updates a storage credential on the metastore. The caller must be the owner of the storage credential. 
If the caller is a metastore admin, only the __owner__ - credential can be changed.` + credential can be changed. + + Arguments: + METASTORE_ID: Unity Catalog metastore ID + STORAGE_CREDENTIAL_NAME: Name of the storage credential.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/storage/storage.go b/cmd/account/storage/storage.go index 8eebbab1..d671355d 100755 --- a/cmd/account/storage/storage.go +++ b/cmd/account/storage/storage.go @@ -135,7 +135,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete storage configuration. Deletes a Databricks storage configuration. You cannot delete a storage - configuration that is associated with any workspace.` + configuration that is associated with any workspace. + + Arguments: + STORAGE_CONFIGURATION_ID: Databricks Account API storage configuration ID.` cmd.Annotations = make(map[string]string) @@ -208,7 +211,10 @@ func newGet() *cobra.Command { cmd.Short = `Get storage configuration.` cmd.Long = `Get storage configuration. - Gets a Databricks storage configuration for an account, both specified by ID.` + Gets a Databricks storage configuration for an account, both specified by ID. + + Arguments: + STORAGE_CONFIGURATION_ID: Databricks Account API storage configuration ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 05b27d8b..294aba1c 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -147,7 +147,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a user. Deletes a user. Deleting a user from a Databricks account also removes objects - associated with the user.` + associated with the user. + + Arguments: + ID: Unique ID for a user in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -228,7 +231,10 @@ func newGet() *cobra.Command { cmd.Short = `Get user details.` cmd.Long = `Get user details. - Gets information for a specific user in Databricks account.` + Gets information for a specific user in Databricks account. + + Arguments: + ID: Unique ID for a user in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -374,7 +380,10 @@ func newPatch() *cobra.Command { cmd.Long = `Update user details. Partially updates a user resource by applying the supplied operations on - specific user attributes.` + specific user attributes. + + Arguments: + ID: Unique ID for a user in the Databricks account.` cmd.Annotations = make(map[string]string) @@ -467,7 +476,11 @@ func newUpdate() *cobra.Command { cmd.Short = `Replace a user.` cmd.Long = `Replace a user. - Replaces a user's information with the data supplied in request.` + Replaces a user's information with the data supplied in request. + + Arguments: + ID: Databricks user ID. This is automatically set by Databricks. 
Any value + provided by the client will be ignored.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/vpc-endpoints/vpc-endpoints.go b/cmd/account/vpc-endpoints/vpc-endpoints.go index 8c46ab82..6d80e731 100755 --- a/cmd/account/vpc-endpoints/vpc-endpoints.go +++ b/cmd/account/vpc-endpoints/vpc-endpoints.go @@ -73,7 +73,10 @@ func newCreate() *cobra.Command { [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html - [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html` + [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html + + Arguments: + VPC_ENDPOINT_NAME: The human-readable name of the storage configuration.` cmd.Annotations = make(map[string]string) @@ -157,7 +160,10 @@ func newDelete() *cobra.Command { [AWS PrivateLink]: https://aws.amazon.com/privatelink [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html` + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + + Arguments: + VPC_ENDPOINT_ID: Databricks VPC endpoint ID.` cmd.Annotations = make(map[string]string) @@ -234,7 +240,10 @@ func newGet() *cobra.Command { AWS used to communicate privately with Databricks over [AWS PrivateLink]. [AWS PrivateLink]: https://aws.amazon.com/privatelink - [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html` + [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html + + Arguments: + VPC_ENDPOINT_ID: Databricks VPC endpoint ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index 9e8c1404..f442b03e 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -57,7 +57,11 @@ func newDelete() *cobra.Command { cmd.Long = `Delete permissions assignment. Deletes the workspace permissions assignment in a given account and workspace - for the specified principal.` + for the specified principal. + + Arguments: + WORKSPACE_ID: The workspace ID. + PRINCIPAL_ID: The ID of the user, service principal, or group.` cmd.Annotations = make(map[string]string) @@ -125,7 +129,10 @@ func newGet() *cobra.Command { cmd.Short = `List workspace permissions.` cmd.Long = `List workspace permissions. - Get an array of workspace permissions for the specified account and workspace.` + Get an array of workspace permissions for the specified account and workspace. + + Arguments: + WORKSPACE_ID: The workspace ID.` cmd.Annotations = make(map[string]string) @@ -190,7 +197,10 @@ func newList() *cobra.Command { cmd.Long = `Get permission assignments. Get the permission assignments for the specified Databricks account and - Databricks workspace.` + Databricks workspace. 
+ + Arguments: + WORKSPACE_ID: The workspace ID for the account.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 60eeb505..332f5262 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -94,7 +94,10 @@ func newCreate() *cobra.Command { workspace status is typically PROVISIONING. Use the workspace ID (workspace_id) field in the response to identify the new workspace and make repeated GET requests with the workspace ID and check its status. The - workspace becomes available when the status changes to RUNNING.` + workspace becomes available when the status changes to RUNNING. + + Arguments: + WORKSPACE_NAME: The workspace's human-readable name.` cmd.Annotations = make(map[string]string) @@ -189,7 +192,10 @@ func newDelete() *cobra.Command { This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per - account.` + account. + + Arguments: + WORKSPACE_ID: Workspace ID.` cmd.Annotations = make(map[string]string) @@ -278,7 +284,10 @@ func newGet() *cobra.Command { platform or on a select custom plan that allows multiple workspaces per account. - [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html` + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + + Arguments: + WORKSPACE_ID: Workspace ID.` cmd.Annotations = make(map[string]string) @@ -534,7 +543,10 @@ func newUpdate() *cobra.Command { account. [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html - [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html` + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + + Arguments: + WORKSPACE_ID: Workspace ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index ff5433d1..f0bd6acf 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -127,7 +127,10 @@ func newDeleteApp() *cobra.Command { cmd.Short = `Delete an application.` cmd.Long = `Delete an application. - Delete an application definition` + Delete an application definition + + Arguments: + NAME: The name of an application. This field is required.` cmd.Annotations = make(map[string]string) @@ -188,7 +191,10 @@ func newGetApp() *cobra.Command { cmd.Short = `Get definition for an application.` cmd.Long = `Get definition for an application. - Get an application definition` + Get an application definition + + Arguments: + NAME: The name of an application. This field is required.` cmd.Annotations = make(map[string]string) @@ -251,7 +257,10 @@ func newGetAppDeploymentStatus() *cobra.Command { cmd.Short = `Get deployment status for an application.` cmd.Long = `Get deployment status for an application. - Get deployment status for an application` + Get deployment status for an application + + Arguments: + DEPLOYMENT_ID: The deployment id for an application. This field is required.` cmd.Annotations = make(map[string]string) @@ -360,7 +369,10 @@ func newGetEvents() *cobra.Command { cmd.Short = `Get deployment events for an application.` cmd.Long = `Get deployment events for an application. 
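The workspace create flow described above (create returns a workspace_id, then repeated GET requests until the status reaches RUNNING) is a standard wait loop. A small sketch with a stubbed status function standing in for the real GET call; the interval, attempt count, and status strings other than PROVISIONING/RUNNING are example values:

    package main

    import (
        "fmt"
        "time"
    )

    // waitForRunning polls getStatus until it reports RUNNING or the
    // attempt budget is exhausted.
    func waitForRunning(getStatus func() string, interval time.Duration, attempts int) error {
        for i := 0; i < attempts; i++ {
            if s := getStatus(); s == "RUNNING" {
                return nil
            }
            time.Sleep(interval)
        }
        return fmt.Errorf("workspace did not reach RUNNING")
    }

    func main() {
        calls := 0
        status := func() string { // stub: becomes RUNNING on the third poll
            calls++
            if calls >= 3 {
                return "RUNNING"
            }
            return "PROVISIONING"
        }
        fmt.Println(waitForRunning(status, 10*time.Millisecond, 10))
    }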
- Get deployment events for an application` + Get deployment events for an application + + Arguments: + NAME: The name of an application. This field is required.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go index ad6e58b4..21fdba12 100755 --- a/cmd/workspace/artifact-allowlists/artifact-allowlists.go +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -58,7 +58,10 @@ func newGet() *cobra.Command { cmd.Long = `Get an artifact allowlist. Get the artifact allowlist of a certain artifact type. The caller must be a - metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore.` + metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore. + + Arguments: + ARTIFACT_TYPE: The artifact type of the allowlist.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index e9e48fde..58e85bda 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -73,7 +73,10 @@ func newCreate() *cobra.Command { cmd.Long = `Create a catalog. Creates a new catalog instance in the parent metastore if the caller is a - metastore admin or has the **CREATE_CATALOG** privilege.` + metastore admin or has the **CREATE_CATALOG** privilege. + + Arguments: + NAME: Name of catalog.` cmd.Annotations = make(map[string]string) @@ -152,7 +155,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a catalog. Deletes the catalog that matches the supplied name. The caller must be a - metastore admin or the owner of the catalog.` + metastore admin or the owner of the catalog. + + Arguments: + NAME: The name of the catalog.` cmd.Annotations = make(map[string]string) @@ -215,7 +221,10 @@ func newGet() *cobra.Command { Gets the specified catalog in a metastore. The caller must be a metastore admin, the owner of the catalog, or a user that has the **USE_CATALOG** - privilege set for their account.` + privilege set for their account. + + Arguments: + NAME: The name of the catalog.` cmd.Annotations = make(map[string]string) @@ -339,7 +348,10 @@ func newUpdate() *cobra.Command { Updates the catalog that matches the supplied name. The caller must be either the owner of the catalog, or a metastore admin (when changing the owner field - of the catalog).` + of the catalog). + + Arguments: + NAME: Name of catalog.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 05a1141d..aa2e9f3c 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -132,7 +132,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a clean room. Deletes a data object clean room from the metastore. The caller must be an - owner of the clean room.` + owner of the clean room. + + Arguments: + NAME_ARG: The name of the clean room.` cmd.Annotations = make(map[string]string) @@ -196,7 +199,10 @@ func newGet() *cobra.Command { cmd.Long = `Get a clean room. Gets a data object clean room from the metastore. The caller must be a - metastore admin or the owner of the clean room.` + metastore admin or the owner of the clean room. + + Arguments: + NAME_ARG: The name of the clean room.` cmd.Annotations = make(map[string]string) @@ -341,7 +347,10 @@ func newUpdate() *cobra.Command { indefinitely for recipients to be able to access the table. 
Typically, you should use a group as the clean room owner. - Table removals through **update** do not require additional privileges.` + Table removals through **update** do not require additional privileges. + + Arguments: + NAME_ARG: The name of the clean room.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 59939a49..a5464479 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -85,7 +85,11 @@ func newCreate() *cobra.Command { cmd.Short = `Create a new policy.` cmd.Long = `Create a new policy. - Creates a new policy with prescribed settings.` + Creates a new policy with prescribed settings. + + Arguments: + NAME: Cluster Policy name requested by the user. This has to be unique. Length + must be between 1 and 100 characters.` cmd.Annotations = make(map[string]string) @@ -164,7 +168,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a cluster policy. Delete a policy for a cluster. Clusters governed by this policy can still run, - but cannot be edited.` + but cannot be edited. + + Arguments: + POLICY_ID: The ID of the policy to delete.` cmd.Annotations = make(map[string]string) @@ -254,7 +261,12 @@ func newEdit() *cobra.Command { cmd.Long = `Update a cluster policy. Update an existing policy for cluster. This operation may make some clusters - governed by the previous policy invalid.` + governed by the previous policy invalid. + + Arguments: + POLICY_ID: The ID of the policy to update. + NAME: Cluster Policy name requested by the user. This has to be unique. Length + must be between 1 and 100 characters.` cmd.Annotations = make(map[string]string) @@ -333,7 +345,10 @@ func newGet() *cobra.Command { cmd.Short = `Get a cluster policy.` cmd.Long = `Get a cluster policy. - Get a cluster policy entity. Creation and editing is available to admins only.` + Get a cluster policy entity. Creation and editing is available to admins only. + + Arguments: + POLICY_ID: Canonical unique identifier for the cluster policy.` cmd.Annotations = make(map[string]string) @@ -406,7 +421,10 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get cluster policy permission levels.` cmd.Long = `Get cluster policy permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -480,7 +498,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get cluster policy permissions. Gets the permissions of a cluster policy. Cluster policies can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -620,7 +641,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set cluster policy permissions. Sets permissions on a cluster policy. Cluster policies can inherit permissions - from their root object.` + from their root object. + + Arguments: + CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -704,7 +728,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update cluster policy permissions. Updates the permissions on a cluster policy. 
Cluster policies can inherit
-  permissions from their root object.`
+  permissions from their root object.
+
+  Arguments:
+    CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions.`
 
 	cmd.Annotations = make(map[string]string)
 
diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go
index bc45d14a..627e2275 100755
--- a/cmd/workspace/clusters/clusters.go
+++ b/cmd/workspace/clusters/clusters.go
@@ -85,7 +85,11 @@ func newChangeOwner() *cobra.Command {
 	cmd.Long = `Change cluster owner.
 
   Change the owner of the cluster. You must be an admin to perform this
-  operation.`
+  operation.
+
+  Arguments:
+    CLUSTER_ID: 
+    OWNER_USERNAME: New owner of the cluster_id after this RPC.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -206,7 +210,12 @@ func newCreate() *cobra.Command {
 
   If Databricks acquires at least 85% of the requested on-demand nodes, cluster
   creation will succeed. Otherwise the cluster will terminate with an
-  informative error message.`
+  informative error message.
+
+  Arguments:
+    SPARK_VERSION: The Spark version of the cluster, e.g. 3.3.x-scala2.11. A list of
+      available Spark versions can be retrieved by using the
+      :method:clusters/sparkVersions API call.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -304,7 +313,10 @@ func newDelete() *cobra.Command {
   Terminates the Spark cluster with the specified ID. The cluster is removed
   asynchronously. Once the termination has completed, the cluster will be in a
   TERMINATED state. If the cluster is already in a TERMINATING or
-  TERMINATED state, nothing will happen.`
+  TERMINATED state, nothing will happen.
+
+  Arguments:
+    CLUSTER_ID: The cluster to be terminated.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -442,7 +454,13 @@ func newEdit() *cobra.Command {
   new attributes will take effect. Any attempt to update a cluster in any other
   state will be rejected with an INVALID_STATE error code.
 
-  Clusters created by the Databricks Jobs service cannot be edited.`
+  Clusters created by the Databricks Jobs service cannot be edited.
+
+  Arguments:
+    CLUSTER_ID: ID of the cluster
+    SPARK_VERSION: The Spark version of the cluster, e.g. 3.3.x-scala2.11. A list of
+      available Spark versions can be retrieved by using the
+      :method:clusters/sparkVersions API call.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -544,7 +562,10 @@ func newEvents() *cobra.Command {
 
   Retrieves a list of events about the activity of a cluster. This API is
   paginated. If there are more events to read, the response includes all the
-  parameters necessary to request the next page of events.`
+  parameters necessary to request the next page of events.
+
+  Arguments:
+    CLUSTER_ID: The ID of the cluster to retrieve events about.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -630,7 +651,10 @@ func newGet() *cobra.Command {
 	cmd.Long = `Get cluster info.
 
   Retrieves the information for a cluster given its identifier. Clusters can be
-  described while they are running, or up to 60 days after they are terminated.`
+  described while they are running, or up to 60 days after they are terminated.
+
+  Arguments:
+    CLUSTER_ID: The cluster about which to retrieve information.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -703,7 +727,10 @@ func newGetPermissionLevels() *cobra.Command {
 	cmd.Short = `Get cluster permission levels.`
 	cmd.Long = `Get cluster permission levels.
 
-  Gets the permission levels that a user can have on an object.`
+  Gets the permission levels that a user can have on an object.
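The events call above is the one paginated API in this file: each response carries the full parameter set needed to request the next page. A generic sketch of draining such a cursor, with invented request/response shapes (the real ones live in the SDK and differ in detail):

    package main

    import "fmt"

    // Invented stand-ins for the SDK types: the response embeds the
    // ready-made request for the next page, as the help text describes.
    type eventsRequest struct {
        ClusterId string
        Offset    int
        Limit     int
    }

    type eventsResponse struct {
        Events   []string
        NextPage *eventsRequest // nil once the last page is reached
    }

    // drain keeps issuing the ready-made next-page request until none is left.
    func drain(fetch func(eventsRequest) eventsResponse, req eventsRequest) []string {
        var all []string
        for {
            resp := fetch(req)
            all = append(all, resp.Events...)
            if resp.NextPage == nil {
                return all
            }
            req = *resp.NextPage
        }
    }

    func main() {
        data := []string{"STARTING", "RUNNING", "RESIZING", "TERMINATING"}
        fetch := func(r eventsRequest) eventsResponse { // fake two-page server
            end := r.Offset + r.Limit
            if end >= len(data) {
                return eventsResponse{Events: data[r.Offset:]}
            }
            return eventsResponse{
                Events:   data[r.Offset:end],
                NextPage: &eventsRequest{ClusterId: r.ClusterId, Offset: end, Limit: r.Limit},
            }
        }
        fmt.Println(drain(fetch, eventsRequest{ClusterId: "abc-123", Offset: 0, Limit: 2}))
    }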
+ + Arguments: + CLUSTER_ID: The cluster for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -777,7 +804,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get cluster permissions. Gets the permissions of a cluster. Clusters can inherit permissions from their - root object.` + root object. + + Arguments: + CLUSTER_ID: The cluster for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -1024,7 +1054,10 @@ func newPermanentDelete() *cobra.Command { In addition, users will no longer see permanently deleted clusters in the cluster list, and API users can no longer perform any action on permanently - deleted clusters.` + deleted clusters. + + Arguments: + CLUSTER_ID: The cluster to be deleted.` cmd.Annotations = make(map[string]string) @@ -1108,7 +1141,10 @@ func newPin() *cobra.Command { Pinning a cluster ensures that the cluster will always be returned by the ListClusters API. Pinning a cluster that is already pinned will have no - effect. This API can only be called by workspace admins.` + effect. This API can only be called by workspace admins. + + Arguments: + CLUSTER_ID: ` cmd.Annotations = make(map[string]string) @@ -1199,7 +1235,10 @@ func newResize() *cobra.Command { cmd.Long = `Resize cluster. Resizes a cluster to have a desired number of workers. This will fail unless - the cluster is in a RUNNING state.` + the cluster is in a RUNNING state. + + Arguments: + CLUSTER_ID: The cluster to be resized.` cmd.Annotations = make(map[string]string) @@ -1301,7 +1340,10 @@ func newRestart() *cobra.Command { cmd.Long = `Restart cluster. Restarts a Spark cluster with the supplied ID. If the cluster is not currently - in a RUNNING state, nothing will happen.` + in a RUNNING state, nothing will happen. + + Arguments: + CLUSTER_ID: The cluster to be started.` cmd.Annotations = make(map[string]string) @@ -1398,7 +1440,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set cluster permissions. Sets permissions on a cluster. Clusters can inherit permissions from their - root object.` + root object. + + Arguments: + CLUSTER_ID: The cluster for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -1540,7 +1585,10 @@ func newStart() *cobra.Command { with the last specified cluster size. * If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes. * If the cluster is not currently in a TERMINATED state, nothing will - happen. * Clusters launched to run a job cannot be started.` + happen. * Clusters launched to run a job cannot be started. + + Arguments: + CLUSTER_ID: The cluster to be started.` cmd.Annotations = make(map[string]string) @@ -1636,7 +1684,10 @@ func newUnpin() *cobra.Command { Unpinning a cluster will allow the cluster to eventually be removed from the ListClusters API. Unpinning a cluster that is not pinned will have no effect. - This API can only be called by workspace admins.` + This API can only be called by workspace admins. + + Arguments: + CLUSTER_ID: ` cmd.Annotations = make(map[string]string) @@ -1721,7 +1772,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update cluster permissions. Updates the permissions on a cluster. Clusters can inherit permissions from - their root object.` + their root object. 
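The start semantics spelled out above branch on cluster state and origin. A toy encoding of that decision table; the state names mirror the documented lifecycle, while the function and its inputs are invented for illustration:

    package main

    import "fmt"

    // startAction summarizes the documented rules for starting a cluster.
    func startAction(state string, autoscaling bool, launchedByJob bool) string {
        switch {
        case launchedByJob:
            return "error: clusters launched to run a job cannot be started"
        case state != "TERMINATED":
            return "no-op: cluster is not in a TERMINATED state"
        case autoscaling:
            return "start with the minimum number of nodes"
        default:
            return "start with the last specified cluster size"
        }
    }

    func main() {
        fmt.Println(startAction("TERMINATED", true, false))
    }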
+
+  Arguments:
+    CLUSTER_ID: The cluster for which to get or manage permissions.`
 
 	cmd.Annotations = make(map[string]string)
 
diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go
index 26a5eec9..e32830f9 100755
--- a/cmd/workspace/connections/connections.go
+++ b/cmd/workspace/connections/connections.go
@@ -138,7 +138,10 @@ func newDelete() *cobra.Command {
 	cmd.Short = `Delete a connection.`
 	cmd.Long = `Delete a connection.
 
-  Deletes the connection that matches the supplied name.`
+  Deletes the connection that matches the supplied name.
+
+  Arguments:
+    NAME_ARG: The name of the connection to be deleted.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -211,7 +214,10 @@ func newGet() *cobra.Command {
 	cmd.Short = `Get a connection.`
 	cmd.Long = `Get a connection.
 
-  Gets a connection from its name.`
+  Gets a connection from its name.
+
+  Arguments:
+    NAME_ARG: Name of the connection.`
 
 	cmd.Annotations = make(map[string]string)
 
diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go
index 420593a2..3c074620 100755
--- a/cmd/workspace/experiments/experiments.go
+++ b/cmd/workspace/experiments/experiments.go
@@ -73,7 +73,10 @@ func newCreateExperiment() *cobra.Command {
   already exist and fails if another experiment with the same name already
   exists.
 
-  Throws RESOURCE_ALREADY_EXISTS if an experiment with the given name exists.`
+  Throws RESOURCE_ALREADY_EXISTS if an experiment with the given name exists.
+
+  Arguments:
+    NAME: Experiment name.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -229,7 +232,10 @@ func newDeleteExperiment() *cobra.Command {
 
   Marks an experiment and associated metadata, runs, metrics, params, and tags
   for deletion. If the experiment uses FileStore, artifacts associated with
-  experiment are also deleted.`
+  experiment are also deleted.
+
+  Arguments:
+    EXPERIMENT_ID: ID of the associated experiment.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -307,7 +313,10 @@ func newDeleteRun() *cobra.Command {
 	cmd.Short = `Delete a run.`
 	cmd.Long = `Delete a run.
 
-  Marks a run for deletion.`
+  Marks a run for deletion.
+
+  Arguments:
+    RUN_ID: ID of the run to delete.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -388,7 +397,13 @@ func newDeleteRuns() *cobra.Command {
 	cmd.Long = `Delete runs by creation time.
 
   Bulk delete runs in an experiment that were created prior to or at the
-  specified timestamp. Deletes at most max_runs per request.`
+  specified timestamp. Deletes at most max_runs per request.
+
+  Arguments:
+    EXPERIMENT_ID: The ID of the experiment containing the runs to delete.
+    MAX_TIMESTAMP_MILLIS: The maximum creation timestamp in milliseconds since the UNIX epoch for
+      deleting runs. Only runs created prior to or at this timestamp are
+      deleted.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -473,7 +488,11 @@ func newDeleteTag() *cobra.Command {
 	cmd.Long = `Delete a tag.
 
   Deletes a tag on a run. Tags are run metadata that can be updated during a run
-  and after a run completes.`
+  and after a run completes.
+
+  Arguments:
+    RUN_ID: ID of the run that the tag was logged under. Must be provided.
+    KEY: Name of the tag. Maximum size is 255 bytes. Must be provided.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -560,7 +579,10 @@ func newGetByName() *cobra.Command {
   them.
 
   Throws RESOURCE_DOES_NOT_EXIST if no experiment with the specified name
-  exists.`
+  exists.
+ + Arguments: + EXPERIMENT_NAME: Name of the associated experiment.` cmd.Annotations = make(map[string]string) @@ -621,7 +643,10 @@ func newGetExperiment() *cobra.Command { cmd.Short = `Get an experiment.` cmd.Long = `Get an experiment. - Gets metadata for an experiment. This method works on deleted experiments.` + Gets metadata for an experiment. This method works on deleted experiments. + + Arguments: + EXPERIMENT_ID: ID of the associated experiment.` cmd.Annotations = make(map[string]string) @@ -687,7 +712,10 @@ func newGetHistory() *cobra.Command { cmd.Short = `Get history of a given metric within a run.` cmd.Long = `Get history of a given metric within a run. - Gets a list of all values for the specified metric for a given run.` + Gets a list of all values for the specified metric for a given run. + + Arguments: + METRIC_KEY: Name of the metric.` cmd.Annotations = make(map[string]string) @@ -748,7 +776,10 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get experiment permission levels.` cmd.Long = `Get experiment permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + EXPERIMENT_ID: The experiment for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -810,7 +841,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get experiment permissions. Gets the permissions of an experiment. Experiments can inherit permissions - from their root object.` + from their root object. + + Arguments: + EXPERIMENT_ID: The experiment for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -878,7 +912,10 @@ func newGetRun() *cobra.Command { with the latest timestamp. If there are multiple values with the latest timestamp, return the maximum of - these values.` + these values. + + Arguments: + RUN_ID: ID of the run to fetch. Must be provided.` cmd.Annotations = make(map[string]string) @@ -1256,7 +1293,12 @@ func newLogMetric() *cobra.Command { Logs a metric for a run. A metric is a key-value pair (string key, float value) with an associated timestamp. Examples include the various metrics that - represent ML model accuracy. A metric can be logged multiple times.` + represent ML model accuracy. A metric can be logged multiple times. + + Arguments: + KEY: Name of the metric. + VALUE: Double value of the metric being logged. + TIMESTAMP: Unix timestamp in milliseconds at the time metric was logged.` cmd.Annotations = make(map[string]string) @@ -1424,7 +1466,11 @@ func newLogParam() *cobra.Command { Logs a param used for a run. A param is a key-value pair (string key, string value). Examples include hyperparameters used for ML model training and constant dates and values used in an ETL pipeline. A param can be logged only - once for a run.` + once for a run. + + Arguments: + KEY: Name of the param. Maximum size is 255 bytes. + VALUE: String value of the param being logged. Maximum size is 500 bytes.` cmd.Annotations = make(map[string]string) @@ -1510,7 +1556,10 @@ func newRestoreExperiment() *cobra.Command { underlying artifacts associated with experiment are also restored. Throws RESOURCE_DOES_NOT_EXIST if experiment was never created or was - permanently deleted.` + permanently deleted. + + Arguments: + EXPERIMENT_ID: ID of the associated experiment.` cmd.Annotations = make(map[string]string) @@ -1588,7 +1637,10 @@ func newRestoreRun() *cobra.Command { cmd.Short = `Restore a run.` cmd.Long = `Restore a run. 
- Restores a deleted run.` + Restores a deleted run. + + Arguments: + RUN_ID: ID of the run to restore.` cmd.Annotations = make(map[string]string) @@ -1669,7 +1721,13 @@ func newRestoreRuns() *cobra.Command { cmd.Long = `Restore runs by deletion time. Bulk restore runs in an experiment that were deleted no earlier than the - specified timestamp. Restores at most max_runs per request.` + specified timestamp. Restores at most max_runs per request. + + Arguments: + EXPERIMENT_ID: The ID of the experiment containing the runs to restore. + MIN_TIMESTAMP_MILLIS: The minimum deletion timestamp in milliseconds since the UNIX epoch for + restoring runs. Only runs deleted no earlier than this timestamp are + restored.` cmd.Annotations = make(map[string]string) @@ -1904,7 +1962,15 @@ func newSetExperimentTag() *cobra.Command { cmd.Short = `Set a tag.` cmd.Long = `Set a tag. - Sets a tag on an experiment. Experiment tags are metadata that can be updated.` + Sets a tag on an experiment. Experiment tags are metadata that can be updated. + + Arguments: + EXPERIMENT_ID: ID of the experiment under which to log the tag. Must be provided. + KEY: Name of the tag. Maximum size depends on storage backend. All storage + backends are guaranteed to support key values up to 250 bytes in size. + VALUE: String value of the tag being logged. Maximum size depends on storage + backend. All storage backends are guaranteed to support key values up to + 5000 bytes in size.` cmd.Annotations = make(map[string]string) @@ -1991,7 +2057,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set experiment permissions. Sets permissions on an experiment. Experiments can inherit permissions from - their root object.` + their root object. + + Arguments: + EXPERIMENT_ID: The experiment for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -2064,7 +2133,14 @@ func newSetTag() *cobra.Command { cmd.Long = `Set a tag. Sets a tag on a run. Tags are run metadata that can be updated during a run - and after a run completes.` + and after a run completes. + + Arguments: + KEY: Name of the tag. Maximum size depends on storage backend. All storage + backends are guaranteed to support key values up to 250 bytes in size. + VALUE: String value of the tag being logged. Maximum size depends on storage + backend. All storage backends are guaranteed to support key values up to + 5000 bytes in size.` cmd.Annotations = make(map[string]string) @@ -2147,7 +2223,10 @@ func newUpdateExperiment() *cobra.Command { cmd.Short = `Update an experiment.` cmd.Long = `Update an experiment. - Updates experiment metadata.` + Updates experiment metadata. + + Arguments: + EXPERIMENT_ID: ID of the associated experiment.` cmd.Annotations = make(map[string]string) @@ -2228,7 +2307,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update experiment permissions. Updates the permissions on an experiment. Experiments can inherit permissions - from their root object.` + from their root object. + + Arguments: + EXPERIMENT_ID: The experiment for which to get or manage permissions.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 2803f186..0d3682bb 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -77,7 +77,12 @@ func newCreate() *cobra.Command { Creates a new external location entry in the metastore. 
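Both bulk-run endpoints above key on epoch milliseconds: MAX_TIMESTAMP_MILLIS for delete-runs, MIN_TIMESTAMP_MILLIS for restore-runs. Computing such a cutoff in Go is one expression; the 30-day window here is just an example value:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // A cutoff 30 days in the past, expressed as milliseconds since
        // the UNIX epoch, the unit both arguments expect.
        cutoff := time.Now().AddDate(0, 0, -30).UnixMilli()
        fmt.Println(cutoff)
    }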
The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the - metastore and the associated storage credential.` + metastore and the associated storage credential. + + Arguments: + NAME: Name of the external location. + URL: Path URL of the external location. + CREDENTIAL_NAME: Name of the storage credential used with this location.` cmd.Annotations = make(map[string]string) @@ -162,7 +167,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete an external location. Deletes the specified external location from the metastore. The caller must be - the owner of the external location.` + the owner of the external location. + + Arguments: + NAME: Name of the external location.` cmd.Annotations = make(map[string]string) @@ -225,7 +233,10 @@ func newGet() *cobra.Command { Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some - privilege on the external location.` + privilege on the external location. + + Arguments: + NAME: Name of the external location.` cmd.Annotations = make(map[string]string) @@ -352,7 +363,10 @@ func newUpdate() *cobra.Command { Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. In the second case, the admin - can only update the name of the external location.` + can only update the name of the external location. + + Arguments: + NAME: Name of the external location.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 6510fce6..07580f0b 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -136,7 +136,11 @@ func newDelete() *cobra.Command { of the function's parent catalog - Is the owner of the function's parent schema and have the **USE_CATALOG** privilege on its parent catalog - Is the owner of the function itself and have both the **USE_CATALOG** privilege on - its parent catalog and the **USE_SCHEMA** privilege on its parent schema` + its parent catalog and the **USE_SCHEMA** privilege on its parent schema + + Arguments: + NAME: The fully-qualified name of the function (of the form + __catalog_name__.__schema_name__.__function__name__).` cmd.Annotations = make(map[string]string) @@ -215,7 +219,11 @@ func newGet() *cobra.Command { **USE_CATALOG** privilege on the function's parent catalog and be the owner of the function - Have the **USE_CATALOG** privilege on the function's parent catalog, the **USE_SCHEMA** privilege on the function's parent schema, and the - **EXECUTE** privilege on the function itself` + **EXECUTE** privilege on the function itself + + Arguments: + NAME: The fully-qualified name of the function (of the form + __catalog_name__.__schema_name__.__function__name__).` cmd.Annotations = make(map[string]string) @@ -294,7 +302,11 @@ func newList() *cobra.Command { **USE_SCHEMA** privilege on the schema, and the output list contains only functions for which either the user has the **EXECUTE** privilege or the user is the owner. There is no guarantee of a specific ordering of the elements in - the array.` + the array. + + Arguments: + CATALOG_NAME: Name of parent catalog for functions of interest. 
+ SCHEMA_NAME: Parent schema of functions.` cmd.Annotations = make(map[string]string) @@ -367,7 +379,11 @@ func newUpdate() *cobra.Command { function's parent schema and has the **USE_CATALOG** privilege on its parent catalog - Is the owner of the function itself and has the **USE_CATALOG** privilege on its parent catalog as well as the **USE_SCHEMA** privilege on the - function's parent schema.` + function's parent schema. + + Arguments: + NAME: The fully-qualified name of the function (of the form + __catalog_name__.__schema_name__.__function__name__).` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index 1d9e64a0..ca256564 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -68,7 +68,12 @@ func newCreate() *cobra.Command { Creates a Git credential entry for the user. Only one Git credential per user is supported, so any attempts to create credentials if an entry already exists will fail. Use the PATCH endpoint to update existing credentials, or the - DELETE endpoint to delete existing credentials.` + DELETE endpoint to delete existing credentials. + + Arguments: + GIT_PROVIDER: Git provider. This field is case-insensitive. The available Git providers + are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, gitHubEnterprise, + bitbucketServer, gitLabEnterpriseEdition and awsCodeCommit.` cmd.Annotations = make(map[string]string) @@ -144,7 +149,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a credential.` cmd.Long = `Delete a credential. - Deletes the specified Git credential.` + Deletes the specified Git credential. + + Arguments: + CREDENTIAL_ID: The ID for the corresponding credential to access.` cmd.Annotations = make(map[string]string) @@ -220,7 +228,10 @@ func newGet() *cobra.Command { cmd.Short = `Get a credential entry.` cmd.Long = `Get a credential entry. - Gets the Git credential with the specified credential ID.` + Gets the Git credential with the specified credential ID. + + Arguments: + CREDENTIAL_ID: The ID for the corresponding credential to access.` cmd.Annotations = make(map[string]string) @@ -351,7 +362,10 @@ func newUpdate() *cobra.Command { cmd.Short = `Update a credential.` cmd.Long = `Update a credential. - Updates the specified Git credential.` + Updates the specified Git credential. + + Arguments: + CREDENTIAL_ID: The ID for the corresponding credential to access.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index 3674d405..1479381d 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -68,7 +68,11 @@ func newCreate() *cobra.Command { cmd.Short = `Create init script.` cmd.Long = `Create init script. - Creates a new global init script in this workspace.` + Creates a new global init script in this workspace. + + Arguments: + NAME: The name of the script + SCRIPT: The Base64-encoded content of the script.` cmd.Annotations = make(map[string]string) @@ -147,7 +151,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete init script.` cmd.Long = `Delete init script. - Deletes a global init script.` + Deletes a global init script. 
+ + Arguments: + SCRIPT_ID: The ID of the global init script.` cmd.Annotations = make(map[string]string) @@ -220,7 +227,10 @@ func newGet() *cobra.Command { cmd.Short = `Get an init script.` cmd.Long = `Get an init script. - Gets all the details of a script, including its Base64-encoded contents.` + Gets all the details of a script, including its Base64-encoded contents. + + Arguments: + SCRIPT_ID: The ID of the global init script.` cmd.Annotations = make(map[string]string) @@ -350,7 +360,12 @@ func newUpdate() *cobra.Command { cmd.Long = `Update init script. Updates a global init script, specifying only the fields to change. All fields - are optional. Unspecified fields retain their current value.` + are optional. Unspecified fields retain their current value. + + Arguments: + SCRIPT_ID: The ID of the global init script. + NAME: The name of the script + SCRIPT: The Base64-encoded content of the script.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/grants/grants.go b/cmd/workspace/grants/grants.go index a5ebd733..020e0bf8 100755 --- a/cmd/workspace/grants/grants.go +++ b/cmd/workspace/grants/grants.go @@ -67,7 +67,11 @@ func newGet() *cobra.Command { cmd.Short = `Get permissions.` cmd.Long = `Get permissions. - Gets the permissions for a securable.` + Gets the permissions for a securable. + + Arguments: + SECURABLE_TYPE: Type of securable. + FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) @@ -134,7 +138,11 @@ func newGetEffective() *cobra.Command { cmd.Short = `Get effective permissions.` cmd.Long = `Get effective permissions. - Gets the effective permissions for a securable.` + Gets the effective permissions for a securable. + + Arguments: + SECURABLE_TYPE: Type of securable. + FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) @@ -203,7 +211,11 @@ func newUpdate() *cobra.Command { cmd.Short = `Update permissions.` cmd.Long = `Update permissions. - Updates the permissions for a securable.` + Updates the permissions for a securable. + + Arguments: + SECURABLE_TYPE: Type of securable. + FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index ac7f7ba4..f2888485 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -140,7 +140,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a group.` cmd.Long = `Delete a group. - Deletes a group from the Databricks workspace.` + Deletes a group from the Databricks workspace. + + Arguments: + ID: Unique ID for a group in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -213,7 +216,10 @@ func newGet() *cobra.Command { cmd.Short = `Get group details.` cmd.Long = `Get group details. - Gets the information for a specific group in the Databricks workspace.` + Gets the information for a specific group in the Databricks workspace. + + Arguments: + ID: Unique ID for a group in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -358,7 +364,10 @@ func newPatch() *cobra.Command { cmd.Short = `Update group details.` cmd.Long = `Update group details. - Partially updates the details of a group.` + Partially updates the details of a group. + + Arguments: + ID: Unique ID for a group in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -449,7 +458,10 @@ func newUpdate() *cobra.Command { cmd.Short = `Replace a group.` cmd.Long = `Replace a group. 
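The init-script endpoints above move script bodies around Base64-encoded (the SCRIPT argument on create and update). Encoding a script for those calls is a single standard-library call; the script content here is a made-up example:

    package main

    import (
        "encoding/base64"
        "fmt"
    )

    func main() {
        // Encode the raw script body into the Base64 form the API expects.
        script := "#!/bin/bash\necho initializing node\n"
        fmt.Println(base64.StdEncoding.EncodeToString([]byte(script)))
    }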
- Updates the details of a group by replacing the entire group entity.` + Updates the details of a group by replacing the entire group entity. + + Arguments: + ID: Databricks group ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index ae23eac0..ab88f112 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -86,7 +86,16 @@ func newCreate() *cobra.Command { cmd.Short = `Create a new instance pool.` cmd.Long = `Create a new instance pool. - Creates a new instance pool using idle and ready-to-use cloud instances.` + Creates a new instance pool using idle and ready-to-use cloud instances. + + Arguments: + INSTANCE_POOL_NAME: Pool name requested by the user. Pool name must be unique. Length must be + between 1 and 100 characters. + NODE_TYPE_ID: This field encodes, through a single value, the resources available to + each of the Spark nodes in this cluster. For example, the Spark nodes can + be provisioned and optimized for memory or compute intensive workloads. A + list of available node types can be retrieved by using the + :method:clusters/listNodeTypes API call.` cmd.Annotations = make(map[string]string) @@ -168,7 +177,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete an instance pool. Deletes the instance pool permanently. The idle instances in the pool are - terminated asynchronously.` + terminated asynchronously. + + Arguments: + INSTANCE_POOL_ID: The instance pool to be terminated.` cmd.Annotations = make(map[string]string) @@ -255,7 +267,17 @@ func newEdit() *cobra.Command { cmd.Short = `Edit an existing instance pool.` cmd.Long = `Edit an existing instance pool. - Modifies the configuration of an existing instance pool.` + Modifies the configuration of an existing instance pool. + + Arguments: + INSTANCE_POOL_ID: Instance pool ID + INSTANCE_POOL_NAME: Pool name requested by the user. Pool name must be unique. Length must be + between 1 and 100 characters. + NODE_TYPE_ID: This field encodes, through a single value, the resources available to + each of the Spark nodes in this cluster. For example, the Spark nodes can + be provisioned and optimized for memory or compute intensive workloads. A + list of available node types can be retrieved by using the + :method:clusters/listNodeTypes API call.` cmd.Annotations = make(map[string]string) @@ -337,7 +359,10 @@ func newGet() *cobra.Command { cmd.Short = `Get instance pool information.` cmd.Long = `Get instance pool information. - Retrieve the information for an instance pool based on its identifier.` + Retrieve the information for an instance pool based on its identifier. + + Arguments: + INSTANCE_POOL_ID: The canonical unique identifier for the instance pool.` cmd.Annotations = make(map[string]string) @@ -410,7 +435,10 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get instance pool permission levels.` cmd.Long = `Get instance pool permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + INSTANCE_POOL_ID: The instance pool for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -484,7 +512,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get instance pool permissions. Gets the permissions of an instance pool. 
Instance pools can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + INSTANCE_POOL_ID: The instance pool for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -610,7 +641,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set instance pool permissions. Sets permissions on an instance pool. Instance pools can inherit permissions - from their root object.` + from their root object. + + Arguments: + INSTANCE_POOL_ID: The instance pool for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -694,7 +728,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update instance pool permissions. Updates the permissions on an instance pool. Instance pools can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + INSTANCE_POOL_ID: The instance pool for which to get or manage permissions.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/instance-profiles/instance-profiles.go b/cmd/workspace/instance-profiles/instance-profiles.go index 085707b7..ca78a15f 100755 --- a/cmd/workspace/instance-profiles/instance-profiles.go +++ b/cmd/workspace/instance-profiles/instance-profiles.go @@ -67,7 +67,11 @@ func newAdd() *cobra.Command { cmd.Long = `Register an instance profile. In the UI, you can select the instance profile when launching clusters. This - API is only available to admin users.` + API is only available to admin users. + + Arguments: + INSTANCE_PROFILE_ARN: The AWS ARN of the instance profile to register with Databricks. This + field is required.` cmd.Annotations = make(map[string]string) @@ -162,7 +166,11 @@ func newEdit() *cobra.Command { This API is only available to admin users. [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html - [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html` + [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html + + Arguments: + INSTANCE_PROFILE_ARN: The AWS ARN of the instance profile to register with Databricks. This + field is required.` cmd.Annotations = make(map[string]string) @@ -293,7 +301,10 @@ func newRemove() *cobra.Command { Remove the instance profile with the provided ARN. Existing clusters with this instance profile will continue to function. - This API is only accessible to admin users.` + This API is only accessible to admin users. + + Arguments: + INSTANCE_PROFILE_ARN: The ARN of the instance profile to remove. This field is required.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index 7bda0ef0..7f66f417 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -155,7 +155,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete access list.` cmd.Long = `Delete access list. - Deletes an IP access list, specified by its list ID.` + Deletes an IP access list, specified by its list ID. + + Arguments: + IP_ACCESS_LIST_ID: The ID for the corresponding IP access list to modify.` cmd.Annotations = make(map[string]string) @@ -228,7 +231,10 @@ func newGet() *cobra.Command { cmd.Short = `Get access list.` cmd.Long = `Get access list. - Gets an IP access list, specified by its list ID.` + Gets an IP access list, specified by its list ID. 
+
+  Arguments:
+    IP_ACCESS_LIST_ID: The ID for the corresponding IP access list to modify.`
 
 	cmd.Annotations = make(map[string]string)
 
diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go
index 218975ec..6183c282 100755
--- a/cmd/workspace/jobs/jobs.go
+++ b/cmd/workspace/jobs/jobs.go
@@ -153,7 +153,10 @@ func newCancelRun() *cobra.Command {
 	cmd.Long = `Cancel a run.
 
   Cancels a job run or a task run. The run is canceled asynchronously, so it may
-  still be running when this request completes.`
+  still be running when this request completes.
+
+  Arguments:
+    RUN_ID: This field is required.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -322,7 +325,10 @@ func newDelete() *cobra.Command {
 	cmd.Short = `Delete a job.`
 	cmd.Long = `Delete a job.
 
-  Deletes a job.`
+  Deletes a job.
+
+  Arguments:
+    JOB_ID: The canonical identifier of the job to delete. This field is required.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -407,7 +413,10 @@ func newDeleteRun() *cobra.Command {
 	cmd.Short = `Delete a job run.`
 	cmd.Long = `Delete a job run.
 
-  Deletes a non-active run. Returns an error if the run is active.`
+  Deletes a non-active run. Returns an error if the run is active.
+
+  Arguments:
+    RUN_ID: The canonical identifier of the run for which to retrieve the metadata.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -492,7 +501,10 @@ func newExportRun() *cobra.Command {
 	cmd.Short = `Export and retrieve a job run.`
 	cmd.Long = `Export and retrieve a job run.
 
-  Export and retrieve the job run task.`
+  Export and retrieve the job run task.
+
+  Arguments:
+    RUN_ID: The canonical identifier for the run. This field is required.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -568,7 +580,11 @@ func newGet() *cobra.Command {
 	cmd.Short = `Get a single job.`
 	cmd.Long = `Get a single job.
 
-  Retrieves the details for a single job.`
+  Retrieves the details for a single job.
+
+  Arguments:
+    JOB_ID: The canonical identifier of the job to retrieve information about. This
+      field is required.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -644,7 +660,10 @@ func newGetPermissionLevels() *cobra.Command {
 	cmd.Short = `Get job permission levels.`
 	cmd.Long = `Get job permission levels.
 
-  Gets the permission levels that a user can have on an object.`
+  Gets the permission levels that a user can have on an object.
+
+  Arguments:
+    JOB_ID: The job for which to get or manage permissions.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -718,7 +737,10 @@ func newGetPermissions() *cobra.Command {
 	cmd.Long = `Get job permissions.
 
   Gets the permissions of a job. Jobs can inherit permissions from their root
-  object.`
+  object.
+
+  Arguments:
+    JOB_ID: The job for which to get or manage permissions.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -799,7 +821,11 @@ func newGetRun() *cobra.Command {
 	cmd.Short = `Get a single job run.`
 	cmd.Long = `Get a single job run.
 
-  Retrieve the metadata of a run.`
+  Retrieve the metadata of a run.
+
+  Arguments:
+    RUN_ID: The canonical identifier of the run for which to retrieve the metadata.
+      This field is required.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -884,7 +910,10 @@ func newGetRunOutput() *cobra.Command {
   This endpoint validates that the __run_id__ parameter is valid and returns an
   HTTP status code 400 if the __run_id__ parameter is invalid. Runs are
   automatically removed after 60 days. If you want to reference them beyond
-  60 days, you must save old run results before they expire.`
+  60 days, you must save old run results before they expire.
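Given the 60-day retention called out above, run output worth keeping has to be exported and stored elsewhere before it expires. A sketch of that step, with fetchOutput standing in for the real export call and the run-NN.json filename invented for illustration:

    package main

    import (
        "fmt"
        "os"
    )

    // archiveRun persists one run's exported output to disk before the
    // retention window closes. fetchOutput is a placeholder, not the SDK.
    func archiveRun(runID int64, fetchOutput func(int64) ([]byte, error)) error {
        out, err := fetchOutput(runID)
        if err != nil {
            return err
        }
        return os.WriteFile(fmt.Sprintf("run-%d.json", runID), out, 0o644)
    }

    func main() {
        stub := func(id int64) ([]byte, error) { // fake export payload
            return []byte(`{"run_id": ` + fmt.Sprint(id) + `}`), nil
        }
        fmt.Println(archiveRun(42, stub))
    }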
+ + Arguments: + RUN_ID: The canonical identifier for the run. This field is required.` cmd.Annotations = make(map[string]string) @@ -1118,7 +1147,10 @@ func newRepairRun() *cobra.Command { Re-run one or more tasks. Tasks are re-run as part of the original job run. They use the current job and task settings, and can be viewed in the history - for the original job run.` + for the original job run. + + Arguments: + RUN_ID: The job run ID of the run to repair. The run must not be in progress.` cmd.Annotations = make(map[string]string) @@ -1305,7 +1337,10 @@ func newRunNow() *cobra.Command { cmd.Short = `Trigger a new job run.` cmd.Long = `Trigger a new job run. - Run a job and return the run_id of the triggered run.` + Run a job and return the run_id of the triggered run. + + Arguments: + JOB_ID: The ID of the job to be executed` cmd.Annotations = make(map[string]string) @@ -1412,7 +1447,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set job permissions. Sets permissions on a job. Jobs can inherit permissions from their root - object.` + object. + + Arguments: + JOB_ID: The job for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -1604,7 +1642,10 @@ func newUpdate() *cobra.Command { cmd.Long = `Partially update a job. Add, update, or remove specific settings of an existing job. Use the ResetJob - to overwrite all job settings.` + to overwrite all job settings. + + Arguments: + JOB_ID: The canonical identifier of the job to update. This field is required.` cmd.Annotations = make(map[string]string) @@ -1692,7 +1733,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update job permissions. Updates the permissions on a job. Jobs can inherit permissions from their root - object.` + object. + + Arguments: + JOB_ID: The job for which to get or manage permissions.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index 92671dc3..1e742892 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -138,7 +138,10 @@ func newClusterStatus() *cobra.Command { 3. Libraries that were previously requested on this cluster or on all clusters, but now marked for removal. Within this group there is no order - guarantee.` + guarantee. + + Arguments: + CLUSTER_ID: Unique identifier of the cluster whose status should be retrieved.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 85b1b286..3ca6fb55 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -71,7 +71,12 @@ func newAssign() *cobra.Command { Creates a new metastore assignment. If an assignment for the same __workspace_id__ exists, it will be overwritten by the new __metastore_id__ - and __default_catalog_name__. The caller must be an account admin.` + and __default_catalog_name__. The caller must be an account admin. + + Arguments: + WORKSPACE_ID: A workspace ID. + METASTORE_ID: The unique ID of the metastore. + DEFAULT_CATALOG_NAME: The name of the default catalog in the metastore.` cmd.Annotations = make(map[string]string) @@ -163,7 +168,10 @@ func newCreate() *cobra.Command { path. By default (if the __owner__ field is not set), the owner of the new metastore is the user calling the __createMetastore__ API. If the __owner__ field is set to the empty string (**""**), the ownership is assigned to the - System User instead.` + System User instead. 
+ + Arguments: + NAME: The user-specified name of the metastore.` cmd.Annotations = make(map[string]string) @@ -289,7 +297,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a metastore.` cmd.Long = `Delete a metastore. - Deletes a metastore. The caller must be a metastore admin.` + Deletes a metastore. The caller must be a metastore admin. + + Arguments: + ID: Unique ID of the metastore.` cmd.Annotations = make(map[string]string) @@ -363,7 +374,10 @@ func newGet() *cobra.Command { cmd.Long = `Get a metastore. Gets a metastore that matches the supplied ID. The caller must be a metastore - admin to retrieve this info.` + admin to retrieve this info. + + Arguments: + ID: Unique ID of the metastore.` cmd.Annotations = make(map[string]string) @@ -535,7 +549,11 @@ func newUnassign() *cobra.Command { cmd.Short = `Delete an assignment.` cmd.Long = `Delete an assignment. - Deletes a metastore assignment. The caller must be an account administrator.` + Deletes a metastore assignment. The caller must be an account administrator. + + Arguments: + WORKSPACE_ID: A workspace ID. + METASTORE_ID: Query for the ID of the metastore to delete.` cmd.Annotations = make(map[string]string) @@ -612,7 +630,10 @@ func newUpdate() *cobra.Command { Updates information for a specific metastore. The caller must be a metastore admin. If the __owner__ field is set to the empty string (**""**), the - ownership is updated to the System User.` + ownership is updated to the System User. + + Arguments: + ID: Unique ID of the metastore.` cmd.Annotations = make(map[string]string) @@ -699,7 +720,10 @@ func newUpdateAssignment() *cobra.Command { Updates a metastore assignment. This operation can be used to update __metastore_id__ or __default_catalog_name__ for a specified Workspace, if the Workspace is already assigned a metastore. The caller must be an account admin - to update __metastore_id__; otherwise, the caller can be a Workspace admin.` + to update __metastore_id__; otherwise, the caller can be a Workspace admin. + + Arguments: + WORKSPACE_ID: A workspace ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 1ae5c8eb..241e885b 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -66,7 +66,22 @@ func newApproveTransitionRequest() *cobra.Command { cmd.Short = `Approve transition request.` cmd.Long = `Approve transition request. - Approves a model version stage transition request.` + Approves a model version stage transition request. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition. Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. + + * Archived: Archived stage. + ARCHIVE_EXISTING_VERSIONS: Specifies whether to archive all current model versions in the target + stage.` cmd.Annotations = make(map[string]string) @@ -161,7 +176,12 @@ func newCreateComment() *cobra.Command { Posts a comment on a model version. A comment can be submitted either by a user or programmatically to display relevant information about the model. For - example, test results or deployment errors.` + example, test results or deployment errors. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. 
+ COMMENT: User-provided comment on the action.` cmd.Annotations = make(map[string]string) @@ -251,7 +271,10 @@ func newCreateModel() *cobra.Command { Creates a new registered model with the name specified in the request body. Throws RESOURCE_ALREADY_EXISTS if a registered model with the given name - exists.` + exists. + + Arguments: + NAME: Register models under this name` cmd.Annotations = make(map[string]string) @@ -334,7 +357,11 @@ func newCreateModelVersion() *cobra.Command { cmd.Short = `Create a model version.` cmd.Long = `Create a model version. - Creates a model version.` + Creates a model version. + + Arguments: + NAME: Register model under this name + SOURCE: URI indicating the location of the model artifacts.` cmd.Annotations = make(map[string]string) @@ -417,7 +444,20 @@ func newCreateTransitionRequest() *cobra.Command { cmd.Short = `Make a transition request.` cmd.Long = `Make a transition request. - Creates a model version stage transition request.` + Creates a model version stage transition request. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition. Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. + + * Archived: Archived stage.` cmd.Annotations = make(map[string]string) @@ -636,7 +676,10 @@ func newDeleteModel() *cobra.Command { cmd.Short = `Delete a model.` cmd.Long = `Delete a model. - Deletes a registered model.` + Deletes a registered model. + + Arguments: + NAME: Registered model unique name identifier.` cmd.Annotations = make(map[string]string) @@ -697,7 +740,12 @@ func newDeleteModelTag() *cobra.Command { cmd.Short = `Delete a model tag.` cmd.Long = `Delete a model tag. - Deletes the tag for a registered model.` + Deletes the tag for a registered model. + + Arguments: + NAME: Name of the registered model that the tag was logged under. + KEY: Name of the tag. The name must be an exact match; wild-card deletion is + not supported. Maximum size is 250 bytes.` cmd.Annotations = make(map[string]string) @@ -759,7 +807,11 @@ func newDeleteModelVersion() *cobra.Command { cmd.Short = `Delete a model version.` cmd.Long = `Delete a model version. - Deletes a model version.` + Deletes a model version. + + Arguments: + NAME: Name of the registered model + VERSION: Model version number` cmd.Annotations = make(map[string]string) @@ -821,7 +873,13 @@ func newDeleteModelVersionTag() *cobra.Command { cmd.Short = `Delete a model version tag.` cmd.Long = `Delete a model version tag. - Deletes a model version tag.` + Deletes a model version tag. + + Arguments: + NAME: Name of the registered model that the tag was logged under. + VERSION: Model version number that the tag was logged under. + KEY: Name of the tag. The name must be an exact match; wild-card deletion is + not supported. Maximum size is 250 bytes.` cmd.Annotations = make(map[string]string) @@ -886,7 +944,23 @@ func newDeleteTransitionRequest() *cobra.Command { cmd.Short = `Delete a transition request.` cmd.Long = `Delete a transition request. - Cancels a model version stage transition request.` + Cancels a model version stage transition request. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition request. Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. + + * Archived: Archived stage. 
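Several model-registry commands above enumerate the same four valid STAGE values. A small sketch of validating that positional before building a request; the helper and its error message are illustrative, only the four values come from the help text:

package main

import "fmt"

// validStages holds the four stage values listed in the help text above.
var validStages = map[string]bool{
	"None":       true,
	"Staging":    true,
	"Production": true,
	"Archived":   true,
}

// checkStage rejects a STAGE argument that is not one of the documented values.
func checkStage(stage string) error {
	if !validStages[stage] {
		return fmt.Errorf("invalid stage %q: must be one of None, Staging, Production, Archived", stage)
	}
	return nil
}

func main() {
	fmt.Println(checkStage("Staging")) // <nil>
	fmt.Println(checkStage("Prod"))    // invalid stage "Prod": ...
}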
+ CREATOR: Username of the user who created this request. Of the transition requests + matching the specified details, only the one transition created by this + user will be deleted.` cmd.Annotations = make(map[string]string) @@ -1020,7 +1094,10 @@ func newGetLatestVersions() *cobra.Command { cmd.Short = `Get the latest version.` cmd.Long = `Get the latest version. - Gets the latest version of a registered model.` + Gets the latest version of a registered model. + + Arguments: + NAME: Registered model unique name identifier.` cmd.Annotations = make(map[string]string) @@ -1100,7 +1177,10 @@ func newGetModel() *cobra.Command { [MLflow endpoint] that also returns the model's Databricks workspace ID and the permission level of the requesting user on the model. - [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel` + [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel + + Arguments: + NAME: Registered model unique name identifier.` cmd.Annotations = make(map[string]string) @@ -1161,7 +1241,11 @@ func newGetModelVersion() *cobra.Command { cmd.Short = `Get a model version.` cmd.Long = `Get a model version. - Get a model version.` + Get a model version. + + Arguments: + NAME: Name of the registered model + VERSION: Model version number` cmd.Annotations = make(map[string]string) @@ -1223,7 +1307,11 @@ func newGetModelVersionDownloadUri() *cobra.Command { cmd.Short = `Get a model version URI.` cmd.Long = `Get a model version URI. - Gets a URI to download the model version.` + Gets a URI to download the model version. + + Arguments: + NAME: Name of the registered model + VERSION: Model version number` cmd.Annotations = make(map[string]string) @@ -1285,7 +1373,10 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get registered model permission levels.` cmd.Long = `Get registered model permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + REGISTERED_MODEL_ID: The registered model for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -1347,7 +1438,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get registered model permissions. Gets the permissions of a registered model. Registered models can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + REGISTERED_MODEL_ID: The registered model for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -1471,7 +1565,11 @@ func newListTransitionRequests() *cobra.Command { cmd.Short = `List transition requests.` cmd.Long = `List transition requests. - Gets a list of all open stage transition requests for the model version.` + Gets a list of all open stage transition requests for the model version. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model.` cmd.Annotations = make(map[string]string) @@ -1602,7 +1700,20 @@ func newRejectTransitionRequest() *cobra.Command { cmd.Short = `Reject a transition request.` cmd.Long = `Reject a transition request. - Rejects a model version stage transition request.` + Rejects a model version stage transition request. + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition. Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. 
+ + * Archived: Archived stage.` cmd.Annotations = make(map[string]string) @@ -1691,7 +1802,10 @@ func newRenameModel() *cobra.Command { cmd.Short = `Rename a model.` cmd.Long = `Rename a model. - Renames a registered model.` + Renames a registered model. + + Arguments: + NAME: Registered model unique name identifier.` cmd.Annotations = make(map[string]string) @@ -1897,7 +2011,17 @@ func newSetModelTag() *cobra.Command { cmd.Short = `Set a tag.` cmd.Long = `Set a tag. - Sets a tag on a registered model.` + Sets a tag on a registered model. + + Arguments: + NAME: Unique name of the model. + KEY: Name of the tag. Maximum size depends on storage backend. If a tag with + this name already exists, its preexisting value will be replaced by the + specified value. All storage backends are guaranteed to support key + values up to 250 bytes in size. + VALUE: String value of the tag being logged. Maximum size depends on storage + backend. All storage backends are guaranteed to support key values up to + 5000 bytes in size.` cmd.Annotations = make(map[string]string) @@ -1981,7 +2105,18 @@ func newSetModelVersionTag() *cobra.Command { cmd.Short = `Set a version tag.` cmd.Long = `Set a version tag. - Sets a model version tag.` + Sets a model version tag. + + Arguments: + NAME: Unique name of the model. + VERSION: Model version number. + KEY: Name of the tag. Maximum size depends on storage backend. If a tag with + this name already exists, its preexisting value will be replaced by the + specified value. All storage backends are guaranteed to support key + values up to 250 bytes in size. + VALUE: String value of the tag being logged. Maximum size depends on storage + backend. All storage backends are guaranteed to support key values up to + 5000 bytes in size.` cmd.Annotations = make(map[string]string) @@ -2071,7 +2206,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set registered model permissions. Sets permissions on a registered model. Registered models can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + REGISTERED_MODEL_ID: The registered model for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -2144,7 +2282,10 @@ func newTestRegistryWebhook() *cobra.Command { **NOTE:** This endpoint is in Public Preview. - Tests a registry webhook.` + Tests a registry webhook. + + Arguments: + ID: Webhook ID` cmd.Annotations = make(map[string]string) @@ -2228,7 +2369,22 @@ func newTransitionStage() *cobra.Command { the [MLflow endpoint] that also accepts a comment associated with the transition to be recorded.", - [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage` + [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage + + Arguments: + NAME: Name of the model. + VERSION: Version of the model. + STAGE: Target stage of the transition. Valid values are: + + * None: The initial stage of a model version. + + * Staging: Staging or pre-production stage. + + * Production: Production stage. + + * Archived: Archived stage. + ARCHIVE_EXISTING_VERSIONS: Specifies whether to archive all current model versions in the target + stage.` cmd.Annotations = make(map[string]string) @@ -2321,7 +2477,11 @@ func newUpdateComment() *cobra.Command { cmd.Short = `Update a comment.` cmd.Long = `Update a comment. - Post an edit to a comment on a model version.` + Post an edit to a comment on a model version. 
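set-model-tag and set-model-version-tag above both guarantee key sizes up to 250 bytes and value sizes up to 5000 bytes across storage backends; larger tags may or may not work depending on the backend. A client-side pre-check of those documented minimums (the helper name is illustrative):

package main

import "fmt"

// Minimum sizes the help text above says every storage backend supports.
const (
	maxGuaranteedKeyBytes   = 250
	maxGuaranteedValueBytes = 5000
)

// checkTag flags tags exceeding the guaranteed limits; len on a Go string
// counts bytes, matching the byte-based limits in the documentation.
func checkTag(key, value string) error {
	if len(key) > maxGuaranteedKeyBytes {
		return fmt.Errorf("tag key is %d bytes; only %d are guaranteed", len(key), maxGuaranteedKeyBytes)
	}
	if len(value) > maxGuaranteedValueBytes {
		return fmt.Errorf("tag value is %d bytes; only %d are guaranteed", len(value), maxGuaranteedValueBytes)
	}
	return nil
}

func main() {
	fmt.Println(checkTag("team", "ml-platform")) // <nil>
}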
+ + Arguments: + ID: Unique identifier of an activity + COMMENT: User-provided comment on the action.` cmd.Annotations = make(map[string]string) @@ -2404,7 +2564,10 @@ func newUpdateModel() *cobra.Command { cmd.Short = `Update model.` cmd.Long = `Update model. - Updates a registered model.` + Updates a registered model. + + Arguments: + NAME: Registered model unique name identifier.` cmd.Annotations = make(map[string]string) @@ -2484,7 +2647,11 @@ func newUpdateModelVersion() *cobra.Command { cmd.Short = `Update model version.` cmd.Long = `Update model version. - Updates the model version.` + Updates the model version. + + Arguments: + NAME: Name of the registered model + VERSION: Model version number` cmd.Annotations = make(map[string]string) @@ -2568,7 +2735,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update registered model permissions. Updates the permissions on a registered model. Registered models can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + REGISTERED_MODEL_ID: The registered model for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -2645,7 +2815,10 @@ func newUpdateWebhook() *cobra.Command { **NOTE:** This endpoint is in Public Preview. - Updates a registry webhook.` + Updates a registry webhook. + + Arguments: + ID: Webhook ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index 67583a6a..348ea527 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -67,7 +67,11 @@ func newDelete() *cobra.Command { The caller must be a metastore admin or an owner of the parent registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** - privilege on the parent schema.` + privilege on the parent schema. + + Arguments: + FULL_NAME: The three-level (fully qualified) name of the model version + VERSION: The integer version number of the model version` cmd.Annotations = make(map[string]string) @@ -137,7 +141,11 @@ func newGet() *cobra.Command { The caller must be a metastore admin or an owner of (or have the **EXECUTE** privilege on) the parent registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent - catalog and the **USE_SCHEMA** privilege on the parent schema.` + catalog and the **USE_SCHEMA** privilege on the parent schema. + + Arguments: + FULL_NAME: The three-level (fully qualified) name of the model version + VERSION: The integer version number of the model version` cmd.Annotations = make(map[string]string) @@ -207,7 +215,11 @@ func newGetByAlias() *cobra.Command { The caller must be a metastore admin or an owner of (or have the **EXECUTE** privilege on) the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and - the **USE_SCHEMA** privilege on the parent schema.` + the **USE_SCHEMA** privilege on the parent schema. + + Arguments: + FULL_NAME: The three-level (fully qualified) name of the registered model + ALIAS: The name of the alias` cmd.Annotations = make(map[string]string) @@ -283,7 +295,11 @@ func newList() *cobra.Command { privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. 
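The model-versions commands above document VERSION as "the integer version number of the model version". A sketch of parsing that positional before filling the request; the helper is illustrative, though the generated code presumably performs an equivalent conversion:

package main

import (
	"fmt"
	"strconv"
)

// parseVersion converts the VERSION positional, documented above as an
// integer version number, into an int64 for the request.
func parseVersion(arg string) (int64, error) {
	v, err := strconv.ParseInt(arg, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid VERSION %q: expected an integer", arg)
	}
	return v, nil
}

func main() {
	fmt.Println(parseVersion("3"))  // 3 <nil>
	fmt.Println(parseVersion("v3")) // 0 invalid VERSION "v3": ...
}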
- There is no guarantee of a specific ordering of the elements in the response.` + There is no guarantee of a specific ordering of the elements in the response. + + Arguments: + FULL_NAME: The full three-level name of the registered model under which to list + model versions` cmd.Annotations = make(map[string]string) @@ -355,7 +371,11 @@ func newUpdate() *cobra.Command { **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - Currently only the comment of the model version can be updated.` + Currently only the comment of the model version can be updated. + + Arguments: + FULL_NAME: The three-level (fully qualified) name of the model version + VERSION: The integer version number of the model version` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index c168a1a4..61edb009 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -100,7 +100,11 @@ func newGet() *cobra.Command { cmd.Long = `Get object permissions. Gets the permissions of an object. Objects can inherit permissions from their - parent objects or root object.` + parent objects or root object. + + Arguments: + REQUEST_OBJECT_TYPE: + REQUEST_OBJECT_ID: ` cmd.Annotations = make(map[string]string) @@ -162,7 +166,11 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get object permission levels.` cmd.Long = `Get object permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + REQUEST_OBJECT_TYPE: + REQUEST_OBJECT_ID: ` cmd.Annotations = make(map[string]string) @@ -229,7 +237,11 @@ func newSet() *cobra.Command { cmd.Long = `Set object permissions. Sets permissions on an object. Objects can inherit permissions from their - parent objects or root object.` + parent objects or root object. + + Arguments: + REQUEST_OBJECT_TYPE: + REQUEST_OBJECT_ID: ` cmd.Annotations = make(map[string]string) @@ -302,7 +314,11 @@ func newUpdate() *cobra.Command { cmd.Long = `Update object permissions. Updates the permissions on an object. Objects can inherit permissions from - their parent objects or root object.` + their parent objects or root object. + + Arguments: + REQUEST_OBJECT_TYPE: + REQUEST_OBJECT_ID: ` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index dd370905..a2f1868b 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -284,7 +284,10 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get pipeline permission levels.` cmd.Long = `Get pipeline permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + PIPELINE_ID: The pipeline for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -358,7 +361,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get pipeline permissions. Gets the permissions of a pipeline. Pipelines can inherit permissions from - their root object.` + their root object. + + Arguments: + PIPELINE_ID: The pipeline for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -431,7 +437,11 @@ func newGetUpdate() *cobra.Command { cmd.Short = `Get a pipeline update.` cmd.Long = `Get a pipeline update. 
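The generic permissions commands above take a REQUEST_OBJECT_TYPE and REQUEST_OBJECT_ID pair. A sketch of how such a request path could be assembled; the /api/2.0/permissions prefix is an assumption about the REST layout, not something stated in this diff:

package main

import (
	"fmt"
	"net/url"
)

// permissionsPath joins the two positional arguments into a request path.
// The "/api/2.0/permissions" prefix is assumed here for illustration.
func permissionsPath(objectType, objectID string) string {
	return fmt.Sprintf("/api/2.0/permissions/%s/%s",
		url.PathEscape(objectType), url.PathEscape(objectID))
}

func main() {
	// Jobs are one object type that supports permissions, per the hunks above.
	fmt.Println(permissionsPath("jobs", "123"))
}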
- Gets an update from an active pipeline.` + Gets an update from an active pipeline. + + Arguments: + PIPELINE_ID: The ID of the pipeline. + UPDATE_ID: The ID of the update.` cmd.Annotations = make(map[string]string) @@ -639,7 +649,10 @@ func newListUpdates() *cobra.Command { cmd.Short = `List pipeline updates.` cmd.Long = `List pipeline updates. - List updates for an active pipeline.` + List updates for an active pipeline. + + Arguments: + PIPELINE_ID: The pipeline to return updates for.` cmd.Annotations = make(map[string]string) @@ -807,7 +820,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set pipeline permissions. Sets permissions on a pipeline. Pipelines can inherit permissions from their - root object.` + root object. + + Arguments: + PIPELINE_ID: The pipeline for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -1087,7 +1103,10 @@ func newUpdate() *cobra.Command { cmd.Short = `Edit a pipeline.` cmd.Long = `Edit a pipeline. - Updates a pipeline with the supplied configuration.` + Updates a pipeline with the supplied configuration. + + Arguments: + PIPELINE_ID: Unique identifier for this pipeline.` cmd.Annotations = make(map[string]string) @@ -1171,7 +1190,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update pipeline permissions. Updates the permissions on a pipeline. Pipelines can inherit permissions from - their root object.` + their root object. + + Arguments: + PIPELINE_ID: The pipeline for which to get or manage permissions.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 1da8202d..38612089 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -63,7 +63,11 @@ func newCreate() *cobra.Command { cmd.Long = `Create an auth provider. Creates a new authentication provider minimally based on a name and - authentication type. The caller must be an admin on the metastore.` + authentication type. The caller must be an admin on the metastore. + + Arguments: + NAME: The name of the Provider. + AUTHENTICATION_TYPE: The delta sharing authentication type.` cmd.Annotations = make(map[string]string) @@ -146,7 +150,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a provider. Deletes an authentication provider, if the caller is a metastore admin or is - the owner of the provider.` + the owner of the provider. + + Arguments: + NAME: Name of the provider.` cmd.Annotations = make(map[string]string) @@ -221,7 +228,10 @@ func newGet() *cobra.Command { Gets a specific authentication provider. The caller must supply the name of the provider, and must either be a metastore admin or the owner of the - provider.` + provider. + + Arguments: + NAME: Name of the provider.` cmd.Annotations = make(map[string]string) @@ -360,7 +370,10 @@ func newListShares() *cobra.Command { Gets an array of a specified provider's shares within the metastore where: - * the caller is a metastore admin, or * the caller is the owner.` + * the caller is a metastore admin, or * the caller is the owner. + + Arguments: + NAME: Name of the provider in which to list shares.` cmd.Annotations = make(map[string]string) @@ -443,7 +456,10 @@ func newUpdate() *cobra.Command { Updates the information for an authentication provider, if the caller is a metastore admin or is the owner of the provider. If the update changes the provider name, the caller must be both a metastore admin and the owner of the - provider.` + provider. 
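providers create above takes a NAME and an AUTHENTICATION_TYPE ("the delta sharing authentication type"). A sketch of the request body those two positionals would populate; the JSON field names follow the argument names and the TOKEN value is a plausible example, both stated here as assumptions rather than read from the generated code:

package main

import (
	"encoding/json"
	"os"
)

// createProviderRequest mirrors the two positional arguments above.
// The JSON field names are assumed for illustration.
type createProviderRequest struct {
	Name               string `json:"name"`
	AuthenticationType string `json:"authentication_type"`
}

func main() {
	req := createProviderRequest{Name: "acme", AuthenticationType: "TOKEN"}
	_ = json.NewEncoder(os.Stdout).Encode(req) // {"name":"acme","authentication_type":"TOKEN"}
}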
+ + Arguments: + NAME: The name of the Provider.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/recipient-activation/recipient-activation.go b/cmd/workspace/recipient-activation/recipient-activation.go index c73b4b4a..5fb5c7b9 100755 --- a/cmd/workspace/recipient-activation/recipient-activation.go +++ b/cmd/workspace/recipient-activation/recipient-activation.go @@ -61,7 +61,10 @@ func newGetActivationUrlInfo() *cobra.Command { cmd.Short = `Get a share activation URL.` cmd.Long = `Get a share activation URL. - Gets an activation URL for a share.` + Gets an activation URL for a share. + + Arguments: + ACTIVATION_URL: The one time activation url. It also accepts activation token.` cmd.Annotations = make(map[string]string) @@ -123,7 +126,10 @@ func newRetrieveToken() *cobra.Command { cmd.Long = `Get an access token. Retrieve access token with an activation url. This is a public API without any - authentication.` + authentication. + + Arguments: + ACTIVATION_URL: The one time activation url. It also accepts activation token.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 260729cb..7498e5cb 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -82,7 +82,11 @@ func newCreate() *cobra.Command { Creates a new recipient with the delta sharing authentication type in the metastore. The caller must be a metastore admin or has the - **CREATE_RECIPIENT** privilege on the metastore.` + **CREATE_RECIPIENT** privilege on the metastore. + + Arguments: + NAME: Name of Recipient. + AUTHENTICATION_TYPE: The delta sharing authentication type.` cmd.Annotations = make(map[string]string) @@ -165,7 +169,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a share recipient. Deletes the specified recipient from the metastore. The caller must be the - owner of the recipient.` + owner of the recipient. + + Arguments: + NAME: Name of the recipient.` cmd.Annotations = make(map[string]string) @@ -240,7 +247,10 @@ func newGet() *cobra.Command { Gets a share recipient from the metastore if: - * the caller is the owner of the share recipient, or: * is a metastore admin` + * the caller is the owner of the share recipient, or: * is a metastore admin + + Arguments: + NAME: Name of the recipient.` cmd.Annotations = make(map[string]string) @@ -380,7 +390,14 @@ func newRotateToken() *cobra.Command { cmd.Long = `Rotate a token. Refreshes the specified recipient's delta sharing authentication token with - the provided token info. The caller must be the owner of the recipient.` + the provided token info. The caller must be the owner of the recipient. + + Arguments: + NAME: The name of the recipient. + EXISTING_TOKEN_EXPIRE_IN_SECONDS: The expiration time of the bearer token in ISO 8601 format. This will set + the expiration_time of existing token only to a smaller timestamp, it + cannot extend the expiration_time. Use 0 to expire the existing token + immediately, negative number will return an error.` cmd.Annotations = make(map[string]string) @@ -461,7 +478,10 @@ func newSharePermissions() *cobra.Command { cmd.Long = `Get recipient share permissions. Gets the share permissions for the specified Recipient. The caller must be a - metastore admin or the owner of the Recipient.` + metastore admin or the owner of the Recipient. 
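rotate-token above gives EXISTING_TOKEN_EXPIRE_IN_SECONDS precise semantics: it can only shrink the existing expiration, 0 expires the token immediately, and a negative number returns an error. A pre-flight check mirroring the client-side checkable part of those rules (the helper name is illustrative):

package main

import (
	"errors"
	"fmt"
)

// checkExpireInSeconds applies the rules from the rotate-token help text
// above: negative values are an error, and 0 means "expire the existing
// token immediately". The server additionally enforces that a positive
// value may only shorten, never extend, the current expiration.
func checkExpireInSeconds(seconds int64) error {
	if seconds < 0 {
		return errors.New("existing_token_expire_in_seconds must not be negative")
	}
	return nil
}

func main() {
	fmt.Println(checkExpireInSeconds(0))  // <nil>: expire the existing token now
	fmt.Println(checkExpireInSeconds(-5)) // error
}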
+ + Arguments: + NAME: The name of the Recipient.` cmd.Annotations = make(map[string]string) @@ -544,7 +564,10 @@ func newUpdate() *cobra.Command { Updates an existing recipient in the metastore. The caller must be a metastore admin or the owner of the recipient. If the recipient name will be updated, - the user must be both a metastore admin and the owner of the recipient.` + the user must be both a metastore admin and the owner of the recipient. + + Arguments: + NAME: Name of Recipient.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index e594f2eb..ddb47e55 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -99,7 +99,12 @@ func newCreate() *cobra.Command { parent catalog and schema, or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the parent - schema.` + schema. + + Arguments: + CATALOG_NAME: The name of the catalog where the schema and the registered model reside + SCHEMA_NAME: The name of the schema where the registered model resides + NAME: The name of the registered model` cmd.Annotations = make(map[string]string) @@ -187,7 +192,10 @@ func newDelete() *cobra.Command { The caller must be a metastore admin or an owner of the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent - schema.` + schema. + + Arguments: + FULL_NAME: The three-level (fully qualified) name of the registered model` cmd.Annotations = make(map[string]string) @@ -265,7 +273,11 @@ func newDeleteAlias() *cobra.Command { The caller must be a metastore admin or an owner of the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent - schema.` + schema. + + Arguments: + FULL_NAME: The three-level (fully qualified) name of the registered model + ALIAS: The name of the alias` cmd.Annotations = make(map[string]string) @@ -332,7 +344,10 @@ func newGet() *cobra.Command { The caller must be a metastore admin or an owner of (or have the **EXECUTE** privilege on) the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and - the **USE_SCHEMA** privilege on the parent schema.` + the **USE_SCHEMA** privilege on the parent schema. + + Arguments: + FULL_NAME: The three-level (fully qualified) name of the registered model` cmd.Annotations = make(map[string]string) @@ -487,7 +502,12 @@ func newSetAlias() *cobra.Command { The caller must be a metastore admin or an owner of the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent - schema.` + schema. + + Arguments: + FULL_NAME: Full name of the registered model + ALIAS: The name of the alias + VERSION_NUM: The version number of the model version to which the alias points` cmd.Annotations = make(map[string]string) @@ -582,7 +602,10 @@ func newUpdate() *cobra.Command { schema. Currently only the name, the owner or the comment of the registered model can - be updated.` + be updated. 
+ + Arguments: + FULL_NAME: The three-level (fully qualified) name of the registered model` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index e8261c01..62f63750 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -71,7 +71,13 @@ func newCreate() *cobra.Command { Creates a repo in the workspace and links it to the remote Git repo specified. Note that repos created programmatically must be linked to a remote Git repo, - unlike repos created in the browser.` + unlike repos created in the browser. + + Arguments: + URL: URL of the Git repository to be linked. + PROVIDER: Git provider. This field is case-insensitive. The available Git providers + are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, gitHubEnterprise, + bitbucketServer, gitLabEnterpriseEdition and awsCodeCommit.` cmd.Annotations = make(map[string]string) @@ -150,7 +156,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a repo.` cmd.Long = `Delete a repo. - Deletes the specified repo.` + Deletes the specified repo. + + Arguments: + REPO_ID: The ID for the corresponding repo to access.` cmd.Annotations = make(map[string]string) @@ -226,7 +235,10 @@ func newGet() *cobra.Command { cmd.Short = `Get a repo.` cmd.Long = `Get a repo. - Returns the repo with the given repo ID.` + Returns the repo with the given repo ID. + + Arguments: + REPO_ID: The ID for the corresponding repo to access.` cmd.Annotations = make(map[string]string) @@ -302,7 +314,10 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get repo permission levels.` cmd.Long = `Get repo permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + REPO_ID: The repo for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -376,7 +391,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get repo permissions. Gets the permissions of a repo. Repos can inherit permissions from their root - object.` + object. + + Arguments: + REPO_ID: The repo for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -517,7 +535,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set repo permissions. Sets permissions on a repo. Repos can inherit permissions from their root - object.` + object. + + Arguments: + REPO_ID: The repo for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -603,7 +624,10 @@ func newUpdate() *cobra.Command { cmd.Long = `Update a repo. Updates the repo to a different branch or tag, or updates the repo to the - latest commit on the same branch.` + latest commit on the same branch. + + Arguments: + REPO_ID: The ID for the corresponding repo to access.` cmd.Annotations = make(map[string]string) @@ -690,7 +714,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update repo permissions. Updates the permissions on a repo. Repos can inherit permissions from their - root object.` + root object. + + Arguments: + REPO_ID: The repo for which to get or manage permissions.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index 3313bfdb..59554edc 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -67,7 +67,11 @@ func newCreate() *cobra.Command { Creates a new schema for catalog in the Metatastore. 
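repos create above lists the available Git providers and notes the PROVIDER field is case-insensitive. A sketch that normalizes user input against that list; the canonical spellings come straight from the help text, while the helper itself is illustrative:

package main

import (
	"fmt"
	"strings"
)

// gitProviders lists the providers enumerated in the repos create help text.
var gitProviders = []string{
	"gitHub", "bitbucketCloud", "gitLab", "azureDevOpsServices",
	"gitHubEnterprise", "bitbucketServer", "gitLabEnterpriseEdition",
	"awsCodeCommit",
}

// normalizeProvider matches case-insensitively, as the help text allows,
// and returns the canonical spelling.
func normalizeProvider(input string) (string, error) {
	for _, p := range gitProviders {
		if strings.EqualFold(p, input) {
			return p, nil
		}
	}
	return "", fmt.Errorf("unknown git provider %q", input)
}

func main() {
	fmt.Println(normalizeProvider("github")) // gitHub <nil>
}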
The caller must be a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent - catalog.` + catalog. + + Arguments: + NAME: Name of schema, relative to parent catalog. + CATALOG_NAME: Name of parent catalog.` cmd.Annotations = make(map[string]string) @@ -147,7 +151,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a schema. Deletes the specified schema from the parent catalog. The caller must be the - owner of the schema or an owner of the parent catalog.` + owner of the schema or an owner of the parent catalog. + + Arguments: + FULL_NAME: Full name of the schema.` cmd.Annotations = make(map[string]string) @@ -222,7 +229,10 @@ func newGet() *cobra.Command { Gets the specified schema within the metastore. The caller must be a metastore admin, the owner of the schema, or a user that has the **USE_SCHEMA** - privilege on the schema.` + privilege on the schema. + + Arguments: + FULL_NAME: Full name of the schema.` cmd.Annotations = make(map[string]string) @@ -299,7 +309,10 @@ func newList() *cobra.Command { metastore admin or the owner of the parent catalog, all schemas for the catalog will be retrieved. Otherwise, only schemas owned by the caller (or for which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is - no guarantee of a specific ordering of the elements in the array.` + no guarantee of a specific ordering of the elements in the array. + + Arguments: + CATALOG_NAME: Parent catalog for schemas of interest.` cmd.Annotations = make(map[string]string) @@ -372,7 +385,10 @@ func newUpdate() *cobra.Command { a metastore admin. If the caller is a metastore admin, only the __owner__ field can be changed in the update. If the __name__ field must be updated, the caller must be a metastore admin or have the **CREATE_SCHEMA** privilege on - the parent catalog.` + the parent catalog. + + Arguments: + FULL_NAME: Full name of the schema.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index c124e7ef..502f233f 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -74,7 +74,10 @@ func newCreateScope() *cobra.Command { The scope name must consist of alphanumeric characters, dashes, underscores, and periods, and may not exceed 128 characters. The maximum number of scopes - in a workspace is 100.` + in a workspace is 100. + + Arguments: + SCOPE: Scope name requested by the user. Scope names are unique.` cmd.Annotations = make(map[string]string) @@ -157,7 +160,11 @@ func newDeleteAcl() *cobra.Command { Users must have the MANAGE permission to invoke this API. Throws RESOURCE_DOES_NOT_EXIST if no such secret scope, principal, or ACL exists. Throws PERMISSION_DENIED if the user does not have permission to make this - API call.` + API call. + + Arguments: + SCOPE: The name of the scope to remove permissions from. + PRINCIPAL: The principal to remove an existing ACL from.` cmd.Annotations = make(map[string]string) @@ -242,7 +249,10 @@ func newDeleteScope() *cobra.Command { Throws RESOURCE_DOES_NOT_EXIST if the scope does not exist. Throws PERMISSION_DENIED if the user does not have permission to make this API - call.` + call. + + Arguments: + SCOPE: Name of the scope to delete.` cmd.Annotations = make(map[string]string) @@ -325,7 +335,11 @@ func newDeleteSecret() *cobra.Command { Throws RESOURCE_DOES_NOT_EXIST if no such secret scope or secret exists. Throws PERMISSION_DENIED if the user does not have permission to make this - API call.` + API call. 
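create-scope above constrains scope names to alphanumeric characters, dashes, underscores, and periods, at most 128 characters. A regexp check encoding exactly those documented rules; the pattern is an illustration derived from the prose, not copied from the service:

package main

import (
	"fmt"
	"regexp"
)

// scopeNameRE encodes the rule from the create-scope help text above:
// alphanumerics, dashes, underscores, and periods, up to 128 characters.
var scopeNameRE = regexp.MustCompile(`^[a-zA-Z0-9._-]{1,128}$`)

func validScopeName(name string) bool {
	return scopeNameRE.MatchString(name)
}

func main() {
	fmt.Println(validScopeName("my-team.prod_secrets")) // true
	fmt.Println(validScopeName("bad scope name"))       // false: contains a space
}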
+ + Arguments: + SCOPE: The name of the scope that contains the secret to delete. + KEY: Name of the secret to delete.` cmd.Annotations = make(map[string]string) @@ -409,7 +423,11 @@ func newGetAcl() *cobra.Command { Throws RESOURCE_DOES_NOT_EXIST if no such secret scope exists. Throws PERMISSION_DENIED if the user does not have permission to make this API - call.` + call. + + Arguments: + SCOPE: The name of the scope to fetch ACL information from. + PRINCIPAL: The principal to fetch ACL information for.` cmd.Annotations = make(map[string]string) @@ -482,7 +500,11 @@ func newGetSecret() *cobra.Command { Throws PERMISSION_DENIED if the user does not have permission to make this API call. Throws RESOURCE_DOES_NOT_EXIST if no such secret or secret scope - exists.` + exists. + + Arguments: + SCOPE: The name of the scope to fetch secret information from. + KEY: The key to fetch secret for.` cmd.Annotations = make(map[string]string) @@ -549,7 +571,10 @@ func newListAcls() *cobra.Command { Throws RESOURCE_DOES_NOT_EXIST if no such secret scope exists. Throws PERMISSION_DENIED if the user does not have permission to make this API - call.` + call. + + Arguments: + SCOPE: The name of the scope to fetch ACL information from.` cmd.Annotations = make(map[string]string) @@ -668,7 +693,10 @@ func newListSecrets() *cobra.Command { The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws RESOURCE_DOES_NOT_EXIST if no such secret scope exists. Throws PERMISSION_DENIED if the user does not have permission to make this API - call.` + call. + + Arguments: + SCOPE: The name of the scope to list secrets within.` cmd.Annotations = make(map[string]string) @@ -756,7 +784,12 @@ func newPutAcl() *cobra.Command { RESOURCE_ALREADY_EXISTS if a permission for the principal already exists. Throws INVALID_PARAMETER_VALUE if the permission or principal is invalid. Throws PERMISSION_DENIED if the user does not have permission to make this - API call.` + API call. + + Arguments: + SCOPE: The name of the scope to apply permissions to. + PRINCIPAL: The principal in which the permission is applied. + PERMISSION: The permission level applied to the principal.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index 4068698b..60762954 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -138,7 +138,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a service principal.` cmd.Long = `Delete a service principal. - Delete a single service principal in the Databricks workspace.` + Delete a single service principal in the Databricks workspace. + + Arguments: + ID: Unique ID for a service principal in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -212,7 +215,10 @@ func newGet() *cobra.Command { cmd.Long = `Get service principal details. Gets the details for a single service principal define in the Databricks - workspace.` + workspace. + + Arguments: + ID: Unique ID for a service principal in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -358,7 +364,10 @@ func newPatch() *cobra.Command { cmd.Long = `Update service principal details. Partially updates the details of a single service principal in the Databricks - workspace.` + workspace. 
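put-acl above takes a PERMISSION level for a principal, and the delete-acl hunk notes that callers need the MANAGE permission to change ACLs. A sketch assuming the usual READ/WRITE/MANAGE levels for secret scope ACLs; only MANAGE is named in this diff, so READ and WRITE are an assumption here:

package main

import "fmt"

// AclPermission models the PERMISSION positional above. MANAGE appears in
// the help text; READ and WRITE are assumed for illustration.
type AclPermission string

const (
	AclRead   AclPermission = "READ"
	AclWrite  AclPermission = "WRITE"
	AclManage AclPermission = "MANAGE"
)

// canInvokeAclAPIs reflects the rule above: ACL changes require MANAGE.
func canInvokeAclAPIs(p AclPermission) bool {
	return p == AclManage
}

func main() {
	fmt.Println(canInvokeAclAPIs(AclManage)) // true
	fmt.Println(canInvokeAclAPIs(AclRead))   // false
}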
+ + Arguments: + ID: Unique ID for a service principal in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -451,7 +460,10 @@ func newUpdate() *cobra.Command { Updates the details of a single service principal. - This action replaces the existing service principal with the same name.` + This action replaces the existing service principal with the same name. + + Arguments: + ID: Databricks service principal ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 67614b72..8f8349a8 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -69,7 +69,13 @@ func newBuildLogs() *cobra.Command { cmd.Long = `Retrieve the logs associated with building the model's environment for a given serving endpoint's served model. - Retrieves the build logs associated with the provided served model.` + Retrieves the build logs associated with the provided served model. + + Arguments: + NAME: The name of the serving endpoint that the served model belongs to. This + field is required. + SERVED_MODEL_NAME: The name of the served model that build logs will be retrieved for. This + field is required.` cmd.Annotations = make(map[string]string) @@ -212,7 +218,10 @@ func newDelete() *cobra.Command { cmd.Use = "delete NAME" cmd.Short = `Delete a serving endpoint.` - cmd.Long = `Delete a serving endpoint.` + cmd.Long = `Delete a serving endpoint. + + Arguments: + NAME: The name of the serving endpoint. This field is required.` cmd.Annotations = make(map[string]string) @@ -274,7 +283,11 @@ func newExportMetrics() *cobra.Command { cmd.Long = `Retrieve the metrics associated with a serving endpoint. Retrieves the metrics associated with the provided serving endpoint in either - Prometheus or OpenMetrics exposition format.` + Prometheus or OpenMetrics exposition format. + + Arguments: + NAME: The name of the serving endpoint to retrieve metrics for. This field is + required.` cmd.Annotations = make(map[string]string) @@ -335,7 +348,10 @@ func newGet() *cobra.Command { cmd.Short = `Get a single serving endpoint.` cmd.Long = `Get a single serving endpoint. - Retrieves the details for a single serving endpoint.` + Retrieves the details for a single serving endpoint. + + Arguments: + NAME: The name of the serving endpoint. This field is required.` cmd.Annotations = make(map[string]string) @@ -396,7 +412,10 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get serving endpoint permission levels.` cmd.Long = `Get serving endpoint permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + SERVING_ENDPOINT_ID: The serving endpoint for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -458,7 +477,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get serving endpoint permissions. Gets the permissions of a serving endpoint. Serving endpoints can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + SERVING_ENDPOINT_ID: The serving endpoint for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -566,7 +588,13 @@ func newLogs() *cobra.Command { cmd.Long = `Retrieve the most recent log lines associated with a given serving endpoint's served model. 
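build-logs and logs above each take two required positionals, NAME and SERVED_MODEL_NAME. A sketch of how a cobra command wires those through; the logsRequest struct is a stand-in for illustration, whereas the generated code fills the SDK's own request types:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// logsRequest stands in for the SDK request type that the real generated
// command populates from its two positionals.
type logsRequest struct {
	Name            string
	ServedModelName string
}

func newLogs() *cobra.Command {
	return &cobra.Command{
		Use:  "logs NAME SERVED_MODEL_NAME",
		Args: cobra.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			req := logsRequest{Name: args[0], ServedModelName: args[1]}
			fmt.Printf("would fetch logs for %s / %s\n", req.Name, req.ServedModelName)
			return nil
		},
	}
}

func main() {
	if err := newLogs().Execute(); err != nil {
		fmt.Println(err)
	}
}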
- Retrieves the service logs associated with the provided served model.` + Retrieves the service logs associated with the provided served model. + + Arguments: + NAME: The name of the serving endpoint that the served model belongs to. This + field is required. + SERVED_MODEL_NAME: The name of the served model that logs will be retrieved for. This field + is required.` cmd.Annotations = make(map[string]string) @@ -634,7 +662,11 @@ func newPatch() *cobra.Command { cmd.Long = `Patch the tags of a serving endpoint. Used to batch add and delete tags from a serving endpoint with a single API - call.` + call. + + Arguments: + NAME: The name of the serving endpoint who's tags to patch. This field is + required.` cmd.Annotations = make(map[string]string) @@ -706,7 +738,10 @@ func newQuery() *cobra.Command { cmd.Use = "query NAME" cmd.Short = `Query a serving endpoint with provided model input.` - cmd.Long = `Query a serving endpoint with provided model input.` + cmd.Long = `Query a serving endpoint with provided model input. + + Arguments: + NAME: The name of the serving endpoint. This field is required.` cmd.Annotations = make(map[string]string) @@ -778,7 +813,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set serving endpoint permissions. Sets permissions on a serving endpoint. Serving endpoints can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + SERVING_ENDPOINT_ID: The serving endpoint for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -938,7 +976,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update serving endpoint permissions. Updates the permissions on a serving endpoint. Serving endpoints can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + SERVING_ENDPOINT_ID: The serving endpoint for which to get or manage permissions.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index c1d669de..193434d4 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -68,7 +68,16 @@ func newDeleteDefaultWorkspaceNamespace() *cobra.Command { be provided in DELETE requests (as a query parameter). The etag can be retrieved by making a GET request before the DELETE request. If the setting is updated/deleted concurrently, DELETE will fail with 409 and the request will - need to be retried by using the fresh etag in the 409 response.` + need to be retried by using the fresh etag in the 409 response. + + Arguments: + ETAG: etag used for versioning. The response is at least as fresh as the eTag + provided. This is used for optimistic concurrency control as a way to help + prevent simultaneous writes of a setting overwriting each other. It is + strongly suggested that systems make use of the etag in the read -> delete + pattern to perform setting deletions in order to avoid race conditions. + That is, get an etag from a GET request, and pass it with the DELETE + request to identify the rule set version you are deleting.` cmd.Annotations = make(map[string]string) @@ -129,7 +138,16 @@ func newReadDefaultWorkspaceNamespace() *cobra.Command { cmd.Short = `Get the default namespace setting.` cmd.Long = `Get the default namespace setting. - Gets the default namespace setting.` + Gets the default namespace setting. + + Arguments: + ETAG: etag used for versioning. The response is at least as fresh as the eTag + provided. 
This is used for optimistic concurrency control as a way to help + prevent simultaneous writes of a setting overwriting each other. It is + strongly suggested that systems make use of the etag in the read -> delete + pattern to perform setting deletions in order to avoid race conditions. + That is, get an etag from a GET request, and pass it with the DELETE + request to identify the rule set version you are deleting.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index de6cc5df..8b983c4e 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -65,7 +65,10 @@ func newCreate() *cobra.Command { Creates a new share for data objects. Data objects can be added after creation with **update**. The caller must be a metastore admin or have the - **CREATE_SHARE** privilege on the metastore.` + **CREATE_SHARE** privilege on the metastore. + + Arguments: + NAME: Name of the share.` cmd.Annotations = make(map[string]string) @@ -142,7 +145,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a share. Deletes a data object share from the metastore. The caller must be an owner of - the share.` + the share. + + Arguments: + NAME: The name of the share.` cmd.Annotations = make(map[string]string) @@ -206,7 +212,10 @@ func newGet() *cobra.Command { cmd.Long = `Get a share. Gets a data object share from the metastore. The caller must be a metastore - admin or the owner of the share.` + admin or the owner of the share. + + Arguments: + NAME: The name of the share.` cmd.Annotations = make(map[string]string) @@ -318,7 +327,10 @@ func newSharePermissions() *cobra.Command { cmd.Long = `Get permissions. Gets the permissions for a data share from the metastore. The caller must be a - metastore admin or the owner of the share.` + metastore admin or the owner of the share. + + Arguments: + NAME: The name of the share.` cmd.Annotations = make(map[string]string) @@ -399,7 +411,10 @@ func newUpdate() *cobra.Command { indefinitely for recipients to be able to access the table. Typically, you should use a group as the share owner. - Table removals through **update** do not require additional privileges.` + Table removals through **update** do not require additional privileges. + + Arguments: + NAME: Name of the share.` cmd.Annotations = make(map[string]string) @@ -474,7 +489,10 @@ func newUpdatePermissions() *cobra.Command { a metastore admin or an owner of the share. For new recipient grants, the user must also be the owner of the recipients. - recipient revocations do not require additional privileges.` + recipient revocations do not require additional privileges. + + Arguments: + NAME: The name of the share.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index b70d949a..2c6efd82 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -77,7 +77,10 @@ func newCreate() *cobra.Command { cmd.Short = `Create a storage credential.` cmd.Long = `Create a storage credential. - Creates a new storage credential.` + Creates a new storage credential. + + Arguments: + NAME: The credential name. The name must be unique within the metastore.` cmd.Annotations = make(map[string]string) @@ -156,7 +159,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a credential. Deletes a storage credential from the metastore. 
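The settings hunks above describe a read -> delete etag pattern: fetch an etag with GET, pass it to DELETE, and when a concurrent write causes a 409, retry with the fresh etag from the conflict response. A sketch of that loop against a hypothetical client; readEtag, deleteWithEtag, and conflictError are stand-ins for illustration, not the generated API:

package main

import (
	"errors"
	"fmt"
)

// conflictError models a 409 response carrying the fresh etag, as described
// in the help text above. The type is a stand-in for illustration.
type conflictError struct{ freshEtag string }

func (e *conflictError) Error() string { return "409 conflict" }

// deleteSetting performs the documented read -> delete pattern; on a 409 it
// retries the DELETE with the fresh etag. Production code would also cap
// the number of retries.
func deleteSetting(readEtag func() (string, error), deleteWithEtag func(etag string) error) error {
	etag, err := readEtag()
	if err != nil {
		return err
	}
	for {
		err := deleteWithEtag(etag)
		var conflict *conflictError
		if errors.As(err, &conflict) {
			etag = conflict.freshEtag // concurrent update: retry with the fresh etag
			continue
		}
		return err
	}
}

func main() {
	calls := 0
	err := deleteSetting(
		func() (string, error) { return "etag-1", nil },
		func(etag string) error {
			calls++
			if etag == "etag-1" {
				return &conflictError{freshEtag: "etag-2"} // simulate a concurrent write
			}
			return nil
		},
	)
	fmt.Println(err, calls) // <nil> 2
}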
The caller must be an owner - of the storage credential.` + of the storage credential. + + Arguments: + NAME: Name of the storage credential.` cmd.Annotations = make(map[string]string) @@ -231,7 +237,10 @@ func newGet() *cobra.Command { Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have some permission on the - storage credential.` + storage credential. + + Arguments: + NAME: Name of the storage credential.` cmd.Annotations = make(map[string]string) @@ -369,7 +378,10 @@ func newUpdate() *cobra.Command { cmd.Short = `Update a credential.` cmd.Long = `Update a credential. - Updates a storage credential on the metastore.` + Updates a storage credential on the metastore. + + Arguments: + NAME: The credential name. The name must be unique within the metastore.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go index f4347098..6dbad5a3 100755 --- a/cmd/workspace/system-schemas/system-schemas.go +++ b/cmd/workspace/system-schemas/system-schemas.go @@ -57,7 +57,11 @@ func newDisable() *cobra.Command { cmd.Long = `Disable a system schema. Disables the system schema and removes it from the system catalog. The caller - must be an account admin or a metastore admin.` + must be an account admin or a metastore admin. + + Arguments: + METASTORE_ID: The metastore ID under which the system schema lives. + SCHEMA_NAME: Full name of the system schema.` cmd.Annotations = make(map[string]string) @@ -123,7 +127,11 @@ func newEnable() *cobra.Command { cmd.Long = `Enable a system schema. Enables the system schema and adds it to the system catalog. The caller must - be an account admin or a metastore admin.` + be an account admin or a metastore admin. + + Arguments: + METASTORE_ID: The metastore ID under which the system schema lives. + SCHEMA_NAME: Full name of the system schema.` cmd.Annotations = make(map[string]string) @@ -189,7 +197,10 @@ func newList() *cobra.Command { cmd.Long = `List system schemas. Gets an array of system schemas for a metastore. The caller must be an account - admin or a metastore admin.` + admin or a metastore admin. + + Arguments: + METASTORE_ID: The ID for the metastore in which the system schema resides.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/table-constraints/table-constraints.go b/cmd/workspace/table-constraints/table-constraints.go index 023846a6..e17b9540 100755 --- a/cmd/workspace/table-constraints/table-constraints.go +++ b/cmd/workspace/table-constraints/table-constraints.go @@ -149,7 +149,14 @@ func newDelete() *cobra.Command { schema, and be the owner of the table. - if __cascade__ argument is **true**, the user must have the following permissions on all of the child tables: the **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** privilege - on the table's schema, and be the owner of the table.` + on the table's schema, and be the owner of the table. + + Arguments: + FULL_NAME: Full name of the table referenced by the constraint. + CONSTRAINT_NAME: The name of the constraint to delete. + CASCADE: If true, try deleting all child constraints of the current constraint. 
If + false, reject this operation if the current constraint has any child + constraints.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index a7375f97..15c05872 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -67,7 +67,10 @@ func newDelete() *cobra.Command { be the owner of the parent catalog, have the **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the - **USE_SCHEMA** privilege on the parent schema.` + **USE_SCHEMA** privilege on the parent schema. + + Arguments: + FULL_NAME: Full name of the table.` cmd.Annotations = make(map[string]string) @@ -146,7 +149,10 @@ func newGet() *cobra.Command { must be a metastore admin, be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, or be the owner of the table and have the - **SELECT** privilege on it as well.` + **SELECT** privilege on it as well. + + Arguments: + FULL_NAME: Full name of the table.` cmd.Annotations = make(map[string]string) @@ -228,7 +234,11 @@ func newList() *cobra.Command { **SELECT** privilege on) the table. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. There is no guarantee of a - specific ordering of the elements in the array.` + specific ordering of the elements in the array. + + Arguments: + CATALOG_NAME: Name of parent catalog for tables of interest. + SCHEMA_NAME: Parent schema of tables.` cmd.Annotations = make(map[string]string) @@ -305,7 +315,10 @@ func newListSummaries() *cobra.Command { or **USE_SCHEMA** privilege on the schema, provided that the user also has ownership or the **USE_CATALOG** privilege on the parent catalog. - There is no guarantee of a specific ordering of the elements in the array.` + There is no guarantee of a specific ordering of the elements in the array. + + Arguments: + CATALOG_NAME: Name of parent catalog for tables of interest.` cmd.Annotations = make(map[string]string) @@ -386,7 +399,10 @@ func newUpdate() *cobra.Command { catalog, have the **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** - privilege on the parent schema.` + privilege on the parent schema. + + Arguments: + FULL_NAME: Full name of the table.` // This command is being previewed; hide from help output. cmd.Hidden = true diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index b74b0483..d4616e0b 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -61,7 +61,11 @@ func newCreateOboToken() *cobra.Command { cmd.Short = `Create on-behalf token.` cmd.Long = `Create on-behalf token. - Creates a token on behalf of a service principal.` + Creates a token on behalf of a service principal. + + Arguments: + APPLICATION_ID: Application ID of the service principal. + LIFETIME_SECONDS: The number of seconds before the token expires.` cmd.Annotations = make(map[string]string) @@ -143,7 +147,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a token.` cmd.Long = `Delete a token. 
- Deletes a token, specified by its ID.` + Deletes a token, specified by its ID. + + Arguments: + TOKEN_ID: The ID of the token to get.` cmd.Annotations = make(map[string]string) @@ -216,7 +223,10 @@ func newGet() *cobra.Command { cmd.Short = `Get token info.` cmd.Long = `Get token info. - Gets information about a token, specified by its ID.` + Gets information about a token, specified by its ID. + + Arguments: + TOKEN_ID: The ID of the token to get.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/tokens/tokens.go b/cmd/workspace/tokens/tokens.go index dad790c5..5e6b89e8 100755 --- a/cmd/workspace/tokens/tokens.go +++ b/cmd/workspace/tokens/tokens.go @@ -135,7 +135,10 @@ func newDelete() *cobra.Command { Revokes an access token. If a token with the specified ID is not valid, this call returns an error - **RESOURCE_DOES_NOT_EXIST**.` + **RESOURCE_DOES_NOT_EXIST**. + + Arguments: + TOKEN_ID: The ID of the token to be revoked.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index b44237cf..19afad2b 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -147,7 +147,10 @@ func newDelete() *cobra.Command { cmd.Long = `Delete a user. Deletes a user. Deleting a user from a Databricks workspace also removes - objects associated with the user.` + objects associated with the user. + + Arguments: + ID: Unique ID for a user in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -228,7 +231,10 @@ func newGet() *cobra.Command { cmd.Short = `Get user details.` cmd.Long = `Get user details. - Gets information for a specific user in Databricks workspace.` + Gets information for a specific user in Databricks workspace. + + Arguments: + ID: Unique ID for a user in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -471,7 +477,10 @@ func newPatch() *cobra.Command { cmd.Long = `Update user details. Partially updates a user resource by applying the supplied operations on - specific user attributes.` + specific user attributes. + + Arguments: + ID: Unique ID for a user in the Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -635,7 +644,11 @@ func newUpdate() *cobra.Command { cmd.Short = `Replace a user.` cmd.Long = `Replace a user. - Replaces a user's information with the data supplied in request.` + Replaces a user's information with the data supplied in request. + + Arguments: + ID: Databricks user ID. This is automatically set by Databricks. Any value + provided by the client will be ignored.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index ef90eec5..427bdb58 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -84,7 +84,13 @@ func newCreate() *cobra.Command { must have **CREATE EXTERNAL VOLUME** privilege on the external location. - There are no other tables, nor volumes existing in the specified storage location. - The specified storage location is not under the location of other - tables, nor volumes, or catalogs or schemas.` + tables, nor volumes, or catalogs or schemas. 
+ + Arguments: + CATALOG_NAME: The name of the catalog where the schema and the volume are + SCHEMA_NAME: The name of the schema where the volume is + NAME: The name of the volume + VOLUME_TYPE: ` cmd.Annotations = make(map[string]string) @@ -176,7 +182,10 @@ func newDelete() *cobra.Command { The caller must be a metastore admin or an owner of the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege - on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.` + on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. + + Arguments: + FULL_NAME_ARG: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) @@ -259,7 +268,11 @@ func newList() *cobra.Command { also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - There is no guarantee of a specific ordering of the elements in the array.` + There is no guarantee of a specific ordering of the elements in the array. + + Arguments: + CATALOG_NAME: The identifier of the catalog + SCHEMA_NAME: The identifier of the schema` // This command is being previewed; hide from help output. cmd.Hidden = true @@ -329,7 +342,10 @@ func newRead() *cobra.Command { The caller must be a metastore admin or an owner of (or have the **READ VOLUME** privilege on) the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and - the **USE_SCHEMA** privilege on the parent schema.` + the **USE_SCHEMA** privilege on the parent schema. + + Arguments: + FULL_NAME_ARG: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) @@ -415,7 +431,10 @@ func newUpdate() *cobra.Command { on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. Currently only the name, the owner or the comment of the volume could be - updated.` + updated. + + Arguments: + FULL_NAME_ARG: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index c7930e29..6133d5ed 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -164,7 +164,10 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a warehouse.` cmd.Long = `Delete a warehouse. - Deletes a SQL warehouse.` + Deletes a SQL warehouse. + + Arguments: + ID: Required. Id of the SQL warehouse.` cmd.Annotations = make(map[string]string) @@ -258,7 +261,10 @@ func newEdit() *cobra.Command { cmd.Short = `Update a warehouse.` cmd.Long = `Update a warehouse. - Updates the configuration for a SQL warehouse.` + Updates the configuration for a SQL warehouse. + + Arguments: + ID: Required. Id of the warehouse to configure.` cmd.Annotations = make(map[string]string) @@ -361,7 +367,10 @@ func newGet() *cobra.Command { cmd.Short = `Get warehouse info.` cmd.Long = `Get warehouse info. - Gets the information for a single SQL warehouse.` + Gets the information for a single SQL warehouse. + + Arguments: + ID: Required. Id of the SQL warehouse.` cmd.Annotations = make(map[string]string) @@ -434,7 +443,10 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get SQL warehouse permission levels.` cmd.Long = `Get SQL warehouse permission levels. 
- Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -508,7 +520,10 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get SQL warehouse permissions. Gets the permissions of a SQL warehouse. SQL warehouses can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -696,7 +711,10 @@ func newSetPermissions() *cobra.Command { cmd.Long = `Set SQL warehouse permissions. Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions - from their root object.` + from their root object. + + Arguments: + WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -859,7 +877,10 @@ func newStart() *cobra.Command { cmd.Short = `Start a warehouse.` cmd.Long = `Start a warehouse. - Starts a SQL warehouse.` + Starts a SQL warehouse. + + Arguments: + ID: Required. Id of the SQL warehouse.` cmd.Annotations = make(map[string]string) @@ -956,7 +977,10 @@ func newStop() *cobra.Command { cmd.Short = `Stop a warehouse.` cmd.Long = `Stop a warehouse. - Stops a SQL warehouse.` + Stops a SQL warehouse. + + Arguments: + ID: Required. Id of the SQL warehouse.` cmd.Annotations = make(map[string]string) @@ -1053,7 +1077,10 @@ func newUpdatePermissions() *cobra.Command { cmd.Long = `Update SQL warehouse permissions. Updates the permissions on a SQL warehouse. SQL warehouses can inherit - permissions from their root object.` + permissions from their root object. + + Arguments: + WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/workspace-bindings/workspace-bindings.go b/cmd/workspace/workspace-bindings/workspace-bindings.go index 2d2bb5ed..f8d31fa4 100755 --- a/cmd/workspace/workspace-bindings/workspace-bindings.go +++ b/cmd/workspace/workspace-bindings/workspace-bindings.go @@ -71,7 +71,10 @@ func newGet() *cobra.Command { cmd.Long = `Get catalog workspace bindings. Gets workspace bindings of the catalog. The caller must be a metastore admin - or an owner of the catalog.` + or an owner of the catalog. + + Arguments: + NAME: The name of the catalog.` cmd.Annotations = make(map[string]string) @@ -133,7 +136,11 @@ func newGetBindings() *cobra.Command { cmd.Long = `Get securable workspace bindings. Gets workspace bindings of the securable. The caller must be a metastore admin - or an owner of the securable.` + or an owner of the securable. + + Arguments: + SECURABLE_TYPE: The type of the securable. + SECURABLE_NAME: The name of the securable.` cmd.Annotations = make(map[string]string) @@ -201,7 +208,10 @@ func newUpdate() *cobra.Command { cmd.Long = `Update catalog workspace bindings. Updates workspace bindings of the catalog. The caller must be a metastore - admin or an owner of the catalog.` + admin or an owner of the catalog. + + Arguments: + NAME: The name of the catalog.` cmd.Annotations = make(map[string]string) @@ -274,7 +284,11 @@ func newUpdateBindings() *cobra.Command { cmd.Long = `Update securable workspace bindings. Updates workspace bindings of the securable. The caller must be a metastore - admin or an owner of the securable.` + admin or an owner of the securable. 
+ + Arguments: + SECURABLE_TYPE: The type of the securable. + SECURABLE_NAME: The name of the securable.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index dcfb7147..8944638e 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -70,7 +70,10 @@ func newDelete() *cobra.Command { DIRECTORY_NOT_EMPTY. Object deletion cannot be undone and deleting a directory recursively is not - atomic.` + atomic. + + Arguments: + PATH: The absolute path of the notebook or directory.` cmd.Annotations = make(map[string]string) @@ -159,7 +162,11 @@ func newExport() *cobra.Command { If the exported data would exceed size limit, this call returns MAX_NOTEBOOK_SIZE_EXCEEDED. Currently, this API does not support exporting a - library.` + library. + + Arguments: + PATH: The absolute path of the object or directory. Exporting a directory is + only supported for the DBC, SOURCE, and AUTO format.` cmd.Annotations = make(map[string]string) @@ -232,7 +239,11 @@ func newGetPermissionLevels() *cobra.Command { cmd.Short = `Get workspace object permission levels.` cmd.Long = `Get workspace object permission levels. - Gets the permission levels that a user can have on an object.` + Gets the permission levels that a user can have on an object. + + Arguments: + WORKSPACE_OBJECT_TYPE: The workspace object type for which to get or manage permissions. + WORKSPACE_OBJECT_ID: The workspace object for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -295,7 +306,11 @@ func newGetPermissions() *cobra.Command { cmd.Long = `Get workspace object permissions. Gets the permissions of a workspace object. Workspace objects can inherit - permissions from their parent objects or root object.` + permissions from their parent objects or root object. + + Arguments: + WORKSPACE_OBJECT_TYPE: The workspace object type for which to get or manage permissions. + WORKSPACE_OBJECT_ID: The workspace object for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -358,7 +373,10 @@ func newGetStatus() *cobra.Command { cmd.Long = `Get status. Gets the status of an object or a directory. If path does not exist, this - call returns an error RESOURCE_DOES_NOT_EXIST.` + call returns an error RESOURCE_DOES_NOT_EXIST. + + Arguments: + PATH: The absolute path of the notebook or directory.` cmd.Annotations = make(map[string]string) @@ -431,7 +449,11 @@ func newImport() *cobra.Command { false, this call returns an error RESOURCE_ALREADY_EXISTS. To import a directory, you can use either the DBC format or the SOURCE format with the language field unset. To import a single file as SOURCE, you must set the - language field.` + language field. + + Arguments: + PATH: The absolute path of the object or directory. Importing a directory is + only supported for the DBC and SOURCE formats.` cmd.Annotations = make(map[string]string) @@ -511,7 +533,10 @@ func newList() *cobra.Command { Lists the contents of a directory, or the object if it is not a directory. If the input path does not exist, this call returns an error - RESOURCE_DOES_NOT_EXIST.` + RESOURCE_DOES_NOT_EXIST. + + Arguments: + PATH: The absolute path of the notebook or directory.` cmd.Annotations = make(map[string]string) @@ -579,7 +604,12 @@ func newMkdirs() *cobra.Command { path, this call returns an error RESOURCE_ALREADY_EXISTS. 
  Note that if this operation fails it may have succeeded in creating some of
-  the necessary parent directories.`
+  the necessary parent directories.
+
+  Arguments:
+    PATH: The absolute path of the directory. If the parent directories do not
+      exist, it will also create them. If the directory already exists, this
+      command will do nothing and succeed.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -664,7 +694,11 @@ func newSetPermissions() *cobra.Command {
 	cmd.Long = `Set workspace object permissions.
   
   Sets permissions on a workspace object. Workspace objects can inherit
-  permissions from their parent objects or root object.`
+  permissions from their parent objects or root object.
+
+  Arguments:
+    WORKSPACE_OBJECT_TYPE: The workspace object type for which to get or manage permissions.
+    WORKSPACE_OBJECT_ID: The workspace object for which to get or manage permissions.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -737,7 +771,11 @@ func newUpdatePermissions() *cobra.Command {
 	cmd.Long = `Update workspace object permissions.
 
   Updates the permissions on a workspace object. Workspace objects can inherit
-  permissions from their parent objects or root object.`
+  permissions from their parent objects or root object.
+
+  Arguments:
+    WORKSPACE_OBJECT_TYPE: The workspace object type for which to get or manage permissions.
+    WORKSPACE_OBJECT_ID: The workspace object for which to get or manage permissions.`
 
 	cmd.Annotations = make(map[string]string)

From 83d50001fc0474b04d640f99f8f96eab022d8f1d Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Fri, 1 Dec 2023 11:35:20 +0100
Subject: [PATCH 273/310] Pass parameters to task when run with `--python-params` and `python_wheel_wrapper` is true (#1037)

## Changes
This makes the behaviour consistent whether or not `python_wheel_wrapper` is
enabled when a job is run with the `--python-params` flag.

In `python_wheel_wrapper` mode, the CLI converts the dynamic `python_params`
into a dynamic, specially named `notebook_param`; the wrapper notebook reads
it back with `dbutils` and passes the values to `sys.argv`.

Fixes #1000

## Tests
Added an integration test. Integration tests pass.
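## Example

For illustration, a minimal, self-contained sketch of the conversion with
hypothetical input values (the actual implementation is `convertPythonParams`
in the diff below):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Values passed as `--python-params=param1,param2`.
	pythonParams := []string{"param1", "param2"}

	// The CLI JSON-encodes them into the reserved `__python_params`
	// notebook parameter.
	p, err := json.Marshal(pythonParams)
	if err != nil {
		panic(err)
	}
	notebookParams := map[string]string{"__python_params": string(p)}
	fmt.Println(notebookParams["__python_params"]) // ["param1","param2"]

	// The wrapper notebook decodes this value with dbutils and rebuilds
	// sys.argv as [sys.argv[0], "param1", "param2"] before calling the
	// wheel's entry point.
}
```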
--- bundle/python/transform.go | 13 ++++++++ bundle/run/job.go | 45 +++++++++++++++++++++++++ bundle/run/job_test.go | 49 ++++++++++++++++++++++++++++ internal/bundle/helpers.go | 12 +++++++ internal/bundle/python_wheel_test.go | 6 ++++ 5 files changed, 125 insertions(+) create mode 100644 bundle/run/job_test.go diff --git a/bundle/python/transform.go b/bundle/python/transform.go index f6207a59..a3fea2e8 100644 --- a/bundle/python/transform.go +++ b/bundle/python/transform.go @@ -31,8 +31,21 @@ except ImportError: # for Python<3.8 from contextlib import redirect_stdout import io import sys +import json + +params = [] +try: + python_params = dbutils.widgets.get("__python_params") + if python_params: + params = json.loads(python_params) +except Exception as e: + print(e) + sys.argv = [{{.Params}}] +if params: + sys.argv = [sys.argv[0]] + params + entry = [ep for ep in metadata.distribution("{{.Task.PackageName}}").entry_points if ep.name == "{{.Task.EntryPoint}}"] f = io.StringIO() diff --git a/bundle/run/job.go b/bundle/run/job.go index b94e8fef..a6343b97 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -2,6 +2,7 @@ package run import ( "context" + "encoding/json" "fmt" "strconv" "time" @@ -221,6 +222,11 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e runId := new(int64) + err = r.convertPythonParams(opts) + if err != nil { + return nil, err + } + // construct request payload from cmd line flags args req, err := opts.Job.toPayload(jobID) if err != nil { @@ -299,3 +305,42 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e return nil, err } + +func (r *jobRunner) convertPythonParams(opts *Options) error { + if r.bundle.Config.Experimental != nil && !r.bundle.Config.Experimental.PythonWheelWrapper { + return nil + } + + needConvert := false + for _, task := range r.job.Tasks { + if task.PythonWheelTask != nil { + needConvert = true + break + } + } + + if !needConvert { + return nil + } + + if len(opts.Job.pythonParams) == 0 { + return nil + } + + if opts.Job.notebookParams == nil { + opts.Job.notebookParams = make(map[string]string) + } + + if len(opts.Job.pythonParams) > 0 { + if _, ok := opts.Job.notebookParams["__python_params"]; ok { + return fmt.Errorf("can't use __python_params as notebook param, the name is reserved for internal use") + } + p, err := json.Marshal(opts.Job.pythonParams) + if err != nil { + return err + } + opts.Job.notebookParams["__python_params"] = string(p) + } + + return nil +} diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go new file mode 100644 index 00000000..e4cb4e7e --- /dev/null +++ b/bundle/run/job_test.go @@ -0,0 +1,49 @@ +package run + +import ( + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestConvertPythonParams(t *testing.T) { + job := &resources.Job{ + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + {PythonWheelTask: &jobs.PythonWheelTask{ + PackageName: "my_test_code", + EntryPoint: "run", + }}, + }, + }, + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + runner := jobRunner{key: "test", bundle: b, job: job} + + opts := &Options{ + Job: JobOptions{}, + } + runner.convertPythonParams(opts) + require.NotContains(t, opts.Job.notebookParams, 
"__python_params") + + opts = &Options{ + Job: JobOptions{ + pythonParams: []string{"param1", "param2", "param3"}, + }, + } + runner.convertPythonParams(opts) + require.Contains(t, opts.Job.notebookParams, "__python_params") + require.Equal(t, opts.Job.notebookParams["__python_params"], `["param1","param2","param3"]`) +} diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 3fd4eabc..681edc2d 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -62,6 +62,18 @@ func runResource(t *testing.T, path string, key string) (string, error) { return stdout.String(), err } +func runResourceWithParams(t *testing.T, path string, key string, params ...string) (string, error) { + ctx := context.Background() + ctx = cmdio.NewContext(ctx, cmdio.Default()) + + args := make([]string, 0) + args = append(args, "bundle", "run", key) + args = append(args, params...) + c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + stdout, _, err := c.Run() + return stdout.String(), err +} + func destroyBundle(t *testing.T, path string) error { t.Setenv("BUNDLE_ROOT", path) c := internal.NewCobraTestRunner(t, "bundle", "destroy", "--auto-approve") diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index bfc2d8b2..c94ed93a 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -41,6 +41,12 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bo require.Contains(t, out, "Hello from my func") require.Contains(t, out, "Got arguments:") require.Contains(t, out, "['my_test_code', 'one', 'two']") + + out, err = runResourceWithParams(t, bundleRoot, "some_other_job", "--python-params=param1,param2") + require.NoError(t, err) + require.Contains(t, out, "Hello from my func") + require.Contains(t, out, "Got arguments:") + require.Contains(t, out, "['my_test_code', 'param1', 'param2']") } func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { From 60a8abdcd7882e63ad6947401651e52bc636a52c Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 1 Dec 2023 13:17:04 +0100 Subject: [PATCH 274/310] Rewrite the friendly log handler (#1038) ## Changes It wasn't working because it deferred to the regular `slog.TextHandler` for the `WithAttr` and `WithGroup` functions. Both of these functions don't mutate the handler but return a new one. When the top-level logger called one of these, log records in that context used the standard handler instead of ours. To implement tracking of attributes and groups, I followed the guide at https://github.com/golang/example/blob/master/slog-handler-guide/README.md for writing custom handlers. ## Tests The new tests demonstrate formatting through `t.Log` and look good. 
---
 cmd/root/logger.go                |  60 +-------
 libs/log/handler/colors.go        |  56 +++++++
 libs/log/handler/colors_test.go   |  31 ++++
 libs/log/handler/friendly.go      | 248 ++++++++++++++++++++++++++++++
 libs/log/handler/friendly_test.go | 110 +++++++++++++
 libs/log/handler/options.go       |  15 ++
 6 files changed, 466 insertions(+), 54 deletions(-)
 create mode 100644 libs/log/handler/colors.go
 create mode 100644 libs/log/handler/colors_test.go
 create mode 100644 libs/log/handler/friendly.go
 create mode 100644 libs/log/handler/friendly_test.go
 create mode 100644 libs/log/handler/options.go

diff --git a/cmd/root/logger.go b/cmd/root/logger.go
index be342a7a..494b28fc 100644
--- a/cmd/root/logger.go
+++ b/cmd/root/logger.go
@@ -3,7 +3,6 @@ package root
 import (
 	"context"
 	"fmt"
-	"io"
 	"log/slog"
 	"os"
 
@@ -11,7 +10,7 @@ import (
 	"github.com/databricks/cli/libs/env"
 	"github.com/databricks/cli/libs/flags"
 	"github.com/databricks/cli/libs/log"
-	"github.com/fatih/color"
+	"github.com/databricks/cli/libs/log/handler"
 	"github.com/spf13/cobra"
 )
 
@@ -21,54 +20,6 @@ const (
 	envLogFormat = "DATABRICKS_LOG_FORMAT"
 )
 
-type friendlyHandler struct {
-	slog.Handler
-	w io.Writer
-}
-
-var (
-	levelTrace = color.New(color.FgYellow).Sprint("TRACE")
-	levelDebug = color.New(color.FgYellow).Sprint("DEBUG")
-	levelInfo  = color.New(color.FgGreen).Sprintf("%5s", "INFO")
-	levelWarn  = color.New(color.FgMagenta).Sprintf("%5s", "WARN")
-	levelError = color.New(color.FgRed).Sprint("ERROR")
-)
-
-func (l *friendlyHandler) coloredLevel(rec slog.Record) string {
-	switch rec.Level {
-	case log.LevelTrace:
-		return levelTrace
-	case slog.LevelDebug:
-		return levelDebug
-	case slog.LevelInfo:
-		return levelInfo
-	case slog.LevelWarn:
-		return levelWarn
-	case log.LevelError:
-		return levelError
-	}
-	return ""
-}
-
-func (l *friendlyHandler) Handle(ctx context.Context, rec slog.Record) error {
-	t := fmt.Sprintf("%02d:%02d", rec.Time.Hour(), rec.Time.Minute())
-	attrs := ""
-	rec.Attrs(func(a slog.Attr) bool {
-		attrs += fmt.Sprintf(" %s%s%s",
-			color.CyanString(a.Key),
-			color.CyanString("="),
-			color.YellowString(a.Value.String()))
-		return true
-	})
-	msg := fmt.Sprintf("%s %s %s%s\n",
-		color.MagentaString(t),
-		l.coloredLevel(rec),
-		rec.Message,
-		attrs)
-	_, err := l.w.Write([]byte(msg))
-	return err
-}
-
 type logFlags struct {
 	file  flags.LogFileFlag
 	level flags.LogLevelFlag
@@ -83,10 +34,11 @@ func (f *logFlags) makeLogHandler(opts slog.HandlerOptions) (slog.Handler, error
 	case flags.OutputText:
 		w := f.file.Writer()
 		if cmdio.IsTTY(w) {
-			return &friendlyHandler{
-				Handler: slog.NewTextHandler(w, &opts),
-				w:       w,
-			}, nil
+			return handler.NewFriendlyHandler(w, &handler.Options{
+				Color:       true,
+				Level:       opts.Level,
+				ReplaceAttr: opts.ReplaceAttr,
+			}), nil
 		}
 
 		return slog.NewTextHandler(w, &opts), nil
diff --git a/libs/log/handler/colors.go b/libs/log/handler/colors.go
new file mode 100644
index 00000000..a1b8e849
--- /dev/null
+++ b/libs/log/handler/colors.go
@@ -0,0 +1,56 @@
+package handler
+
+import "github.com/fatih/color"
+
+// ttyColors is a slice of colors that can be enabled or disabled.
+// This adds one level of indirection to the colors such that they
+// can easily be enabled or disabled together, regardless of
+// global settings in the color package.
+type ttyColors []*color.Color
+
+// ttyColor is an enum for the colors in ttyColors.
+type ttyColor int + +const ( + ttyColorInvalid ttyColor = iota + ttyColorTime + ttyColorMessage + ttyColorAttrKey + ttyColorAttrSeparator + ttyColorAttrValue + ttyColorLevelTrace + ttyColorLevelDebug + ttyColorLevelInfo + ttyColorLevelWarn + ttyColorLevelError + + // Marker for the last value to know how many colors there are. + ttyColorLevelLast +) + +func newColors(enable bool) ttyColors { + ttyColors := make(ttyColors, ttyColorLevelLast) + ttyColors[ttyColorInvalid] = color.New(color.FgWhite) + ttyColors[ttyColorTime] = color.New(color.FgBlack, color.Bold) + ttyColors[ttyColorMessage] = color.New(color.Reset) + ttyColors[ttyColorAttrKey] = color.New(color.Faint) + ttyColors[ttyColorAttrSeparator] = color.New(color.Faint) + ttyColors[ttyColorAttrValue] = color.New(color.Reset) + ttyColors[ttyColorLevelTrace] = color.New(color.FgMagenta) + ttyColors[ttyColorLevelDebug] = color.New(color.FgCyan) + ttyColors[ttyColorLevelInfo] = color.New(color.FgBlue) + ttyColors[ttyColorLevelWarn] = color.New(color.FgYellow) + ttyColors[ttyColorLevelError] = color.New(color.FgRed) + + if enable { + for _, color := range ttyColors { + color.EnableColor() + } + } else { + for _, color := range ttyColors { + color.DisableColor() + } + } + + return ttyColors +} diff --git a/libs/log/handler/colors_test.go b/libs/log/handler/colors_test.go new file mode 100644 index 00000000..aa042fb0 --- /dev/null +++ b/libs/log/handler/colors_test.go @@ -0,0 +1,31 @@ +package handler + +import ( + "fmt" + "testing" +) + +func showColors(t *testing.T, colors ttyColors) { + t.Log(colors[ttyColorInvalid].Sprint("invalid")) + t.Log(colors[ttyColorTime].Sprint("time")) + t.Log( + fmt.Sprint( + colors[ttyColorAttrKey].Sprint("key"), + colors[ttyColorAttrSeparator].Sprint("="), + colors[ttyColorAttrValue].Sprint("value"), + ), + ) + t.Log(colors[ttyColorLevelTrace].Sprint("trace")) + t.Log(colors[ttyColorLevelDebug].Sprint("debug")) + t.Log(colors[ttyColorLevelInfo].Sprint("info")) + t.Log(colors[ttyColorLevelWarn].Sprint("warn")) + t.Log(colors[ttyColorLevelError].Sprint("error")) +} + +func TestTTYColorsEnabled(t *testing.T) { + showColors(t, newColors(true)) +} + +func TestTTYColorsDisabled(t *testing.T) { + showColors(t, newColors(false)) +} diff --git a/libs/log/handler/friendly.go b/libs/log/handler/friendly.go new file mode 100644 index 00000000..33b88a9e --- /dev/null +++ b/libs/log/handler/friendly.go @@ -0,0 +1,248 @@ +package handler + +import ( + "context" + "fmt" + "io" + "log/slog" + "strings" + "sync" + "time" + + "github.com/databricks/cli/libs/log" +) + +// friendlyHandler implements a custom [slog.Handler] that writes +// human readable (and colorized) log lines to a terminal. +// +// The implementation is based on the guide at: +// https://github.com/golang/example/blob/master/slog-handler-guide/README.md +type friendlyHandler struct { + opts Options + goas []groupOrAttrs + mu *sync.Mutex + out io.Writer + + // List of colors to use for formatting. + ttyColors + + // Cache (colorized) level strings. + levelTrace string + levelDebug string + levelInfo string + levelWarn string + levelError string +} + +// groupOrAttrs holds either a group name or a list of slog.Attrs. 
+type groupOrAttrs struct {
+	group string      // group name if non-empty
+	attrs []slog.Attr // attrs if non-empty
+}
+
+func NewFriendlyHandler(out io.Writer, opts *Options) slog.Handler {
+	h := &friendlyHandler{out: out, mu: &sync.Mutex{}}
+	if opts != nil {
+		h.opts = *opts
+	}
+	if h.opts.Level == nil {
+		h.opts.Level = slog.LevelInfo
+	}
+
+	// Use the (possibly defaulted) copy in h.opts so a nil opts cannot panic.
+	h.ttyColors = newColors(h.opts.Color)
+
+	// Cache (colorized) level strings.
+	// The colors to use for each level are configured in `colors.go`.
+	h.levelTrace = h.sprintf(ttyColorLevelTrace, "%5s", "TRACE")
+	h.levelDebug = h.sprintf(ttyColorLevelDebug, "%5s", "DEBUG")
+	h.levelInfo = h.sprintf(ttyColorLevelInfo, "%5s", "INFO")
+	h.levelWarn = h.sprintf(ttyColorLevelWarn, "%5s", "WARN")
+	h.levelError = h.sprintf(ttyColorLevelError, "%5s", "ERROR")
+	return h
+}
+
+func (h *friendlyHandler) sprint(color ttyColor, args ...any) string {
+	return h.ttyColors[color].Sprint(args...)
+}
+
+func (h *friendlyHandler) sprintf(color ttyColor, format string, args ...any) string {
+	return h.ttyColors[color].Sprintf(format, args...)
+}
+
+func (h *friendlyHandler) coloredLevel(r slog.Record) string {
+	switch r.Level {
+	case log.LevelTrace:
+		return h.levelTrace
+	case log.LevelDebug:
+		return h.levelDebug
+	case log.LevelInfo:
+		return h.levelInfo
+	case log.LevelWarn:
+		return h.levelWarn
+	case log.LevelError:
+		return h.levelError
+	}
+	return ""
+}
+
+// Enabled implements slog.Handler.
+func (h *friendlyHandler) Enabled(ctx context.Context, level slog.Level) bool {
+	return level >= h.opts.Level.Level()
+}
+
+type handleState struct {
+	h *friendlyHandler
+
+	buf    []byte
+	prefix string
+
+	// Keep stack of groups to pass to [slog.ReplaceAttr] function.
+	groups []string
+}
+
+func (h *friendlyHandler) handleState() *handleState {
+	return &handleState{
+		h: h,
+
+		buf:    make([]byte, 0, 1024),
+		prefix: "",
+	}
+}
+
+func (s *handleState) openGroup(name string) {
+	s.groups = append(s.groups, name)
+	s.prefix += name + "."
+}
+
+func (s *handleState) closeGroup(name string) {
+	s.prefix = s.prefix[:len(s.prefix)-len(name)-1]
+	s.groups = s.groups[:len(s.groups)-1]
+}
+
+func (s *handleState) append(args ...any) {
+	s.buf = fmt.Append(s.buf, args...)
+}
+
+func (s *handleState) appendf(format string, args ...any) {
+	s.buf = fmt.Appendf(s.buf, format, args...)
+}
+
+func (s *handleState) appendAttr(a slog.Attr) {
+	if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != slog.KindGroup {
+		// Resolve before calling ReplaceAttr, so the user doesn't have to.
+		a.Value = a.Value.Resolve()
+		a = rep(s.groups, a)
+	}
+
+	// Resolve the Attr's value before doing anything else.
+	a.Value = a.Value.Resolve()
+
+	// Ignore empty Attrs.
+	if a.Equal(slog.Attr{}) {
+		return
+	}
+
+	switch a.Value.Kind() {
+	case slog.KindGroup:
+		attrs := a.Value.Group()
+		// Output only non-empty groups.
+		if len(attrs) > 0 {
+			if a.Key != "" {
+				s.openGroup(a.Key)
+			}
+			for _, aa := range attrs {
+				s.appendAttr(aa)
+			}
+			if a.Key != "" {
+				s.closeGroup(a.Key)
+			}
+		}
+	case slog.KindTime:
+		s.append(
+			" ",
+			s.h.sprint(ttyColorAttrKey, s.prefix, a.Key),
+			s.h.sprint(ttyColorAttrSeparator, "="),
+			s.h.sprint(ttyColorAttrValue, a.Value.Time().Format(time.RFC3339Nano)),
+		)
+	default:
+		str := a.Value.String()
+		format := "%s"
+
+		// Quote values with spaces, to make them easy to parse.
+ if strings.ContainsAny(str, " \t\n") { + format = "%q" + } + + s.append( + " ", + s.h.sprint(ttyColorAttrKey, s.prefix, a.Key), + s.h.sprint(ttyColorAttrSeparator, "="), + s.h.sprint(ttyColorAttrValue, fmt.Sprintf(format, str)), + ) + } +} + +// Handle implements slog.Handler. +func (h *friendlyHandler) Handle(ctx context.Context, r slog.Record) error { + state := h.handleState() + state.append(h.sprintf(ttyColorTime, "%02d:%02d:%02d ", r.Time.Hour(), r.Time.Minute(), r.Time.Second())) + state.appendf("%s ", h.coloredLevel(r)) + state.append(h.sprint(ttyColorMessage, r.Message)) + + // Handle state from WithGroup and WithAttrs. + goas := h.goas + if r.NumAttrs() == 0 { + // If the record has no Attrs, remove groups at the end of the list; they are empty. + for len(goas) > 0 && goas[len(goas)-1].group != "" { + goas = goas[:len(goas)-1] + } + } + for _, goa := range goas { + if goa.group != "" { + state.openGroup(goa.group) + } else { + for _, a := range goa.attrs { + state.appendAttr(a) + } + } + } + + // Add attributes from the record. + r.Attrs(func(a slog.Attr) bool { + state.appendAttr(a) + return true + }) + + // Add newline. + state.append("\n") + + // Write the log line. + h.mu.Lock() + defer h.mu.Unlock() + _, err := h.out.Write(state.buf) + return err +} + +func (h *friendlyHandler) withGroupOrAttrs(goa groupOrAttrs) *friendlyHandler { + h2 := *h + h2.goas = make([]groupOrAttrs, len(h.goas)+1) + copy(h2.goas, h.goas) + h2.goas[len(h2.goas)-1] = goa + return &h2 +} + +// WithGroup implements slog.Handler. +func (h *friendlyHandler) WithGroup(name string) slog.Handler { + if name == "" { + return h + } + return h.withGroupOrAttrs(groupOrAttrs{group: name}) +} + +// WithAttrs implements slog.Handler. +func (h *friendlyHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if len(attrs) == 0 { + return h + } + return h.withGroupOrAttrs(groupOrAttrs{attrs: attrs}) +} diff --git a/libs/log/handler/friendly_test.go b/libs/log/handler/friendly_test.go new file mode 100644 index 00000000..ca6e823e --- /dev/null +++ b/libs/log/handler/friendly_test.go @@ -0,0 +1,110 @@ +package handler + +import ( + "bytes" + "context" + "log/slog" + "strings" + "testing" + "time" + + "github.com/databricks/cli/libs/log" +) + +func TestFriendlyHandler(t *testing.T) { + var out bytes.Buffer + + handler := NewFriendlyHandler(&out, &Options{ + Color: true, + Level: log.LevelTrace, + }) + + logger := slog.New(handler) + + // Helper function to run a test case and print the output. + run := func(fn func()) { + out.Reset() + fn() + t.Log(strings.TrimSpace(out.String())) + } + + // One line per level. + for _, level := range []slog.Level{ + log.LevelTrace, + log.LevelDebug, + log.LevelInfo, + log.LevelWarn, + log.LevelError, + } { + run(func() { + logger.Log(context.Background(), level, "simple message") + }) + } + + // Single key/value pair. + run(func() { + logger.Info("simple message", "key", "value") + }) + + // Multiple key/value pairs. + run(func() { + logger.Info("simple message", "key1", "value", "key2", "value") + }) + + // Multiple key/value pairs with duplicate keys. + run(func() { + logger.Info("simple message", "key", "value", "key", "value") + }) + + // Log message with time. + run(func() { + logger.Info("simple message", "time", time.Now()) + }) + + // Log message with grouped key/value pairs. + run(func() { + logger.Info("simple message", slog.Group("group", slog.String("key", "value"))) + }) + + // Add key/value pairs to logger. 
+ run(func() { + logger.With("logger_key", "value").Info("simple message") + }) + + // Add group to logger. + run(func() { + logger.WithGroup("logger_group").Info("simple message", "key", "value") + }) + + // Add group and key/value pairs to logger. + run(func() { + logger.WithGroup("logger_group").With("logger_key", "value").Info("simple message") + }) +} + +func TestFriendlyHandlerReplaceAttr(t *testing.T) { + var out bytes.Buffer + + handler := NewFriendlyHandler(&out, &Options{ + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == "key" { + a.Key = "replaced" + } + return a + }, + }) + + logger := slog.New(handler) + + // Helper function to run a test case and print the output. + run := func(fn func()) { + out.Reset() + fn() + t.Log(strings.TrimSpace(out.String())) + } + + // ReplaceAttr replaces attributes. + run(func() { + logger.Info("simple message", "key", "value") + }) +} diff --git a/libs/log/handler/options.go b/libs/log/handler/options.go new file mode 100644 index 00000000..0b8cfbe2 --- /dev/null +++ b/libs/log/handler/options.go @@ -0,0 +1,15 @@ +package handler + +import "log/slog" + +type Options struct { + // Color enables colorized output. + Color bool + + // Level is the minimum enabled logging level. + Level slog.Leveler + + // ReplaceAttr is a function that can be used to replace attributes. + // Semantics are identical to [slog.ReplaceAttr]. + ReplaceAttr func(groups []string, a slog.Attr) slog.Attr +} From cdf29da27bcdbc1cc68c5c23f219f9397ff65d03 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 1 Dec 2023 14:24:55 +0100 Subject: [PATCH 275/310] Change default_python template to auto-update version on each wheel build (#1034) ## Changes Change default_python template to auto-update version on each wheel build --- .../default-python/template/{{.project_name}}/setup.py.tmpl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl index 4eb6b8f9..a0852c72 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/setup.py.tmpl @@ -10,11 +10,14 @@ from setuptools import setup, find_packages import sys sys.path.append('./src') +import datetime import {{.project_name}} setup( name="{{.project_name}}", - version={{.project_name}}.__version__, + # We use timestamp as Local version identifier (https://peps.python.org/pep-0440/#local-version-identifiers.) 
+ # to ensure that changes to wheel package are picked up when used on all-purpose clusters + version={{.project_name}}.__version__ + "+" + datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S"), url="https://databricks.com", author="{{user_name}}", description="wheel file based on {{.project_name}}/src", From 66e923261dc177866d294c4ac115fc700c80ba15 Mon Sep 17 00:00:00 2001 From: Fabian Jakobs Date: Mon, 4 Dec 2023 15:40:52 +0000 Subject: [PATCH 276/310] Ask for host when .databrickscfg doesn't exist (#1041) ## Changes Ask for host when .databrickscfg doesn't exist This fixes a regression introduced by https://github.com/databricks/cli/pull/1003 --- cmd/auth/login.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/auth/login.go b/cmd/auth/login.go index bbc88c12..b0bc7a85 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -133,7 +133,7 @@ func setHost(ctx context.Context, profileName string, persistentAuth *auth.Persi return p.Name == profileName }) // Tolerate ErrNoConfiguration here, as we will write out a configuration as part of the login flow. - if !errors.Is(err, databrickscfg.ErrNoConfiguration) { + if err != nil && !errors.Is(err, databrickscfg.ErrNoConfiguration) { return err } if persistentAuth.Host == "" { From e9ed828119d867c176b2f6bea8c3488239431db8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:36:14 +0100 Subject: [PATCH 277/310] Bump github.com/databricks/databricks-sdk-go from 0.26.0 to 0.26.1 (#1040) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.26.0 to 0.26.1.
Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog:

0.26.1

Minor changes:

- Support overriding `DatabricksEnvironment` (#723).
- Detect `Accept` header in `httpclient.WithResponseUnmarshal` (#710).
- Detect `Content-Type` header in `newRequestBody` for `httpclient` (#711).

Bug fixes:

- Retry request on `REQUEST_LIMIT_EXCEEDED` error returned by the SCIM API (#721).
- Match retry logic of pre-refactor SDK (#722).

Commits:

- e86cbfd Release v0.26.1 (#725)
- 89952ab Wrap `url.Error` in `APIError` (#722)
- 6f60032 Detect `Content-Type` header in `newRequestBody` for `httpclient` (#711)
- 9527c7e Detect `Accept` header in `httpclient.WithResponseUnmarshal` (#710)
- 6fd1ca7 Support overriding `DatabricksEnvironment` (#723)
- 379b6e9 Retry request on `REQUEST_LIMIT_EXCEEDED` error returned by the SCIM API (#721)
- See full diff in compare view
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8d09c3df..099c94aa 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.26.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.26.1 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.4.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 4b4ade21..7ce8ed2a 100644 --- a/go.sum +++ b/go.sum @@ -31,8 +31,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.26.0 h1:RItNgdWm+5kWYSzgtflWFp5T+OvIEVNxPnPbPYsXaaY= -github.com/databricks/databricks-sdk-go v0.26.0/go.mod h1:cyFYsqaDiIdaKPdNAuh+YsMUL1k9Lt02JB/72+zgCxg= +github.com/databricks/databricks-sdk-go v0.26.1 h1:Wumg1H1K7Y3bNSRWERLE+9+BbCGljZAEwv/xc+xhT6s= +github.com/databricks/databricks-sdk-go v0.26.1/go.mod h1:cyFYsqaDiIdaKPdNAuh+YsMUL1k9Lt02JB/72+zgCxg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From a6752a538882ff15338a37bf76d346b87134f12d Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 6 Dec 2023 13:42:17 +0530 Subject: [PATCH 278/310] Add list of supported values for flags that represent an enum field (#1036) ## Changes This PR adds the list of supported values for flags that represent an enum field in the flag's documentation. --- .codegen/service.go.tmpl | 9 +++- cmd/account/groups/groups.go | 2 +- cmd/account/log-delivery/log-delivery.go | 2 +- cmd/account/private-access/private-access.go | 4 +- .../service-principals/service-principals.go | 2 +- cmd/account/users/users.go | 4 +- cmd/account/workspaces/workspaces.go | 9 +++- cmd/workspace/catalogs/catalogs.go | 4 +- .../cluster-policies/cluster-policies.go | 4 +- cmd/workspace/clusters/clusters.go | 44 ++++++++++++++++--- cmd/workspace/dashboards/dashboards.go | 2 +- cmd/workspace/experiments/experiments.go | 6 +-- cmd/workspace/groups/groups.go | 2 +- cmd/workspace/jobs/jobs.go | 4 +- cmd/workspace/metastores/metastores.go | 2 +- .../model-registry/model-registry.go | 19 ++++++-- cmd/workspace/pipelines/pipelines.go | 9 +++- cmd/workspace/schemas/schemas.go | 2 +- cmd/workspace/secrets/secrets.go | 2 +- .../service-principals/service-principals.go | 2 +- cmd/workspace/users/users.go | 4 +- cmd/workspace/warehouses/warehouses.go | 10 ++--- cmd/workspace/workspace/workspace.go | 20 +++++++-- 23 files changed, 123 insertions(+), 45 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index b5916cbe..fb2332d5 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -26,6 +26,13 @@ import ( {{skipThisFile}} {{end}} +{{define "printArray" -}} +{{if le (len .) 
5 -}} + [{{range $index, $element := .}}{{if ne $index 0}}, {{end}}{{$element.Name}}{{end}}] +{{- else -}}[{{range $index, $element := .}} + {{$element.Name}},{{end}} +]{{end}}{{end}} + {{define "service"}} // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. @@ -106,7 +113,7 @@ func new{{.PascalName}}() *cobra.Command { {{else if .Entity.ArrayValue }}// TODO: array: {{.Name}} {{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}} {{else if .Entity.IsEmpty }}// TODO: output-only field - {{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`"}}`) + {{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`) {{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`) {{end}} {{- end -}} diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 42333c18..ed1fa164 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -293,7 +293,7 @@ func newList() *cobra.Command { cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) - cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results. Supported values: [ascending, descending]`) cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index 782d71ac..1846e0fd 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -281,7 +281,7 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.CredentialsId, "credentials-id", listReq.CredentialsId, `Filter by credential configuration ID.`) - cmd.Flags().Var(&listReq.Status, "status", `Filter by status ENABLED or DISABLED.`) + cmd.Flags().Var(&listReq.Status, "status", `Filter by status ENABLED or DISABLED. 
Supported values: [DISABLED, ENABLED]`) cmd.Flags().StringVar(&listReq.StorageConfigurationId, "storage-configuration-id", listReq.StorageConfigurationId, `Filter by storage configuration ID.`) cmd.Use = "list" diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 2f3a1a7e..8470415c 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -54,7 +54,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: allowed_vpc_endpoint_ids - cmd.Flags().Var(&createReq.PrivateAccessLevel, "private-access-level", `The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object.`) + cmd.Flags().Var(&createReq.PrivateAccessLevel, "private-access-level", `The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. Supported values: [ACCOUNT, ENDPOINT]`) cmd.Flags().BoolVar(&createReq.PublicAccessEnabled, "public-access-enabled", createReq.PublicAccessEnabled, `Determines if the workspace can be accessed over public internet.`) cmd.Use = "create PRIVATE_ACCESS_SETTINGS_NAME REGION" @@ -373,7 +373,7 @@ func newReplace() *cobra.Command { cmd.Flags().Var(&replaceJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: allowed_vpc_endpoint_ids - cmd.Flags().Var(&replaceReq.PrivateAccessLevel, "private-access-level", `The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object.`) + cmd.Flags().Var(&replaceReq.PrivateAccessLevel, "private-access-level", `The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. Supported values: [ACCOUNT, ENDPOINT]`) cmd.Flags().BoolVar(&replaceReq.PublicAccessEnabled, "public-access-enabled", replaceReq.PublicAccessEnabled, `Determines if the workspace can be accessed over public internet.`) cmd.Use = "replace PRIVATE_ACCESS_SETTINGS_ID PRIVATE_ACCESS_SETTINGS_NAME REGION" diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index 79098217..80f1bf46 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -292,7 +292,7 @@ func newList() *cobra.Command { cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) - cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results. 
Supported values: [ascending, descending]`) cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 294aba1c..551766e8 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -224,7 +224,7 @@ func newGet() *cobra.Command { cmd.Flags().StringVar(&getReq.ExcludedAttributes, "excluded-attributes", getReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&getReq.Filter, "filter", getReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&getReq.SortBy, "sort-by", getReq.SortBy, `Attribute to sort the results.`) - cmd.Flags().Var(&getReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().Var(&getReq.SortOrder, "sort-order", `The order to sort the results. Supported values: [ascending, descending]`) cmd.Flags().IntVar(&getReq.StartIndex, "start-index", getReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "get ID" @@ -308,7 +308,7 @@ func newList() *cobra.Command { cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) - cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results. Supported values: [ascending, descending]`) cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 332f5262..500a7b77 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -77,7 +77,14 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`) cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``) - cmd.Flags().Var(&createReq.PricingTier, "pricing-tier", `The pricing tier of the workspace.`) + cmd.Flags().Var(&createReq.PricingTier, "pricing-tier", `The pricing tier of the workspace. 
Supported values: [ + COMMUNITY_EDITION, + DEDICATED, + ENTERPRISE, + PREMIUM, + STANDARD, + UNKNOWN, +]`) cmd.Flags().StringVar(&createReq.PrivateAccessSettingsId, "private-access-settings-id", createReq.PrivateAccessSettingsId, `ID of the workspace's private access settings object.`) cmd.Flags().StringVar(&createReq.StorageConfigurationId, "storage-configuration-id", createReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) cmd.Flags().StringVar(&createReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", createReq.StorageCustomerManagedKeyId, `The ID of the workspace's storage encryption key configuration object.`) diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 58e85bda..f66934da 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -336,8 +336,8 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) - cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it.`) - cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces.`) + cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT]`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATED, OPEN]`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of catalog.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of catalog.`) // TODO: map via StringToStringVar: properties diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index a5464479..18f15570 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -570,8 +570,8 @@ func newList() *cobra.Command { // TODO: short flags - cmd.Flags().Var(&listReq.SortColumn, "sort-column", `The cluster policy attribute to sort by.`) - cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order in which the policies get listed.`) + cmd.Flags().Var(&listReq.SortColumn, "sort-column", `The cluster policy attribute to sort by. Supported values: [POLICY_CREATION_TIME, POLICY_NAME]`) + cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order in which the policies get listed. 
Supported values: [ASC, DESC]`) cmd.Use = "list" cmd.Short = `List cluster policies.` diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index 627e2275..76f15270 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -178,9 +178,24 @@ func newCreate() *cobra.Command { // TODO: complex arg: azure_attributes // TODO: complex arg: cluster_log_conf cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) - cmd.Flags().Var(&createReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.`) + cmd.Flags().Var(&createReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [ + API, + JOB, + MODELS, + PIPELINE, + PIPELINE_MAINTENANCE, + SQL, + UI, +]`) // TODO: map via StringToStringVar: custom_tags - cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster.`) + cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ + LEGACY_PASSTHROUGH, + LEGACY_SINGLE_USER, + LEGACY_TABLE_ACL, + NONE, + SINGLE_USER, + USER_ISOLATION, +]`) // TODO: complex arg: docker_image cmd.Flags().StringVar(&createReq.DriverInstancePoolId, "driver-instance-pool-id", createReq.DriverInstancePoolId, `The optional ID of the instance pool for the driver of the cluster belongs.`) cmd.Flags().StringVar(&createReq.DriverNodeTypeId, "driver-node-type-id", createReq.DriverNodeTypeId, `The node type of the Spark driver.`) @@ -192,7 +207,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.NodeTypeId, "node-type-id", createReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`) cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) - cmd.Flags().Var(&createReq.RuntimeEngine, "runtime-engine", `Decides which runtime engine to be use, e.g.`) + cmd.Flags().Var(&createReq.RuntimeEngine, "runtime-engine", `Decides which runtime engine to be use, e.g. 
Supported values: [NULL, PHOTON, STANDARD]`) cmd.Flags().StringVar(&createReq.SingleUserName, "single-user-name", createReq.SingleUserName, `Single user name if data_security_mode is SINGLE_USER.`) // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars @@ -418,9 +433,24 @@ func newEdit() *cobra.Command { // TODO: complex arg: azure_attributes // TODO: complex arg: cluster_log_conf cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`) - cmd.Flags().Var(&editReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.`) + cmd.Flags().Var(&editReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [ + API, + JOB, + MODELS, + PIPELINE, + PIPELINE_MAINTENANCE, + SQL, + UI, +]`) // TODO: map via StringToStringVar: custom_tags - cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster.`) + cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ + LEGACY_PASSTHROUGH, + LEGACY_SINGLE_USER, + LEGACY_TABLE_ACL, + NONE, + SINGLE_USER, + USER_ISOLATION, +]`) // TODO: complex arg: docker_image cmd.Flags().StringVar(&editReq.DriverInstancePoolId, "driver-instance-pool-id", editReq.DriverInstancePoolId, `The optional ID of the instance pool for the driver of the cluster belongs.`) cmd.Flags().StringVar(&editReq.DriverNodeTypeId, "driver-node-type-id", editReq.DriverNodeTypeId, `The node type of the Spark driver.`) @@ -432,7 +462,7 @@ func newEdit() *cobra.Command { cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`) cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&editReq.PolicyId, "policy-id", editReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) - cmd.Flags().Var(&editReq.RuntimeEngine, "runtime-engine", `Decides which runtime engine to be use, e.g.`) + cmd.Flags().Var(&editReq.RuntimeEngine, "runtime-engine", `Decides which runtime engine to be use, e.g. Supported values: [NULL, PHOTON, STANDARD]`) cmd.Flags().StringVar(&editReq.SingleUserName, "single-user-name", editReq.SingleUserName, `Single user name if data_security_mode is SINGLE_USER.`) // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars @@ -553,7 +583,7 @@ func newEvents() *cobra.Command { // TODO: array: event_types cmd.Flags().Int64Var(&eventsReq.Limit, "limit", eventsReq.Limit, `The maximum number of events to include in a page of events.`) cmd.Flags().Int64Var(&eventsReq.Offset, "offset", eventsReq.Offset, `The offset in the result set.`) - cmd.Flags().Var(&eventsReq.Order, "order", `The order to list events in; either "ASC" or "DESC".`) + cmd.Flags().Var(&eventsReq.Order, "order", `The order to list events in; either "ASC" or "DESC". 
Supported values: [ASC, DESC]`) cmd.Flags().Int64Var(&eventsReq.StartTime, "start-time", eventsReq.StartTime, `The start time in epoch milliseconds.`) cmd.Use = "events CLUSTER_ID" diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 8823ef53..3346a5e0 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -267,7 +267,7 @@ func newList() *cobra.Command { // TODO: short flags - cmd.Flags().Var(&listReq.Order, "order", `Name of dashboard attribute to order by.`) + cmd.Flags().Var(&listReq.Order, "order", `Name of dashboard attribute to order by. Supported values: [created_at, name]`) cmd.Flags().IntVar(&listReq.Page, "page", listReq.Page, `Page number to retrieve.`) cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Number of dashboards to return per page.`) cmd.Flags().StringVar(&listReq.Q, "q", listReq.Q, `Full text search term.`) diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 3c074620..79828714 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -1811,7 +1811,7 @@ func newSearchExperiments() *cobra.Command { cmd.Flags().Int64Var(&searchExperimentsReq.MaxResults, "max-results", searchExperimentsReq.MaxResults, `Maximum number of experiments desired.`) // TODO: array: order_by cmd.Flags().StringVar(&searchExperimentsReq.PageToken, "page-token", searchExperimentsReq.PageToken, `Token indicating the page of experiments to fetch.`) - cmd.Flags().Var(&searchExperimentsReq.ViewType, "view-type", `Qualifier for type of experiments to be returned.`) + cmd.Flags().Var(&searchExperimentsReq.ViewType, "view-type", `Qualifier for type of experiments to be returned. Supported values: [ACTIVE_ONLY, ALL, DELETED_ONLY]`) cmd.Use = "search-experiments" cmd.Short = `Search experiments.` @@ -1886,7 +1886,7 @@ func newSearchRuns() *cobra.Command { cmd.Flags().IntVar(&searchRunsReq.MaxResults, "max-results", searchRunsReq.MaxResults, `Maximum number of runs desired.`) // TODO: array: order_by cmd.Flags().StringVar(&searchRunsReq.PageToken, "page-token", searchRunsReq.PageToken, `Token for the current page of runs.`) - cmd.Flags().Var(&searchRunsReq.RunViewType, "run-view-type", `Whether to display only active, only deleted, or all runs.`) + cmd.Flags().Var(&searchRunsReq.RunViewType, "run-view-type", `Whether to display only active, only deleted, or all runs. Supported values: [ACTIVE_ONLY, ALL, DELETED_ONLY]`) cmd.Use = "search-runs" cmd.Short = `Search for runs.` @@ -2378,7 +2378,7 @@ func newUpdateRun() *cobra.Command { cmd.Flags().Int64Var(&updateRunReq.EndTime, "end-time", updateRunReq.EndTime, `Unix timestamp in milliseconds of when the run ended.`) cmd.Flags().StringVar(&updateRunReq.RunId, "run-id", updateRunReq.RunId, `ID of the run to update.`) cmd.Flags().StringVar(&updateRunReq.RunUuid, "run-uuid", updateRunReq.RunUuid, `[Deprecated, use run_id instead] ID of the run to update.`) - cmd.Flags().Var(&updateRunReq.Status, "status", `Updated status of the run.`) + cmd.Flags().Var(&updateRunReq.Status, "status", `Updated status of the run. 
Supported values: [FAILED, FINISHED, KILLED, RUNNING, SCHEDULED]`) cmd.Use = "update-run" cmd.Short = `Update a run.` diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index f2888485..588bce31 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -293,7 +293,7 @@ func newList() *cobra.Command { cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) - cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results. Supported values: [ascending, descending]`) cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 6183c282..c7dfdf2b 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -495,7 +495,7 @@ func newExportRun() *cobra.Command { // TODO: short flags - cmd.Flags().Var(&exportRunReq.ViewsToExport, "views-to-export", `Which views to export (CODE, DASHBOARDS, or ALL).`) + cmd.Flags().Var(&exportRunReq.ViewsToExport, "views-to-export", `Which views to export (CODE, DASHBOARDS, or ALL). Supported values: [ALL, CODE, DASHBOARDS]`) cmd.Use = "export-run RUN_ID" cmd.Short = `Export and retrieve a job run.` @@ -1057,7 +1057,7 @@ func newListRuns() *cobra.Command { cmd.Flags().IntVar(&listRunsReq.Limit, "limit", listRunsReq.Limit, `The number of runs to return.`) cmd.Flags().IntVar(&listRunsReq.Offset, "offset", listRunsReq.Offset, `The offset of the first run to return, relative to the most recent run.`) cmd.Flags().StringVar(&listRunsReq.PageToken, "page-token", listRunsReq.PageToken, `Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of runs respectively.`) - cmd.Flags().Var(&listRunsReq.RunType, "run-type", `The type of runs to return.`) + cmd.Flags().Var(&listRunsReq.RunType, "run-type", `The type of runs to return. 
Supported values: [JOB_RUN, SUBMIT_RUN, WORKFLOW_RUN]`) cmd.Flags().IntVar(&listRunsReq.StartTimeFrom, "start-time-from", listRunsReq.StartTimeFrom, `Show runs that started _at or after_ this value.`) cmd.Flags().IntVar(&listRunsReq.StartTimeTo, "start-time-to", listRunsReq.StartTimeTo, `Show runs that started _at or before_ this value.`) diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 3ca6fb55..2f5d2195 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -618,7 +618,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.DeltaSharingOrganizationName, "delta-sharing-organization-name", updateReq.DeltaSharingOrganizationName, `The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.`) cmd.Flags().Int64Var(&updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, "delta-sharing-recipient-token-lifetime-in-seconds", updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, `The lifetime of delta sharing recipient token in seconds.`) - cmd.Flags().Var(&updateReq.DeltaSharingScope, "delta-sharing-scope", `The scope of Delta Sharing enabled for the metastore.`) + cmd.Flags().Var(&updateReq.DeltaSharingScope, "delta-sharing-scope", `The scope of Delta Sharing enabled for the metastore. Supported values: [INTERNAL, INTERNAL_AND_EXTERNAL]`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The user-specified name of the metastore.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the metastore.`) cmd.Flags().StringVar(&updateReq.PrivilegeModelVersion, "privilege-model-version", updateReq.PrivilegeModelVersion, `Privilege model version of the metastore, of the form major.minor (e.g., 1.0).`) diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 241e885b..fade898e 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -544,7 +544,7 @@ func newCreateWebhook() *cobra.Command { // TODO: complex arg: http_url_spec // TODO: complex arg: job_spec cmd.Flags().StringVar(&createWebhookReq.ModelName, "model-name", createWebhookReq.ModelName, `Name of the model whose events would trigger this webhook.`) - cmd.Flags().Var(&createWebhookReq.Status, "status", `Enable or disable triggering the webhook, or put the webhook into test mode.`) + cmd.Flags().Var(&createWebhookReq.Status, "status", `Enable or disable triggering the webhook, or put the webhook into test mode. Supported values: [ACTIVE, DISABLED, TEST_MODE]`) cmd.Use = "create-webhook" cmd.Short = `Create a webhook.` @@ -2274,7 +2274,20 @@ func newTestRegistryWebhook() *cobra.Command { // TODO: short flags cmd.Flags().Var(&testRegistryWebhookJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().Var(&testRegistryWebhookReq.Event, "event", `If event is specified, the test trigger uses the specified event.`) + cmd.Flags().Var(&testRegistryWebhookReq.Event, "event", `If event is specified, the test trigger uses the specified event. 
Supported values: [ + COMMENT_CREATED, + MODEL_VERSION_CREATED, + MODEL_VERSION_TAG_SET, + MODEL_VERSION_TRANSITIONED_STAGE, + MODEL_VERSION_TRANSITIONED_TO_ARCHIVED, + MODEL_VERSION_TRANSITIONED_TO_PRODUCTION, + MODEL_VERSION_TRANSITIONED_TO_STAGING, + REGISTERED_MODEL_CREATED, + TRANSITION_REQUEST_CREATED, + TRANSITION_REQUEST_TO_ARCHIVED_CREATED, + TRANSITION_REQUEST_TO_PRODUCTION_CREATED, + TRANSITION_REQUEST_TO_STAGING_CREATED, +]`) cmd.Use = "test-registry-webhook ID" cmd.Short = `Test a webhook.` @@ -2807,7 +2820,7 @@ func newUpdateWebhook() *cobra.Command { // TODO: array: events // TODO: complex arg: http_url_spec // TODO: complex arg: job_spec - cmd.Flags().Var(&updateWebhookReq.Status, "status", `Enable or disable triggering the webhook, or put the webhook into test mode.`) + cmd.Flags().Var(&updateWebhookReq.Status, "status", `Enable or disable triggering the webhook, or put the webhook into test mode. Supported values: [ACTIVE, DISABLED, TEST_MODE]`) cmd.Use = "update-webhook ID" cmd.Short = `Update a webhook.` diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index a2f1868b..d35eb3cd 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -900,7 +900,14 @@ func newStartUpdate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&startUpdateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().Var(&startUpdateReq.Cause, "cause", ``) + cmd.Flags().Var(&startUpdateReq.Cause, "cause", `. Supported values: [ + API_CALL, + JOB_TASK, + RETRY_ON_FAILURE, + SCHEMA_CHANGE, + SERVICE_UPGRADE, + USER_ACTION, +]`) cmd.Flags().BoolVar(&startUpdateReq.FullRefresh, "full-refresh", startUpdateReq.FullRefresh, `If true, this update will reset all tables before running.`) // TODO: array: full_refresh_selection // TODO: array: refresh_selection diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index 59554edc..eefb4b38 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -372,7 +372,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) - cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it.`) + cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. 
Supported values: [DISABLE, ENABLE, INHERIT]`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of schema, relative to parent catalog.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`) // TODO: map via StringToStringVar: properties diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 502f233f..270538b0 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -66,7 +66,7 @@ func newCreateScope() *cobra.Command { // TODO: complex arg: backend_azure_keyvault cmd.Flags().StringVar(&createScopeReq.InitialManagePrincipal, "initial-manage-principal", createScopeReq.InitialManagePrincipal, `The principal that is initially granted MANAGE permission to the created scope.`) - cmd.Flags().Var(&createScopeReq.ScopeBackendType, "scope-backend-type", `The backend type the scope will be created with.`) + cmd.Flags().Var(&createScopeReq.ScopeBackendType, "scope-backend-type", `The backend type the scope will be created with. Supported values: [AZURE_KEYVAULT, DATABRICKS]`) cmd.Use = "create-scope SCOPE" cmd.Short = `Create a new secret scope.` diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index 60762954..5e66804d 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -292,7 +292,7 @@ func newList() *cobra.Command { cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) - cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results. Supported values: [ascending, descending]`) cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 19afad2b..4cc485e9 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -224,7 +224,7 @@ func newGet() *cobra.Command { cmd.Flags().StringVar(&getReq.ExcludedAttributes, "excluded-attributes", getReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&getReq.Filter, "filter", getReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&getReq.SortBy, "sort-by", getReq.SortBy, `Attribute to sort the results.`) - cmd.Flags().Var(&getReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().Var(&getReq.SortOrder, "sort-order", `The order to sort the results. 
Supported values: [ascending, descending]`) cmd.Flags().IntVar(&getReq.StartIndex, "start-index", getReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "get ID" @@ -405,7 +405,7 @@ func newList() *cobra.Command { cmd.Flags().StringVar(&listReq.ExcludedAttributes, "excluded-attributes", listReq.ExcludedAttributes, `Comma-separated list of attributes to exclude in response.`) cmd.Flags().StringVar(&listReq.Filter, "filter", listReq.Filter, `Query by which the results have to be filtered.`) cmd.Flags().StringVar(&listReq.SortBy, "sort-by", listReq.SortBy, `Attribute to sort the results.`) - cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results.`) + cmd.Flags().Var(&listReq.SortOrder, "sort-order", `The order to sort the results. Supported values: [ascending, descending]`) cmd.Flags().Int64Var(&listReq.StartIndex, "start-index", listReq.StartIndex, `Specifies the index of the first result.`) cmd.Use = "list" diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index 6133d5ed..c64788b8 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -71,9 +71,9 @@ func newCreate() *cobra.Command { cmd.Flags().IntVar(&createReq.MaxNumClusters, "max-num-clusters", createReq.MaxNumClusters, `Maximum number of clusters that the autoscaler will create to handle concurrent queries.`) cmd.Flags().IntVar(&createReq.MinNumClusters, "min-num-clusters", createReq.MinNumClusters, `Minimum number of available clusters that will be maintained for this SQL warehouse.`) cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Logical name for the cluster.`) - cmd.Flags().Var(&createReq.SpotInstancePolicy, "spot-instance-policy", `Configurations whether the warehouse should use spot instances.`) + cmd.Flags().Var(&createReq.SpotInstancePolicy, "spot-instance-policy", `Configurations whether the warehouse should use spot instances. Supported values: [COST_OPTIMIZED, POLICY_UNSPECIFIED, RELIABILITY_OPTIMIZED]`) // TODO: complex arg: tags - cmd.Flags().Var(&createReq.WarehouseType, "warehouse-type", `Warehouse type: PRO or CLASSIC.`) + cmd.Flags().Var(&createReq.WarehouseType, "warehouse-type", `Warehouse type: PRO or CLASSIC. Supported values: [CLASSIC, PRO, TYPE_UNSPECIFIED]`) cmd.Use = "create" cmd.Short = `Create a warehouse.` @@ -253,9 +253,9 @@ func newEdit() *cobra.Command { cmd.Flags().IntVar(&editReq.MaxNumClusters, "max-num-clusters", editReq.MaxNumClusters, `Maximum number of clusters that the autoscaler will create to handle concurrent queries.`) cmd.Flags().IntVar(&editReq.MinNumClusters, "min-num-clusters", editReq.MinNumClusters, `Minimum number of available clusters that will be maintained for this SQL warehouse.`) cmd.Flags().StringVar(&editReq.Name, "name", editReq.Name, `Logical name for the cluster.`) - cmd.Flags().Var(&editReq.SpotInstancePolicy, "spot-instance-policy", `Configurations whether the warehouse should use spot instances.`) + cmd.Flags().Var(&editReq.SpotInstancePolicy, "spot-instance-policy", `Configurations whether the warehouse should use spot instances. Supported values: [COST_OPTIMIZED, POLICY_UNSPECIFIED, RELIABILITY_OPTIMIZED]`) // TODO: complex arg: tags - cmd.Flags().Var(&editReq.WarehouseType, "warehouse-type", `Warehouse type: PRO or CLASSIC.`) + cmd.Flags().Var(&editReq.WarehouseType, "warehouse-type", `Warehouse type: PRO or CLASSIC. 
Supported values: [CLASSIC, PRO, TYPE_UNSPECIFIED]`) cmd.Use = "edit ID" cmd.Short = `Update a warehouse.` @@ -798,7 +798,7 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { // TODO: complex arg: global_param cmd.Flags().StringVar(&setWorkspaceWarehouseConfigReq.GoogleServiceAccount, "google-service-account", setWorkspaceWarehouseConfigReq.GoogleServiceAccount, `GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage.`) cmd.Flags().StringVar(&setWorkspaceWarehouseConfigReq.InstanceProfileArn, "instance-profile-arn", setWorkspaceWarehouseConfigReq.InstanceProfileArn, `AWS Only: Instance profile used to pass IAM role to the cluster.`) - cmd.Flags().Var(&setWorkspaceWarehouseConfigReq.SecurityPolicy, "security-policy", `Security policy for warehouses.`) + cmd.Flags().Var(&setWorkspaceWarehouseConfigReq.SecurityPolicy, "security-policy", `Security policy for warehouses. Supported values: [DATA_ACCESS_CONTROL, NONE, PASSTHROUGH]`) // TODO: complex arg: sql_configuration_parameters cmd.Use = "set-workspace-warehouse-config" diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 8944638e..6b9e9f85 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -149,7 +149,14 @@ func newExport() *cobra.Command { // TODO: short flags - cmd.Flags().Var(&exportReq.Format, "format", `This specifies the format of the exported file.`) + cmd.Flags().Var(&exportReq.Format, "format", `This specifies the format of the exported file. Supported values: [ + AUTO, + DBC, + HTML, + JUPYTER, + R_MARKDOWN, + SOURCE, +]`) cmd.Use = "export PATH" cmd.Short = `Export a workspace object.` @@ -436,8 +443,15 @@ func newImport() *cobra.Command { cmd.Flags().Var(&importJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&importReq.Content, "content", importReq.Content, `The base64-encoded content.`) - cmd.Flags().Var(&importReq.Format, "format", `This specifies the format of the file to be imported.`) - cmd.Flags().Var(&importReq.Language, "language", `The language of the object.`) + cmd.Flags().Var(&importReq.Format, "format", `This specifies the format of the file to be imported. Supported values: [ + AUTO, + DBC, + HTML, + JUPYTER, + R_MARKDOWN, + SOURCE, +]`) + cmd.Flags().Var(&importReq.Language, "language", `The language of the object. Supported values: [PYTHON, R, SCALA, SQL]`) cmd.Flags().BoolVar(&importReq.Overwrite, "overwrite", importReq.Overwrite, `The flag that specifies whether to overwrite existing object.`) cmd.Use = "import PATH" From 6002f49c87dfb27b3f58de5d233203efd4eacf65 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 6 Dec 2023 16:15:18 +0530 Subject: [PATCH 279/310] Move bundle schema update to an internal module (#1012) ## Changes This PR: 1. Move code to load bundle JSON Schema descriptions from the OpenAPI spec to an internal Go module 2. Remove command line flags from the `bundle schema` command. These flags were meant for internal processes and at no point were meant for customer use. 3. Regenerate `bundle_descriptions.json` 4. Add support for `bundle: "deprecated"`. The `environments` field is tagged as deprecated in this PR and consequently will no longer be a part of the bundle schema. ## Tests Tested by regenerating the CLI against its current OpenAPI spec (as defined in `__openapi_sha`). The `bundle_descriptions.json` in this PR was generated from the code generator. 
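Manually checked that the autocompletion / descriptions from the new bundle schema are correct.

As an aside, to illustrate point 4 under Changes: a reflection-based schema generator can honor a `bundle:"deprecated"` struct tag roughly as sketched below. This is a minimal, hypothetical example for readers following along; the `Root` struct and `schemaProperties` helper here are simplified stand-ins, not the CLI's actual schema code.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Root mimics a config struct in which the `bundle:"deprecated"` tag marks
// fields that should be dropped from the generated schema.
type Root struct {
	Targets      map[string]string `json:"targets,omitempty"`
	Environments map[string]string `json:"environments,omitempty" bundle:"deprecated"`
}

// schemaProperties returns the JSON property names a generator would keep,
// skipping any field tagged with `bundle:"deprecated"`.
func schemaProperties(t reflect.Type) []string {
	var props []string
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if field.Tag.Get("bundle") == "deprecated" {
			continue // e.g. the `environments` field above is excluded
		}
		// Keep the JSON name, trimming options such as ",omitempty".
		name, _, _ := strings.Cut(field.Tag.Get("json"), ",")
		props = append(props, name)
	}
	return props
}

func main() {
	fmt.Println(schemaProperties(reflect.TypeOf(Root{}))) // prints [targets]
}
```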
---
 .codegen.json | 8 +-
 .gitattributes | 3 +
 bundle/config/root.go | 2 +-
 bundle/internal/bundle/schema/main.go | 42 +
 bundle/schema/README.md | 15 +-
 bundle/schema/docs.go | 77 +-
 bundle/schema/docs/bundle_descriptions.json | 4643 +++++++++++--------
 bundle/schema/schema.go | 8 +-
 cmd/bundle/schema.go | 36 +-
 9 files changed, 2834 insertions(+), 2000 deletions(-)
 create mode 100644 bundle/internal/bundle/schema/main.go

diff --git a/.codegen.json b/.codegen.json
index da4f3dd6..1b29f979 100644
--- a/.codegen.json
+++ b/.codegen.json
@@ -8,6 +8,12 @@
 ".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go"
 },
 "toolchain": {
- "required": ["go"]
+ "required": ["go"],
+ "post_generate": [
+ "go run ./bundle/internal/bundle/schema/main.go ./bundle/schema/docs/bundle_descriptions.json",
+ "echo 'bundle/internal/tf/schema/\*.go linguist-generated=true' >> ./.gitattributes",
+ "echo 'go.sum linguist-generated=true' >> ./.gitattributes",
+ "echo 'bundle/schema/docs/bundle_descriptions.json linguist-generated=true' >> ./.gitattributes"
+ ]
 }
 }
diff --git a/.gitattributes b/.gitattributes
index ddd698a0..f3389320 100755
--- a/.gitattributes
+++ b/.gitattributes
@@ -83,3 +83,6 @@ cmd/workspace/warehouses/warehouses.go linguist-generated=true
 cmd/workspace/workspace-bindings/workspace-bindings.go linguist-generated=true
 cmd/workspace/workspace-conf/workspace-conf.go linguist-generated=true
 cmd/workspace/workspace/workspace.go linguist-generated=true
+bundle/internal/tf/schema/\*.go linguist-generated=true
+go.sum linguist-generated=true
+bundle/schema/docs/bundle_descriptions.json linguist-generated=true
diff --git a/bundle/config/root.go b/bundle/config/root.go
index 1fb5773b..32baa1a5 100644
--- a/bundle/config/root.go
+++ b/bundle/config/root.go
@@ -48,7 +48,7 @@ type Root struct {
 Targets map[string]*Target `json:"targets,omitempty"`

 // DEPRECATED. Left for backward compatibility with Targets
- Environments map[string]*Target `json:"environments,omitempty"`
+ Environments map[string]*Target `json:"environments,omitempty" bundle:"deprecated"`

 // Sync section specifies options for files synchronization
 Sync Sync `json:"sync,omitempty"`
diff --git a/bundle/internal/bundle/schema/main.go b/bundle/internal/bundle/schema/main.go
new file mode 100644
index 00000000..c9cc7cd4
--- /dev/null
+++ b/bundle/internal/bundle/schema/main.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+
+ "github.com/databricks/cli/bundle/schema"
+)
+
+func main() {
+ if len(os.Args) != 2 {
+ fmt.Println("Usage: go run main.go <output-file>")
+ os.Exit(1)
+ }
+
+ // Output file, to write the generated schema descriptions to.
+ outputFile := os.Args[1]
+
+ // Input file, the databricks openapi spec.
+ inputFile := os.Getenv("DATABRICKS_OPENAPI_SPEC")
+ if inputFile == "" {
+ log.Fatal("DATABRICKS_OPENAPI_SPEC environment variable not set")
+ }
+
+ // Generate the schema descriptions.
+ docs, err := schema.UpdateBundleDescriptions(inputFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+ result, err := json.MarshalIndent(docs, "", " ")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Write the schema descriptions to the output file.
+ err = os.WriteFile(outputFile, result, 0644) + if err != nil { + log.Fatal(err) + } +} diff --git a/bundle/schema/README.md b/bundle/schema/README.md index fe6b149c..bf6b87df 100644 --- a/bundle/schema/README.md +++ b/bundle/schema/README.md @@ -13,15 +13,6 @@ These descriptions are rendered in the inline documentation in an IDE ### SOP: Add schema descriptions for new fields in bundle config -1. You can autogenerate empty descriptions for the new fields by running -`databricks bundle schema --only-docs > ~/databricks/bundle/schema/docs/bundle_descriptions.json` -2. Manually edit bundle_descriptions.json to add your descriptions -3. Build again to embed the new `bundle_descriptions.json` into the binary (`go build`) -4. Again run `databricks bundle schema --only-docs > ~/databricks/bundle/schema/docs/bundle_descriptions.json` to copy over any applicable descriptions to `targets` -5. push to repo - - -### SOP: Update descriptions in resources from a newer openapi spec - -1. Run `databricks bundle schema --only-docs --openapi PATH_TO_SPEC > ~/databricks/bundle/schema/docs/bundle_descriptions.json` -2. push to repo +Manually edit bundle_descriptions.json to add your descriptions. Note that the +descriptions in `resources` block is generated from the OpenAPI spec, and thus +any changes there will be overwritten. diff --git a/bundle/schema/docs.go b/bundle/schema/docs.go index 4b2fd36a..fe63e432 100644 --- a/bundle/schema/docs.go +++ b/bundle/schema/docs.go @@ -23,39 +23,6 @@ type Docs struct { //go:embed docs/bundle_descriptions.json var bundleDocs []byte -func BundleDocs(openapiSpecPath string) (*Docs, error) { - docs, err := initializeBundleDocs() - if err != nil { - return nil, err - } - if openapiSpecPath != "" { - openapiSpec, err := os.ReadFile(openapiSpecPath) - if err != nil { - return nil, err - } - spec := &openapi.Specification{} - err = json.Unmarshal(openapiSpec, spec) - if err != nil { - return nil, err - } - openapiReader := &OpenapiReader{ - OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), - } - resourcesDocs, err := openapiReader.ResourcesDocs() - if err != nil { - return nil, err - } - resourceSchema, err := New(reflect.TypeOf(config.Resources{}), resourcesDocs) - if err != nil { - return nil, err - } - docs.Properties["resources"] = schemaToDocs(resourceSchema) - } - docs.refreshTargetsDocs() - return docs, nil -} - func (docs *Docs) refreshTargetsDocs() error { targetsDocs, ok := docs.Properties["targets"] if !ok || targetsDocs.AdditionalProperties == nil || @@ -70,21 +37,53 @@ func (docs *Docs) refreshTargetsDocs() error { return nil } -func initializeBundleDocs() (*Docs, error) { - // load embedded descriptions +func LoadBundleDescriptions() (*Docs, error) { embedded := Docs{} err := json.Unmarshal(bundleDocs, &embedded) + return &embedded, err +} + +func UpdateBundleDescriptions(openapiSpecPath string) (*Docs, error) { + embedded, err := LoadBundleDescriptions() if err != nil { return nil, err } - // generate schema with the embedded descriptions - schema, err := New(reflect.TypeOf(config.Root{}), &embedded) + + // Generate schema from the embedded descriptions, and convert it back to docs. + // This creates empty descriptions for any properties that were missing in the + // embedded descriptions. + schema, err := New(reflect.TypeOf(config.Root{}), embedded) if err != nil { return nil, err } - // converting the schema back to docs. 
This creates empty descriptions - // for any properties that were missing in the embedded descriptions docs := schemaToDocs(schema) + + // Load the Databricks OpenAPI spec + openapiSpec, err := os.ReadFile(openapiSpecPath) + if err != nil { + return nil, err + } + spec := &openapi.Specification{} + err = json.Unmarshal(openapiSpec, spec) + if err != nil { + return nil, err + } + openapiReader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*jsonschema.Schema), + } + + // Generate descriptions for the "resources" field + resourcesDocs, err := openapiReader.ResourcesDocs() + if err != nil { + return nil, err + } + resourceSchema, err := New(reflect.TypeOf(config.Resources{}), resourcesDocs) + if err != nil { + return nil, err + } + docs.Properties["resources"] = schemaToDocs(resourceSchema) + docs.refreshTargetsDocs() return docs, nil } diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 98f3cf8d..09462fb0 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -6,13 +6,25 @@ "additionalproperties": { "description": "", "properties": { - "notebook": { + "build": { + "description": "" + }, + "files": { "description": "", - "properties": { - "path": { - "description": "" + "items": { + "description": "", + "properties": { + "source": { + "description": "" + } } } + }, + "path": { + "description": "" + }, + "type": { + "description": "" } } } @@ -20,6 +32,9 @@ "bundle": { "description": "The details for this bundle.", "properties": { + "compute_id": { + "description": "" + }, "git": { "description": "", "properties": { @@ -36,1883 +51,16 @@ } } }, - "targets": { + "experimental": { "description": "", - "additionalproperties": { - "description": "", - "properties": { - "artifacts": { - "description": "A description of all code artifacts in this bundle.", - "additionalproperties": { - "description": "", - "properties": { - "notebook": { - "description": "", - "properties": { - "path": { - "description": "" - } - } - } - } - } - }, - "bundle": { - "description": "The details for this bundle.", - "properties": { - "git": { - "description": "", - "properties": { - "branch": { - "description": "" - }, - "origin_url": { - "description": "" - } - } - }, - "name": { - "description": "The name of the bundle." - } - } - }, - "default": { + "properties": { + "python_wheel_wrapper": { + "description": "" + }, + "scripts": { + "description": "", + "additionalproperties": { "description": "" - }, - "resources": { - "description": "Collection of Databricks resources to deploy.", - "properties": { - "experiments": { - "description": "List of MLflow experiments", - "additionalproperties": { - "description": "", - "properties": { - "artifact_location": { - "description": "Location where artifacts for the experiment are stored." - }, - "creation_time": { - "description": "Creation time" - }, - "experiment_id": { - "description": "Unique identifier for the experiment." - }, - "last_update_time": { - "description": "Last update time" - }, - "lifecycle_stage": { - "description": "Current life cycle stage of the experiment: \"active\" or \"deleted\".\nDeleted experiments are not returned by APIs." - }, - "name": { - "description": "Human readable name that identifies the experiment." 
- }, - "permissions": { - "description": "", - "items": { - "description": "", - "properties": { - "group_name": { - "description": "" - }, - "level": { - "description": "" - }, - "service_principal_name": { - "description": "" - }, - "user_name": { - "description": "" - } - } - } - }, - "tags": { - "description": "Tags: Additional metadata key-value pairs.", - "items": { - "description": "", - "properties": { - "key": { - "description": "The tag key." - }, - "value": { - "description": "The tag value." - } - } - } - } - } - } - }, - "jobs": { - "description": "List of Databricks jobs", - "additionalproperties": { - "description": "", - "properties": { - "compute": { - "description": "A list of compute requirements that can be referenced by tasks of this job.", - "items": { - "description": "", - "properties": { - "compute_key": { - "description": "A unique name for the compute requirement. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine the compute requirements for the task execution." - }, - "spec": { - "description": "", - "properties": { - "kind": { - "description": "The kind of compute described by this compute specification." - } - } - } - } - } - }, - "continuous": { - "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", - "properties": { - "pause_status": { - "description": "Whether this trigger is paused or not." - } - } - }, - "email_notifications": { - "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. The default behavior is to not send any emails.", - "properties": { - "no_alert_for_skipped_runs": { - "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." - }, - "on_failure": { - "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", - "items": { - "description": "" - } - }, - "on_start": { - "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", - "items": { - "description": "" - } - }, - "on_success": { - "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", - "items": { - "description": "" - } - } - } - }, - "format": { - "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." 
- }, - "git_source": { - "description": "An optional specification for a remote repository containing the notebooks used by this job's notebook tasks.", - "properties": { - "git_branch": { - "description": "Name of the branch to be checked out and used by this job.\nThis field cannot be specified in conjunction with git_tag or git_commit.\n\nThe maximum length is 255 characters.\n" - }, - "git_commit": { - "description": "Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag.\nThe maximum length is 64 characters." - }, - "git_provider": { - "description": "Unique identifier of the service used to host the Git repository. The value is case insensitive." - }, - "git_snapshot": { - "description": "", - "properties": { - "used_commit": { - "description": "Commit that was used to execute the run. If git_branch was specified, this points to the HEAD of the branch at the time of the run; if git_tag was specified, this points to the commit the tag points to." - } - } - }, - "git_tag": { - "description": "Name of the tag to be checked out and used by this job.\nThis field cannot be specified in conjunction with git_branch or git_commit.\n\nThe maximum length is 255 characters.\n" - }, - "git_url": { - "description": "URL of the repository to be cloned by this job.\nThe maximum length is 300 characters." - } - } - }, - "job_clusters": { - "description": "A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.", - "items": { - "description": "", - "properties": { - "job_cluster_key": { - "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." - }, - "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", - "properties": { - "autoscale": { - "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", - "properties": { - "max_workers": { - "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." - }, - "min_workers": { - "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." - } - } - }, - "autotermination_minutes": { - "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination." - }, - "aws_attributes": { - "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "ebs_volume_count": { - "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. 
Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." - }, - "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" - }, - "ebs_volume_size": { - "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." - }, - "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" - }, - "ebs_volume_type": { - "description": "" - }, - "first_on_demand": { - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." - }, - "instance_profile_arn": { - "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." - }, - "spot_bid_price_percent": { - "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." - }, - "zone_id": { - "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. 
For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." - } - } - }, - "azure_attributes": { - "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "first_on_demand": { - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." - }, - "log_analytics_info": { - "description": "Defines values necessary to configure and run Azure Log Analytics agent", - "properties": { - "log_analytics_primary_key": { - "description": "\u003cneeds content added\u003e" - }, - "log_analytics_workspace_id": { - "description": "\u003cneeds content added\u003e" - } - } - }, - "spot_bid_max_price": { - "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." - } - } - }, - "cluster_log_conf": { - "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", - "properties": { - "dbfs": { - "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", - "properties": { - "destination": { - "description": "dbfs destination, e.g. `dbfs:/my/path`" - } - } - }, - "s3": { - "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", - "properties": { - "canned_acl": { - "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. 
The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." - }, - "destination": { - "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." - }, - "enable_encryption": { - "description": "(Optional) Flag to enable server side encryption, `false` by default." - }, - "encryption_type": { - "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." - }, - "endpoint": { - "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." - }, - "kms_key": { - "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." - }, - "region": { - "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." - } - } - } - } - }, - "cluster_name": { - "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" - }, - "cluster_source": { - "description": "" - }, - "custom_tags": { - "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", - "additionalproperties": { - "description": "" - } - }, - "data_security_mode": { - "description": "" - }, - "docker_image": { - "description": "", - "properties": { - "basic_auth": { - "description": "", - "properties": { - "password": { - "description": "Password of the user" - }, - "username": { - "description": "Name of the user" - } - } - }, - "url": { - "description": "URL of the docker image." - } - } - }, - "driver_instance_pool_id": { - "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." - }, - "driver_node_type_id": { - "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n" - }, - "enable_elastic_disk": { - "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details." 
- }, - "enable_local_disk_encryption": { - "description": "Whether to enable LUKS on cluster VMs' local disks" - }, - "gcp_attributes": { - "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "boot_disk_size": { - "description": "boot disk size in GB" - }, - "google_service_account": { - "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." - }, - "local_ssd_count": { - "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." - } - } - }, - "init_scripts": { - "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", - "items": { - "description": "", - "properties": { - "dbfs": { - "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", - "properties": { - "destination": { - "description": "dbfs destination, e.g. `dbfs:/my/path`" - } - } - }, - "s3": { - "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", - "properties": { - "canned_acl": { - "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." - }, - "destination": { - "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." - }, - "enable_encryption": { - "description": "(Optional) Flag to enable server side encryption, `false` by default." - }, - "encryption_type": { - "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." - }, - "endpoint": { - "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." 
- }, - "kms_key": { - "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." - }, - "region": { - "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." - } - } - }, - "workspace": { - "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", - "properties": { - "destination": { - "description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`" - } - } - } - } - } - }, - "instance_pool_id": { - "description": "The optional ID of the instance pool to which the cluster belongs." - }, - "node_type_id": { - "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" - }, - "num_workers": { - "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." - }, - "policy_id": { - "description": "The ID of the cluster policy used to create the cluster if applicable." - }, - "runtime_engine": { - "description": "" - }, - "single_user_name": { - "description": "Single user name if data_security_mode is `SINGLE_USER`" - }, - "spark_conf": { - "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n", - "additionalproperties": { - "description": "" - } - }, - "spark_env_vars": { - "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", - "additionalproperties": { - "description": "" - } - }, - "spark_version": { - "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n" - }, - "ssh_public_keys": { - "description": "SSH public key contents that will be added to each Spark node in this cluster. 
The\ncorresponding private keys can be used to log in with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", - "items": { - "description": "" - } - }, - "workload_type": { - "description": "", - "properties": { - "clients": { - "description": "Defines what type of clients can use the cluster, e.g. Notebooks, Jobs", - "properties": { - "jobs": { - "description": "With jobs set, the cluster can be used for jobs" - }, - "notebooks": { - "description": "With notebooks set, this cluster can be used for notebooks" - } - } - } - } - } - } - } - } - } - }, - "max_concurrent_runs": { - "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000\\. Setting this value to 0 causes all new runs to be skipped. The default behavior is to allow only 1 concurrent run." - }, - "name": { - "description": "An optional name for the job." - }, - "notification_settings": { - "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this job.", - "properties": { - "no_alert_for_canceled_runs": { - "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is canceled." - }, - "no_alert_for_skipped_runs": { - "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is skipped." - } - } - }, - "parameters": { - "description": "Job-level parameter definitions", - "items": { - "description": "", - "properties": { - "default": { - "description": "Default value of the parameter." - }, - "name": { - "description": "The name of the defined parameter. May only contain alphanumeric characters, `_`, `-`, and `.`" - } - } - } - }, - "permissions": { - "description": "", - "items": { - "description": "", - "properties": { - "group_name": { - "description": "" - }, - "level": { - "description": "" - }, - "service_principal_name": { - "description": "" - }, - "user_name": { - "description": "" - } - } - } - }, - "run_as": { - "description": "", - "properties": { - "service_principal_name": { - "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role." - }, - "user_name": { - "description": "The email of an active workspace user. Non-admin users can only set this field to their own email." - } - } - }, - "schedule": { - "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", - "properties": { - "pause_status": { - "description": "Whether this trigger is paused or not."
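To make the job-level fields above concrete before the remaining schedule fields continue below, here is a sketch combining `parameters` and `run_as` (the parameter name, default, and application ID are hypothetical placeholders):

    "parameters": [
      { "name": "env", "default": "dev" }
    ],
    "run_as": {
      "service_principal_name": "<application-id>"
    }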
- }, - "quartz_cron_expression": { - "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" - }, - "timezone_id": { - "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n" - } - } - }, - "tags": { - "description": "A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job.", - "additionalproperties": { - "description": "" - } - }, - "tasks": { - "description": "A list of task specifications to be executed by this job.", - "items": { - "description": "", - "properties": { - "compute_key": { - "description": "The key of the compute requirement, specified in `job.settings.compute`, to use for execution of this task." - }, - "condition_task": { - "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.", - "properties": { - "left": { - "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference." - }, - "op": { - "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.\n" - }, - "right": { - "description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference." - } - } - }, - "dbt_task": { - "description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", - "properties": { - "catalog": { - "description": "Optional name of the catalog to use. The value is the top level in the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog value can only be specified if a warehouse_id is specified. Requires dbt-databricks \u003e= 1.1.1." - }, - "commands": { - "description": "A list of dbt commands to execute. All commands must start with `dbt`. This parameter must not be empty. A maximum of up to 10 commands can be provided.", - "items": { - "description": "" - } - }, - "profiles_directory": { - "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." - }, - "project_directory": { - "description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used." - }, - "schema": { - "description": "Optional schema to write to. 
This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." - }, - "warehouse_id": { - "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." - } - } - }, - "depends_on": { - "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete successfully before executing this task.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n", - "items": { - "description": "", - "properties": { - "outcome": { - "description": "Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run." - }, - "task_key": { - "description": "The name of the task this task depends on." - } - } - } - }, - "description": { - "description": "An optional description for this task.\nThe maximum length is 4096 bytes." - }, - "email_notifications": { - "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", - "properties": { - "on_failure": { - "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED` or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", - "items": { - "description": "" - } - }, - "on_start": { - "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", - "items": { - "description": "" - } - }, - "on_success": { - "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", - "items": { - "description": "" - } - } - } - }, - "existing_cluster_id": { - "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." - }, - "job_cluster_key": { - "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." - }, - "libraries": { - "description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.", - "items": { - "description": "", - "properties": { - "cran": { - "description": "Specification of a CRAN library to be installed as part of the library", - "properties": { - "package": { - "description": "The name of the CRAN package to install." - }, - "repo": { - "description": "The repository where the package can be found. If not specified, the default CRAN repo is used." - } - } - }, - "egg": { - "description": "URI of the egg to be installed.
Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." - }, - "jar": { - "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." - }, - "maven": { - "description": "Specification of a Maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", - "properties": { - "coordinates": { - "description": "Gradle-style Maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\"." - }, - "exclusions": { - "description": "List of dependencies to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", - "items": { - "description": "" - } - }, - "repo": { - "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched." - } - } - }, - "pypi": { - "description": "Specification of a PyPI library to be installed. For example:\n`{ \"package\": \"simplejson\" }`", - "properties": { - "package": { - "description": "The name of the PyPI package to install. An optional exact version specification is also\nsupported. Examples: \"simplejson\" and \"simplejson==3.8.0\"." - }, - "repo": { - "description": "The repository where the package can be found. If not specified, the default pip index is\nused." - } - } - }, - "whl": { - "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." - } - } - } - }, - "max_retries": { - "description": "An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry." - }, - "min_retry_interval_millis": { - "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." - }, - "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created only for this task.", - "properties": { - "autoscale": { - "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", - "properties": { - "max_workers": { - "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`."
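Putting the library specifications above together, a task's `libraries` list might look like this sketch (package names and URIs are taken from the examples in the field descriptions):

    "libraries": [
      { "pypi": { "package": "simplejson==3.8.0" } },
      { "maven": { "coordinates": "org.jsoup:jsoup:1.7.2", "exclusions": ["slf4j:slf4j"] } },
      { "whl": "dbfs:/my/whl" }
    ]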
- }, - "min_workers": { - "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." - } - } - }, - "autotermination_minutes": { - "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination." - }, - "aws_attributes": { - "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "ebs_volume_count": { - "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." - }, - "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" - }, - "ebs_volume_size": { - "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." - }, - "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" - }, - "ebs_volume_type": { - "description": "" - }, - "first_on_demand": { - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." - }, - "instance_profile_arn": { - "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." 
- }, - "spot_bid_price_percent": { - "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." - }, - "zone_id": { - "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." - } - } - }, - "azure_attributes": { - "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "first_on_demand": { - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." - }, - "log_analytics_info": { - "description": "Defines values necessary to configure and run Azure Log Analytics agent", - "properties": { - "log_analytics_primary_key": { - "description": "\u003cneeds content added\u003e" - }, - "log_analytics_workspace_id": { - "description": "\u003cneeds content added\u003e" - } - } - }, - "spot_bid_max_price": { - "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." - } - } - }, - "cluster_log_conf": { - "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. 
If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", - "properties": { - "dbfs": { - "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", - "properties": { - "destination": { - "description": "dbfs destination, e.g. `dbfs:/my/path`" - } - } - }, - "s3": { - "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster IAM role is used to access S3, please make sure the cluster IAM role in\n`instance_profile_arn` has permission to write data to the S3 destination.", - "properties": { - "canned_acl": { - "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_acl` is set, please make sure the cluster IAM role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned ACLs can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full control. If you are using a cross-account\nrole for writing data, you may want to set `bucket-owner-full-control` to make the bucket owner able to\nread the logs." - }, - "destination": { - "description": "S3 destination, e.g. `s3://my-bucket/some-prefix`. Note that logs will be delivered using\nthe cluster IAM role, so please make sure you set the cluster IAM role and that the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." - }, - "enable_encryption": { - "description": "(Optional) Flag to enable server side encryption, `false` by default." - }, - "encryption_type": { - "description": "(Optional) The encryption type; it can be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." - }, - "endpoint": { - "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." - }, - "kms_key": { - "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." - }, - "region": { - "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." - } - } - } - } - }, - "cluster_name": { - "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" - }, - "cluster_source": { - "description": "" - }, - "custom_tags": { - "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`.
Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", - "additionalproperties": { - "description": "" - } - }, - "data_security_mode": { - "description": "" - }, - "docker_image": { - "description": "", - "properties": { - "basic_auth": { - "description": "", - "properties": { - "password": { - "description": "Password of the user" - }, - "username": { - "description": "Name of the user" - } - } - }, - "url": { - "description": "URL of the docker image." - } - } - }, - "driver_instance_pool_id": { - "description": "The optional ID of the instance pool to use for the cluster driver.\nThe cluster uses the instance pool with ID (instance_pool_id) if the driver pool is not\nassigned." - }, - "driver_node_type_id": { - "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n" - }, - "enable_elastic_disk": { - "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details." - }, - "enable_local_disk_encryption": { - "description": "Whether to enable LUKS on cluster VMs' local disks" - }, - "gcp_attributes": { - "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "boot_disk_size": { - "description": "boot disk size in GB" - }, - "google_service_account": { - "description": "If provided, the cluster will impersonate the Google service account when accessing\ngcloud services (like GCS). The Google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." - }, - "local_ssd_count": { - "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." - } - } - }, - "init_scripts": { - "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", - "items": { - "description": "", - "properties": { - "dbfs": { - "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", - "properties": { - "destination": { - "description": "dbfs destination, e.g. `dbfs:/my/path`" - } - } - }, - "s3": { - "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster IAM role is used to access S3, please make sure the cluster IAM role in\n`instance_profile_arn` has permission to write data to the S3 destination.", - "properties": { - "canned_acl": { - "description": "(Optional) Set canned access control list for the logs, e.g.
`bucket-owner-full-control`.\nIf `canned_acl` is set, please make sure the cluster IAM role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned ACLs can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full control. If you are using a cross-account\nrole for writing data, you may want to set `bucket-owner-full-control` to make the bucket owner able to\nread the logs." - }, - "destination": { - "description": "S3 destination, e.g. `s3://my-bucket/some-prefix`. Note that logs will be delivered using\nthe cluster IAM role, so please make sure you set the cluster IAM role and that the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." - }, - "enable_encryption": { - "description": "(Optional) Flag to enable server side encryption, `false` by default." - }, - "encryption_type": { - "description": "(Optional) The encryption type; it can be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." - }, - "endpoint": { - "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." - }, - "kms_key": { - "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." - }, - "region": { - "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." - } - } - }, - "workspace": { - "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", - "properties": { - "destination": { - "description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`" - } - } - } - } - } - }, - "instance_pool_id": { - "description": "The optional ID of the instance pool to which the cluster belongs." - }, - "node_type_id": { - "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" - }, - "num_workers": { - "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." - }, - "policy_id": { - "description": "The ID of the cluster policy used to create the cluster if applicable."
- }, - "runtime_engine": { - "description": "" - }, - "single_user_name": { - "description": "Single user name if data_security_mode is `SINGLE_USER`" - }, - "spark_conf": { - "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n", - "additionalproperties": { - "description": "" - } - }, - "spark_env_vars": { - "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", - "additionalproperties": { - "description": "" - } - }, - "spark_version": { - "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n" - }, - "ssh_public_keys": { - "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", - "items": { - "description": "" - } - }, - "workload_type": { - "description": "", - "properties": { - "clients": { - "description": " defined what type of clients can use the cluster. E.g. Notebooks, Jobs", - "properties": { - "jobs": { - "description": "With jobs set, the cluster can be used for jobs" - }, - "notebooks": { - "description": "With notebooks set, this cluster can be used for notebooks" - } - } - } - } - } - } - }, - "notebook_task": { - "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", - "properties": { - "base_parameters": { - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n", - "additionalproperties": { - "description": "" - } - }, - "notebook_path": { - "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" - }, - "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE` or not specified, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`,\nthe notebook will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The notebook is located in the \u003cDatabricks\u003e workspace.\n* `GIT`: The notebook is located in a remote Git repository.\n" - } - } - }, - "notification_settings": { - "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` for this task.", - "properties": { - "alert_on_last_attempt": { - "description": "If true, do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run." - }, - "no_alert_for_canceled_runs": { - "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is canceled." - }, - "no_alert_for_skipped_runs": { - "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is skipped." - } - } - }, - "pipeline_task": { - "description": "If pipeline_task, indicates that this task must execute a Pipeline.", - "properties": { - "full_refresh": { - "description": "If true, a full refresh will be triggered on the Delta Live Tables pipeline." - }, - "pipeline_id": { - "description": "The full name of the pipeline task to execute." - } - } - }, - "python_wheel_task": { - "description": "If python_wheel_task, indicates that this job must execute a PythonWheel.", - "properties": { - "entry_point": { - "description": "Named entry point to use. If it does not exist in the metadata of the package, it executes the function from the package directly using `$packageName.$entryPoint()`" - }, - "named_parameters": { - "description": "Command-line parameters passed to Python wheel task in the form of `[\"--name=task\", \"--data=dbfs:/path/to/data.json\"]`. Leave it empty if `parameters` is not null.", - "additionalproperties": { - "description": "" - } - }, - "package_name": { - "description": "Name of the package to execute" - }, - "parameters": { - "description": "Command-line parameters passed to Python wheel task.
Leave it empty if `named_parameters` is not null.", - "items": { - "description": "" - } - } - } - }, - "retry_on_timeout": { - "description": "An optional policy to specify whether to retry a task when it times out. The default behavior is to not retry on timeout." - }, - "run_if": { - "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies completed and at least one was executed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: All dependencies have failed\n" - }, - "spark_jar_task": { - "description": "If spark_jar_task, indicates that this task must run a JAR.", - "properties": { - "jar_uri": { - "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n" - }, - "main_class_name": { - "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." - }, - "parameters": { - "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", - "items": { - "description": "" - } - } - } - }, - "spark_python_task": { - "description": "If spark_python_task, indicates that this task must run a Python file.", - "properties": { - "parameters": { - "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", - "items": { - "description": "" - } - }, - "python_file": { - "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." - }, - "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" - } - } - }, - "spark_submit_task": { - "description": "If spark_submit_task, indicates that this task must be launched by the spark submit script.
This task can run only on new clusters.", - "properties": { - "parameters": { - "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", - "items": { - "description": "" - } - } - } - }, - "sql_task": { - "description": "If sql_task, indicates that this job must execute a SQL task.", - "properties": { - "alert": { - "description": "If alert, indicates that this job must refresh a SQL alert.", - "properties": { - "alert_id": { - "description": "The canonical identifier of the SQL alert." - }, - "pause_subscriptions": { - "description": "If true, the alert notifications are not sent to subscribers." - }, - "subscriptions": { - "description": "If specified, alert notifications are sent to subscribers.", - "items": { - "description": "", - "properties": { - "destination_id": { - "description": "The canonical identifier of the destination to receive email notification." - }, - "user_name": { - "description": "The user name to receive the subscription email." - } - } - } - } - } - }, - "dashboard": { - "description": "If dashboard, indicates that this job must refresh a SQL dashboard.", - "properties": { - "custom_subject": { - "description": "Subject of the email sent to subscribers of this task." - }, - "dashboard_id": { - "description": "The canonical identifier of the SQL dashboard." - }, - "pause_subscriptions": { - "description": "If true, the dashboard snapshot is not taken, and emails are not sent to subscribers." - }, - "subscriptions": { - "description": "If specified, dashboard snapshots are sent to subscriptions.", - "items": { - "description": "", - "properties": { - "destination_id": { - "description": "The canonical identifier of the destination to receive email notification." - }, - "user_name": { - "description": "The user name to receive the subscription email." - } - } - } - } - } - }, - "file": { - "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", - "properties": { - "path": { - "description": "Relative path of the SQL file in the remote Git repository." - } - } - }, - "parameters": { - "description": "Parameters to be used for each run of this job. The SQL alert task does not support custom parameters.", - "additionalproperties": { - "description": "" - } - }, - "query": { - "description": "If query, indicates that this job must execute a SQL query.", - "properties": { - "query_id": { - "description": "The canonical identifier of the SQL query." - } - } - }, - "warehouse_id": { - "description": "The canonical identifier of the SQL warehouse. Only serverless and pro SQL warehouses are supported." - } - } - }, - "task_key": { - "description": "A unique name for the task. This field is used to refer to this task from other tasks.\nThis field is required and must be unique within its parent job.\nOn Update or Reset, this field is used to reference the tasks to be updated or reset.\nThe maximum length is 100 characters." - }, - "timeout_seconds": { - "description": "An optional timeout applied to each run of this job task. The default behavior is to have no timeout." - } - } - } - }, - "timeout_seconds": { - "description": "An optional timeout applied to each run of this job. The default behavior is to have no timeout." 
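A minimal sketch of a `sql_task` that refreshes a saved query on a SQL warehouse, using the fields described above (both identifiers are placeholders):

    "sql_task": {
      "warehouse_id": "<warehouse-id>",
      "query": { "query_id": "<query-id>" }
    }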
- }, - "trigger": { - "description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", - "properties": { - "file_arrival": { - "description": "File arrival trigger settings.", - "properties": { - "min_time_between_triggers_seconds": { - "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n" - }, - "url": { - "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location." - }, - "wait_after_last_change_seconds": { - "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n" - } - } - }, - "pause_status": { - "description": "Whether this trigger is paused or not." - } - } - }, - "webhook_notifications": { - "description": "A collection of system notification IDs to notify when the run begins or completes. The default behavior is to not send any system notifications.", - "properties": { - "on_failure": { - "description": "An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property.", - "items": { - "description": "", - "properties": { - "id": { - "description": "" - } - } - } - }, - "on_start": { - "description": "An optional list of system notification IDs to call when the run starts. A maximum of 3 destinations can be specified for the `on_start` property.", - "items": { - "description": "", - "properties": { - "id": { - "description": "" - } - } - } - }, - "on_success": { - "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", - "items": { - "description": "", - "properties": { - "id": { - "description": "" - } - } - } - } - } - } - } - } - }, - "models": { - "description": "List of MLflow models", - "additionalproperties": { - "description": "", - "properties": { - "creation_timestamp": { - "description": "Timestamp recorded when this `registered_model` was created." - }, - "description": { - "description": "Description of this `registered_model`." - }, - "last_updated_timestamp": { - "description": "Timestamp recorded when metadata for this `registered_model` was last updated." - }, - "latest_versions": { - "description": "Collection of latest model versions for each stage.\nOnly contains models with current `READY` status.", - "items": { - "description": "", - "properties": { - "creation_timestamp": { - "description": "Timestamp recorded when this `model_version` was created." - }, - "current_stage": { - "description": "Current stage for this `model_version`." - }, - "description": { - "description": "Description of this `model_version`." - }, - "last_updated_timestamp": { - "description": "Timestamp recorded when metadata for this `model_version` was last updated." - }, - "name": { - "description": "Unique name of the model" - }, - "run_id": { - "description": "MLflow run ID used when creating `model_version`, if `source` was generated by an\nexperiment run stored in MLflow tracking server." 
- }, - "run_link": { - "description": "Run Link: Direct link to the run that generated this version" - }, - "source": { - "description": "URI indicating the location of the source model artifacts, used when creating `model_version`" - }, - "status": { - "description": "Current status of `model_version`" - }, - "status_message": { - "description": "Details on current `status`, if it is pending or failed." - }, - "tags": { - "description": "Tags: Additional metadata key-value pairs for this `model_version`.", - "items": { - "description": "", - "properties": { - "key": { - "description": "The tag key." - }, - "value": { - "description": "The tag value." - } - } - } - }, - "user_id": { - "description": "User that created this `model_version`." - }, - "version": { - "description": "Model's version number." - } - } - } - }, - "name": { - "description": "Unique name for the model." - }, - "permissions": { - "description": "", - "items": { - "description": "", - "properties": { - "group_name": { - "description": "" - }, - "level": { - "description": "" - }, - "service_principal_name": { - "description": "" - }, - "user_name": { - "description": "" - } - } - } - }, - "tags": { - "description": "Tags: Additional metadata key-value pairs for this `registered_model`.", - "items": { - "description": "", - "properties": { - "key": { - "description": "The tag key." - }, - "value": { - "description": "The tag value." - } - } - } - }, - "user_id": { - "description": "User that created this `registered_model`" - } - } - } - }, - "model_serving_endpoints": { - "description": "List of Model Serving Endpoints", - "additionalproperties": { - "description": "", - "properties": { - "name": { - "description": "The name of the model serving endpoint. This field is required and must be unique across a workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. NOTE: Changing this name will delete the existing endpoint and create a new endpoint with the update name." - }, - "permissions": { - "description": "", - "items": { - "description": "", - "properties": { - "group_name": { - "description": "" - }, - "level": { - "description": "" - }, - "service_principal_name": { - "description": "" - }, - "user_name": { - "description": "" - } - } - } - }, - "config": { - "description": "The model serving endpoint configuration.", - "properties": { - "properties": { - "served_models": { - "description": "Each block represents a served model for the endpoint to serve. A model serving endpoint can have up to 10 served models.", - "items": { - "description": "", - "properties" : { - "name": { - "description": "The name of a served model. It must be unique across an endpoint. If not specified, this field will default to modelname-modelversion. A served model name can consist of alphanumeric characters, dashes, and underscores." - }, - "model_name": { - "description": "The name of the model in Databricks Model Registry to be served." - }, - "model_version": { - "description": "The version of the model in Databricks Model Registry to be served." - }, - "workload_size": { - "description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency)." 
- }, - "scale_to_zero_enabled": { - "description": "Whether the compute resources for the served model should scale down to zero. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0." - } - } - } - }, - "traffic_config": { - "description": "A single block represents the traffic split configuration amongst the served models.", - "properties": { - "routes": { - "description": "Each block represents a route that defines traffic to each served model. Each served_models block needs to have a corresponding routes block.", - "items": { - "description": "", - "properties": { - "served_model_name": { - "description": "The name of the served model this route configures traffic for. This needs to match the name of a served_models block." - }, - "traffic_percentage": { - "description": "The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive." - } - } - } - } - } - } - } - } - } - } - } - }, - "pipelines": { - "description": "List of DLT pipelines", - "additionalproperties": { - "description": "", - "properties": { - "catalog": { - "description": "A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog." - }, - "channel": { - "description": "DLT Release Channel that specifies which version to use." - }, - "clusters": { - "description": "Cluster settings for this pipeline deployment.", - "items": { - "description": "", - "properties": { - "apply_policy_default_values": { - "description": "Note: This field won't be persisted. Only API users will check this field." - }, - "autoscale": { - "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", - "properties": { - "max_workers": { - "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." - }, - "min_workers": { - "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." - } - } - }, - "aws_attributes": { - "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "ebs_volume_count": { - "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. 
If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." - }, - "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" - }, - "ebs_volume_size": { - "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." - }, - "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" - }, - "ebs_volume_type": { - "description": "" - }, - "first_on_demand": { - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." - }, - "instance_profile_arn": { - "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." - }, - "spot_bid_price_percent": { - "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." - }, - "zone_id": { - "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." 
- } - } - }, - "azure_attributes": { - "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "first_on_demand": { - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." - }, - "log_analytics_info": { - "description": "Defines values necessary to configure and run Azure Log Analytics agent", - "properties": { - "log_analytics_primary_key": { - "description": "\u003cneeds content added\u003e" - }, - "log_analytics_workspace_id": { - "description": "\u003cneeds content added\u003e" - } - } - }, - "spot_bid_max_price": { - "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." - } - } - }, - "cluster_log_conf": { - "description": "The configuration for delivering spark logs to a long-term storage destination.\nOnly dbfs destinations are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.\n", - "properties": { - "dbfs": { - "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", - "properties": { - "destination": { - "description": "dbfs destination, e.g. `dbfs:/my/path`" - } - } - }, - "s3": { - "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", - "properties": { - "canned_acl": { - "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." - }, - "destination": { - "description": "S3 destination, e.g. 
`s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." - }, - "enable_encryption": { - "description": "(Optional) Flag to enable server side encryption, `false` by default." - }, - "encryption_type": { - "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." - }, - "endpoint": { - "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." - }, - "kms_key": { - "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." - }, - "region": { - "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." - } - } - } - } - }, - "custom_tags": { - "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", - "additionalproperties": { - "description": "" - } - }, - "driver_instance_pool_id": { - "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." - }, - "driver_node_type_id": { - "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above." - }, - "gcp_attributes": { - "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", - "properties": { - "availability": { - "description": "" - }, - "boot_disk_size": { - "description": "boot disk size in GB" - }, - "google_service_account": { - "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." - }, - "local_ssd_count": { - "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." - } - } - }, - "instance_pool_id": { - "description": "The optional ID of the instance pool to which the cluster belongs." - }, - "label": { - "description": "A label for the cluster specification, either `default` to configure the default cluster, or `maintenance` to configure the maintenance cluster. This field is optional. The default value is `default`." - }, - "node_type_id": { - "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. 
A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" - }, - "num_workers": { - "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." - }, - "policy_id": { - "description": "The ID of the cluster policy used to create the cluster if applicable." - }, - "spark_conf": { - "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nSee :method:clusters/create for more details.\n", - "additionalproperties": { - "description": "" - } - }, - "spark_env_vars": { - "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", - "additionalproperties": { - "description": "" - } - }, - "ssh_public_keys": { - "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", - "items": { - "description": "" - } - } - } - } - }, - "configuration": { - "description": "String-String configuration for this pipeline execution.", - "additionalproperties": { - "description": "" - } - }, - "continuous": { - "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." - }, - "development": { - "description": "Whether the pipeline is in Development mode. Defaults to false." - }, - "edition": { - "description": "Pipeline product edition." - }, - "filters": { - "description": "Filters on which Pipeline packages to include in the deployed graph.", - "properties": { - "exclude": { - "description": "Paths to exclude.", - "items": { - "description": "" - } - }, - "include": { - "description": "Paths to include.", - "items": { - "description": "" - } - } - } - }, - "id": { - "description": "Unique identifier for this pipeline." - }, - "libraries": { - "description": "Libraries or code needed by this deployment.", - "items": { - "description": "", - "properties": { - "file": { - "description": "The path to a file that defines a pipeline and is stored in the Databricks Repos.\n", - "properties": { - "path": { - "description": "The absolute path of the file." - } - } - }, - "jar": { - "description": "URI of the jar to be installed. 
Currently only DBFS is supported.\n" - }, - "maven": { - "description": "Specification of a maven library to be installed.\n", - "properties": { - "coordinates": { - "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\"." - }, - "exclusions": { - "description": "List of dependences to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", - "items": { - "description": "" - } - }, - "repo": { - "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched." - } - } - }, - "notebook": { - "description": "The path to a notebook that defines a pipeline and is stored in the \u003cDatabricks\u003e workspace.\n", - "properties": { - "path": { - "description": "The absolute path of the notebook." - } - } - }, - "whl": { - "description": "URI of the wheel to be installed.\n" - } - } - } - }, - "name": { - "description": "Friendly identifier for this pipeline." - }, - "permissions": { - "description": "", - "items": { - "description": "", - "properties": { - "group_name": { - "description": "" - }, - "level": { - "description": "" - }, - "service_principal_name": { - "description": "" - }, - "user_name": { - "description": "" - } - } - } - }, - "photon": { - "description": "Whether Photon is enabled for this pipeline." - }, - "serverless": { - "description": "Whether serverless compute is enabled for this pipeline." - }, - "storage": { - "description": "DBFS root directory for storing checkpoints and tables." - }, - "target": { - "description": "Target schema (database) to add tables in this pipeline to. If not specified, no data is published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`." - }, - "trigger": { - "description": "Which pipeline trigger to use. Deprecated: Use `continuous` instead.", - "properties": { - "cron": { - "description": "", - "properties": { - "quartz_cron_schedule": { - "description": "" - }, - "timezone_id": { - "description": "" - } - } - }, - "manual": { - "description": "" - } - } - } - } - } - } - } - }, - "variables": { - "description": "", - "additionalproperties": { - "description": "" - } - }, - "workspace": { - "description": "Configures which workspace to connect to and locations for files, state, and similar locations within the workspace file tree.", - "properties": { - "artifact_path": { - "description": "The remote path to synchronize build artifacts to. This defaults to `${workspace.root}/artifacts`" - }, - "auth_type": { - "description": "When multiple auth attributes are available in the environment, use the auth type specified by this argument" - }, - "azure_client_id": { - "description": "Azure Client ID." - }, - "azure_environment": { - "description": "Azure environment, one of (Public, UsGov, China, Germany)." - }, - "azure_login_app_id": { - "description": "Azure Login Application ID." - }, - "azure_tenant_id": { - "description": "Azure Tenant ID." - }, - "azure_use_msi": { - "description": "Whether to use Managed Service Identity (MSI) to authenticate to Azure Databricks." - }, - "azure_workspace_resource_id": { - "description": "Azure Resource Manager ID for Azure Databricks workspace." - }, - "client_id": { - "description": "OAath client ID for the Databricks workspace." 
- }, - "file_path": { - "description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`" - }, - "google_service_account": { - }, - "host": { - "description": "Host url of the workspace." - }, - "metadata_service_url": { - "description": "The URL of the metadata service to use for authentication." - }, - "profile": { - "description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg." - }, - "root_path": { - "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`" - }, - "state_path": { - "description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`" - } - } } } } @@ -1923,6 +71,26 @@ "description": "" } }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, "resources": { "description": "Collection of Databricks resources to deploy.", "properties": { @@ -2014,16 +182,39 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." } } }, + "deployment": { + "description": "Deployment information for jobs managed by external sources.", + "properties": { + "kind": { + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n" + }, + "metadata_file_path": { + "description": "Path of the file that contains deployment metadata." + } + } + }, + "description": { + "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding." + }, + "edit_mode": { + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.\n" + }, "email_notifications": { - "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. The default behavior is to not send any emails.", + "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", "properties": { "no_alert_for_skipped_runs": { "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." }, + "on_duration_warning_threshold_exceeded": { + "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", + "items": { + "description": "" + } + }, "on_failure": { "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. 
If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", "items": { @@ -2048,13 +239,13 @@ "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." }, "git_source": { - "description": "An optional specification for a remote repository containing the notebooks used by this job's notebook tasks.", + "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", "properties": { "git_branch": { - "description": "Name of the branch to be checked out and used by this job.\nThis field cannot be specified in conjunction with git_tag or git_commit.\n\nThe maximum length is 255 characters.\n" + "description": "Name of the branch to be checked out and used by this job. This field cannot be specified in conjunction with git_tag or git_commit." }, "git_commit": { - "description": "Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag.\nThe maximum length is 64 characters." + "description": "Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag." }, "git_provider": { "description": "Unique identifier of the service used to host the Git repository. The value is case insensitive." @@ -2068,10 +259,46 @@ } }, "git_tag": { - "description": "Name of the tag to be checked out and used by this job.\nThis field cannot be specified in conjunction with git_branch or git_commit.\n\nThe maximum length is 255 characters.\n" + "description": "Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit." }, "git_url": { - "description": "URL of the repository to be cloned by this job.\nThe maximum length is 300 characters." + "description": "URL of the repository to be cloned by this job." + }, + "job_source": { + "description": "The source of the job specification in the remote repository when the job is source controlled.", + "properties": { + "dirty_state": { + "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporarily disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.\n" + }, + "import_from_git_branch": { + "description": "Name of the branch which the job is imported from." + }, + "job_config_path": { + "description": "Path of the job YAML file that contains the job specification."
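
The new `job_source` block ties a job to its definition in the repository. A minimal sketch of how these fields nest under `git_source`; the repository URL, branch, and config path below are hypothetical, and `NOT_SYNCED` is one of the two `dirty_state` values documented above:

```json
{
  "git_source": {
    "git_url": "https://github.com/acme/data-jobs",
    "git_provider": "gitHub",
    "git_branch": "main",
    "job_source": {
      "import_from_git_branch": "main",
      "job_config_path": "jobs/nightly_etl.yml",
      "dirty_state": "NOT_SYNCED"
    }
  }
}
```
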
+ } + } + } + } + }, + "health": { + "description": "", + "properties": { + "rules": { + "description": "", + "items": { + "description": "", + "properties": { + "metric": { + "description": "" + }, + "op": { + "description": "" + }, + "value": { + "description": "Specifies the threshold value that the health metric should obey to satisfy the health rule." + } + } + } } } }, @@ -2084,8 +311,11 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { + "apply_policy_default_values": { + "description": "" + }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "properties": { @@ -2131,7 +361,7 @@ "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." }, "zone_id": { - "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nThe list of available zones as well as the default value can be found by using the\n`List Zones` method." 
} } }, @@ -2275,6 +505,14 @@ } } }, + "file": { + "description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`", + "properties": { + "destination": { + "description": "local file destination, e.g. `file:/my/local/file.sh`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -2301,6 +539,14 @@ } } }, + "volumes": { + "description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`" + } + } + }, "workspace": { "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", "properties": { @@ -2373,10 +619,10 @@ } }, "max_concurrent_runs": { - "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000\\. Setting this value to 0 causes all new runs to be skipped. The default behavior is to allow only 1 concurrent run." + "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." }, "name": { - "description": "An optional name for the job." + "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding." }, "notification_settings": { "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this job.", @@ -2423,6 +669,14 @@ } } }, + "queue": { + "description": "The queue settings of the job.", + "properties": { + "enabled": { + "description": "If true, enable queueing for the job. This is a required field." + } + } + }, "run_as": { "description": "", "properties": { @@ -2438,7 +692,7 @@ "description": "An optional periodic schedule for this job. 
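
The `file` and `volumes` destinations added in these hunks join the existing `dbfs`, `s3`, and `workspace` options for init scripts. Reusing the example paths from the descriptions above, a cluster fragment that mixes the two new destination types might look like:

```json
{
  "init_scripts": [
    { "volumes": { "destination": "/Volumes/my-init.sh" } },
    { "file": { "destination": "file:/my/local/file.sh" } }
  ]
}
```
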
The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -2503,7 +757,7 @@ } }, "depends_on": { - "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete successfully before executing this task.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n", + "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n", "items": { "description": "", "properties": { @@ -2517,11 +771,17 @@ } }, "description": { - "description": "An optional description for this task.\nThe maximum length is 4096 bytes." + "description": "An optional description for this task." }, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", "properties": { + "on_duration_warning_threshold_exceeded": { + "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", + "items": { + "description": "" + } + }, "on_failure": { "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", "items": { @@ -2545,6 +805,28 @@ "existing_cluster_id": { "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." }, + "health": { + "description": "", + "properties": { + "rules": { + "description": "", + "items": { + "description": "", + "properties": { + "metric": { + "description": "" + }, + "op": { + "description": "" + }, + "value": { + "description": "Specifies the threshold value that the health metric should obey to satisfy the health rule." + } + } + } + } + } + }, "job_cluster_key": { "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." }, @@ -2605,14 +887,17 @@ } }, "max_retries": { - "description": "An optional maximum number of times to retry an unsuccessful run. 
A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry." + "description": "An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means to retry indefinitely and the value `0` means to never retry." }, "min_retry_interval_millis": { "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { + "apply_policy_default_values": { + "description": "" + }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "properties": { @@ -2658,7 +943,7 @@ "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." }, "zone_id": { - "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. 
For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nThe list of available zones as well as the default value can be found by using the\n`List Zones` method." } } }, @@ -2802,6 +1087,14 @@ } } }, + "file": { + "description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`", + "properties": { + "destination": { + "description": "local file destination, e.g. `file:/my/local/file.sh`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -2828,6 +1121,14 @@ } } }, + "volumes": { + "description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`" + } + } + }, "workspace": { "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", "properties": { @@ -2900,7 +1201,7 @@ "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", "properties": { "base_parameters": { - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n", + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", "additionalproperties": { "description": "" } @@ -2914,7 +1215,7 @@ } }, "notification_settings": { - "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` for this task.", + "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this task.", "properties": { "alert_on_last_attempt": { "description": "If true, do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run." @@ -2962,10 +1263,21 @@ } }, "retry_on_timeout": { - "description": "An optional policy to specify whether to retry a task when it times out. The default behavior is to not retry on timeout." + "description": "An optional policy to specify whether to retry a task when it times out." }, "run_if": { - "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies completed and at least one was executed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed\n" + "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: All dependencies have failed\n" + }, + "run_job_task": { + "description": "If run_job_task, indicates that this task must execute another job.", + "properties": { + "job_id": { + "description": "ID of the job to trigger." + }, + "job_parameters": { + "description": "" + } + } }, "spark_jar_task": { "description": "If spark_jar_task, indicates that this task must run a JAR.", @@ -3002,7 +1314,7 @@ } }, "spark_submit_task": { - "description": "If spark_submit_task, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.", + "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. 
Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n", "properties": { "parameters": { "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", @@ -3030,10 +1342,10 @@ "description": "", "properties": { "destination_id": { - "description": "The canonical identifier of the destination to receive email notification." + "description": "The canonical identifier of the destination to receive email notification. This parameter is mutually exclusive with user_name. You cannot set both destination_id and user_name for subscription notifications." }, "user_name": { - "description": "The user name to receive the subscription email." + "description": "The user name to receive the subscription email. This parameter is mutually exclusive with destination_id. You cannot set both destination_id and user_name for subscription notifications." } } } @@ -3058,10 +1370,10 @@ "description": "", "properties": { "destination_id": { - "description": "The canonical identifier of the destination to receive email notification." + "description": "The canonical identifier of the destination to receive email notification. This parameter is mutually exclusive with user_name. You cannot set both destination_id and user_name for subscription notifications." }, "user_name": { - "description": "The user name to receive the subscription email." + "description": "The user name to receive the subscription email. This parameter is mutually exclusive with destination_id. You cannot set both destination_id and user_name for subscription notifications." } } } @@ -3091,21 +1403,70 @@ } }, "warehouse_id": { - "description": "The canonical identifier of the SQL warehouse. Only serverless and pro SQL warehouses are supported." + "description": "The canonical identifier of the SQL warehouse. Recommended to use with serverless or pro SQL warehouses. Classic SQL warehouses are only supported for SQL alert, dashboard and query tasks and are limited to scheduled single-task jobs." } } }, "task_key": { - "description": "A unique name for the task. This field is used to refer to this task from other tasks.\nThis field is required and must be unique within its parent job.\nOn Update or Reset, this field is used to reference the tasks to be updated or reset.\nThe maximum length is 100 characters." + "description": "A unique name for the task. This field is used to refer to this task from other tasks.\nThis field is required and must be unique within its parent job.\nOn Update or Reset, this field is used to reference the tasks to be updated or reset." }, "timeout_seconds": { - "description": "An optional timeout applied to each run of this job task. The default behavior is to have no timeout." + "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." 
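
Several of the task-level changes above compose: `depends_on` sequences tasks, `run_if` gates execution on the dependencies' outcome, and the new `run_job_task` lets a task trigger another job. A hypothetical task entry combining them; the task keys, job ID, and parameter values are invented for illustration, and `timeout_seconds: 0` means no timeout per the updated description:

```json
{
  "task_key": "publish",
  "depends_on": [ { "task_key": "transform" } ],
  "run_if": "ALL_SUCCESS",
  "run_job_task": {
    "job_id": 1234,
    "job_parameters": { "run_date": "2023-08-01" }
  },
  "timeout_seconds": 0
}
```
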
+ }, + "webhook_notifications": { + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "properties": { + "on_duration_warning_threshold_exceeded": { + "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_failure": { + "description": "An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_start": { + "description": "An optional list of system notification IDs to call when the run starts. A maximum of 3 destinations can be specified for the `on_start` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_success": { + "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + } + } } } } }, "timeout_seconds": { - "description": "An optional timeout applied to each run of this job. The default behavior is to have no timeout." + "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout." }, "trigger": { "description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", @@ -3125,13 +1486,24 @@ } }, "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." } } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when the run begins or completes. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { + "on_duration_warning_threshold_exceeded": { + "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_failure": { "description": "An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property.", "items": { @@ -3170,6 +1542,110 @@ } } }, + "model_serving_endpoints": { + "description": "List of Model Serving Endpoints", + "additionalproperties": { + "description": "", + "properties": { + "config": { + "description": "The core config of the serving endpoint.", + "properties": { + "served_models": { + "description": "A list of served models for the endpoint to serve. 
A serving endpoint can have up to 10 served models.", + "items": { + "description": "", + "properties": { + "environment_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this model.\nNote: this is an experimental feature and subject to change. \nExample model environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", + "additionalproperties": { + "description": "" + } + }, + "instance_profile_arn": { + "description": "ARN of the instance profile that the served model will use to access AWS resources." + }, + "model_name": { + "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, \nin the form of __catalog_name__.__schema_name__.__model_name__.\n" + }, + "model_version": { + "description": "The version of the model in Databricks Model Registry or Unity Catalog to be served." + }, + "name": { + "description": "The name of a served model. It must be unique across an endpoint. If not specified, this field will default to \u003cmodel-name\u003e-\u003cmodel-version\u003e.\nA served model name can consist of alphanumeric characters, dashes, and underscores.\n" + }, + "scale_to_zero_enabled": { + "description": "Whether the compute resources for the served model should scale down to zero." + }, + "workload_size": { + "description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n" + }, + "workload_type": { + "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See documentation for all\noptions.\n" + } + } + } + }, + "traffic_config": { + "description": "The traffic config defining how invocations to the serving endpoint should be routed.", + "properties": { + "routes": { + "description": "The list of routes that define traffic to each served model.", + "items": { + "description": "", + "properties": { + "served_model_name": { + "description": "The name of the served model this route configures traffic for." + }, + "traffic_percentage": { + "description": "The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive." + } + } + } + } + } + } + } + }, + "name": { + "description": "The name of the serving endpoint. 
This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.\n" + }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, + "tags": { + "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", + "items": { + "description": "", + "properties": { + "key": { + "description": "Key field for a serving endpoint tag." + }, + "value": { + "description": "Optional value field for a serving endpoint tag." + } + } + } + } + } + } + }, "models": { "description": "List of MLflow models", "additionalproperties": { @@ -3346,7 +1822,7 @@ "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." }, "zone_id": { - "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nThe list of available zones as well as the default value can be found by using the\n`List Zones` method." } } }, @@ -3554,9 +2030,6 @@ "description": "The absolute path of the notebook." } } - }, - "whl": { - "description": "URI of the wheel to be installed.\n" } } } @@ -3564,6 +2037,26 @@ "name": { "description": "Friendly identifier for this pipeline." 
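
Putting the new `model_serving_endpoints` resource together: `served_models` declares what to serve, and `traffic_config` splits traffic across served models whose default names follow the `<model-name>-<model-version>` convention from the `name` description. All identifiers below are hypothetical; the three-part `model_name` assumes a Unity Catalog model as described above:

```json
{
  "model_serving_endpoints": {
    "my_endpoint": {
      "name": "my-endpoint",
      "config": {
        "served_models": [
          {
            "model_name": "main.default.churn_model",
            "model_version": "1",
            "workload_size": "Small",
            "scale_to_zero_enabled": true
          }
        ],
        "traffic_config": {
          "routes": [
            { "served_model_name": "churn_model-1", "traffic_percentage": 100 }
          ]
        }
      }
    }
  }
}
```
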
}, + "notifications": { + "description": "List of notification settings for this pipeline.", + "items": { + "description": "", + "properties": { + "alerts": { + "description": "A list of alerts that trigger the sending of notifications to the configured\ndestinations. The supported alerts are:\n\n* `on-update-success`: A pipeline update completes successfully.\n* `on-update-failure`: Each time a pipeline update fails.\n* `on-update-fatal-failure`: A pipeline update fails with a non-retryable (fatal) error.\n* `on-flow-failure`: A single data flow fails.\n", + "items": { + "description": "" + } + }, + "email_recipients": { + "description": "A list of email addresses notified when a configured alert is triggered.\n", + "items": { + "description": "" + } + } + } + } + }, "permissions": { "description": "", "items": { @@ -3617,6 +2110,2318 @@ } } } + }, + "registered_models": { + "description": "List of Registered Models", + "additionalproperties": { + "description": "", + "properties": { + "catalog_name": { + "description": "The name of the catalog where the schema and the registered model reside" + }, + "comment": { + "description": "The comment attached to the registered model" + }, + "grants": { + "description": "", + "items": { + "description": "", + "properties": { + "principal": { + "description": "" + }, + "privileges": { + "description": "", + "items": { + "description": "" + } + } + } + } + }, + "name": { + "description": "The name of the registered model" + }, + "schema_name": { + "description": "The name of the schema where the registered model resides" + }, + "storage_location": { + "description": "The storage location on the cloud under which model version data files are stored" + } + } + } + } + } + }, + "run_as": { + "description": "", + "properties": { + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + }, + "sync": { + "description": "", + "properties": { + "exclude": { + "description": "", + "items": { + "description": "" + } + }, + "include": { + "description": "", + "items": { + "description": "" + } + } + } + }, + "targets": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "artifacts": { + "description": "A description of all code artifacts in this bundle.", + "additionalproperties": { + "description": "", + "properties": { + "build": { + "description": "" + }, + "files": { + "description": "", + "items": { + "description": "", + "properties": { + "source": { + "description": "" + } + } + } + }, + "path": { + "description": "" + }, + "type": { + "description": "" + } + } + } + }, + "bundle": { + "description": "The details for this bundle.", + "properties": { + "compute_id": { + "description": "" + }, + "git": { + "description": "", + "properties": { + "branch": { + "description": "" + }, + "origin_url": { + "description": "" + } + } + }, + "name": { + "description": "The name of the bundle." 
+ } + } + }, + "compute_id": { + "description": "" + }, + "default": { + "description": "" + }, + "git": { + "description": "", + "properties": { + "branch": { + "description": "" + }, + "origin_url": { + "description": "" + } + } + }, + "mode": { + "description": "" + }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, + "resources": { + "description": "Collection of Databricks resources to deploy.", + "properties": { + "experiments": { + "description": "List of MLflow experiments", + "additionalproperties": { + "description": "", + "properties": { + "artifact_location": { + "description": "Location where artifacts for the experiment are stored." + }, + "creation_time": { + "description": "Creation time" + }, + "experiment_id": { + "description": "Unique identifier for the experiment." + }, + "last_update_time": { + "description": "Last update time" + }, + "lifecycle_stage": { + "description": "Current life cycle stage of the experiment: \"active\" or \"deleted\".\nDeleted experiments are not returned by APIs." + }, + "name": { + "description": "Human readable name that identifies the experiment." + }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, + "tags": { + "description": "Tags: Additional metadata key-value pairs.", + "items": { + "description": "", + "properties": { + "key": { + "description": "The tag key." + }, + "value": { + "description": "The tag value." + } + } + } + } + } + } + }, + "jobs": { + "description": "List of Databricks jobs", + "additionalproperties": { + "description": "", + "properties": { + "compute": { + "description": "A list of compute requirements that can be referenced by tasks of this job.", + "items": { + "description": "", + "properties": { + "compute_key": { + "description": "A unique name for the compute requirement. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine the compute requirements for the task execution." + }, + "spec": { + "description": "", + "properties": { + "kind": { + "description": "The kind of compute described by this compute specification." + } + } + } + } + } + }, + "continuous": { + "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", + "properties": { + "pause_status": { + "description": "Indicate whether this schedule is paused or not." + } + } + }, + "deployment": { + "description": "Deployment information for jobs managed by external sources.", + "properties": { + "kind": { + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n" + }, + "metadata_file_path": { + "description": "Path of the file that contains deployment metadata." + } + } + }, + "description": { + "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding." 
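As a concrete reading of the job fields just listed, a continuously running job could be declared as below. This is a sketch with hypothetical names; the `UNPAUSED` value is an assumption about the `pause_status` enum, and note the schema's own caveat that only one of `schedule` and `continuous` may be set.

```json
{
  "resources": {
    "jobs": {
      "ingest_job": {
        "name": "ingest-job",
        "description": "Continuously running ingestion job",
        "continuous": {
          "pause_status": "UNPAUSED"
        }
      }
    }
  }
}
```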
+ }, + "edit_mode": { + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.\n" + }, + "email_notifications": { + "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", + "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." + }, + "on_duration_warning_threshold_exceeded": { + "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", + "items": { + "description": "" + } + }, + "on_failure": { + "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_start": { + "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_success": { + "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + } + } + }, + "format": { + "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." + }, + "git_source": { + "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", + "properties": { + "git_branch": { + "description": "Name of the branch to be checked out and used by this job. This field cannot be specified in conjunction with git_tag or git_commit." + }, + "git_commit": { + "description": "Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag." + }, + "git_provider": { + "description": "Unique identifier of the service used to host the Git repository. The value is case insensitive." + }, + "git_snapshot": { + "description": "", + "properties": { + "used_commit": { + "description": "Commit that was used to execute the run. 
If git_branch was specified, this points to the HEAD of the branch at the time of the run; if git_tag was specified, this points to the commit the tag points to." + } + } + }, + "git_tag": { + "description": "Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit." + }, + "git_url": { + "description": "URL of the repository to be cloned by this job." + }, + "job_source": { + "description": "The source of the job specification in the remote repository when the job is source controlled.", + "properties": { + "dirty_state": { + "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporarily disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.\n" + }, + "import_from_git_branch": { + "description": "Name of the branch which the job is imported from." + }, + "job_config_path": { + "description": "Path of the job YAML file that contains the job specification." + } + } + } + } + }, + "health": { + "description": "", + "properties": { + "rules": { + "description": "", + "items": { + "description": "", + "properties": { + "metric": { + "description": "" + }, + "op": { + "description": "" + }, + "value": { + "description": "Specifies the threshold value that the health metric should obey to satisfy the health rule." + } + } + } + } + } + }, + "job_clusters": { + "description": "A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.", + "items": { + "description": "", + "properties": { + "job_cluster_key": { + "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." + }, + "new_cluster": { + "description": "If new_cluster, a description of a cluster that is created for each task.", + "properties": { + "apply_policy_default_values": { + "description": "" + }, + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "autotermination_minutes": { + "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination."
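The `health` block and the `on_duration_warning_threshold_exceeded` notification list are designed to work together: a rule on the `RUN_DURATION_SECONDS` metric (the one metric the email-notification descriptions above call out) sets the threshold, and the email list receives the warning. A sketch of the two fragments inside a job; the `GREATER_THAN` operator name is an assumption based on the Jobs API, since the schema excerpt leaves `op` undescribed:

```json
{
  "health": {
    "rules": [
      {
        "metric": "RUN_DURATION_SECONDS",
        "op": "GREATER_THAN",
        "value": 3600
      }
    ]
  },
  "email_notifications": {
    "on_duration_warning_threshold_exceeded": ["oncall@example.com"]
  }
}
```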
+ }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." + }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. 
If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nThe list of available zones as well as the default value can be found by using the\n`List Zones` method." + } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should be \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. 
`dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + } + } + }, + "cluster_name": { + "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" + }, + "cluster_source": { + "description": "" + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "data_security_mode": { + "description": "" + }, + "docker_image": { + "description": "", + "properties": { + "basic_auth": { + "description": "", + "properties": { + "password": { + "description": "Password of the user" + }, + "username": { + "description": "Name of the user" + } + } + }, + "url": { + "description": "URL of the docker image." + } + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver. 
Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n" + }, + "enable_elastic_disk": { + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details." + }, + "enable_local_disk_encryption": { + "description": "Whether to enable LUKS on cluster VMs' local disks" + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + }, + "local_ssd_count": { + "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + } + } + }, + "init_scripts": { + "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "items": { + "description": "", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "file": { + "description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`", + "properties": { + "destination": { + "description": "local file destination, e.g. `file:/my/local/file.sh`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_acl` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. 
`s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + }, + "volumes": { + "description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`" + } + } + }, + "workspace": { + "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`" + } + } + } + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." 
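Tying the init-script destinations together: the `volumes` variant (Unity Catalog volumes) is the notable addition here, alongside the existing `dbfs`, `file`, `s3`, and `workspace` variants. Below is a sketch of a `new_cluster` fragment reusing the destination examples from the descriptions above; the Spark version and node type are placeholders:

```json
{
  "new_cluster": {
    "spark_version": "13.3.x-scala2.12",
    "node_type_id": "i3.xlarge",
    "num_workers": 2,
    "init_scripts": [
      { "volumes": { "destination": "/Volumes/my-init.sh" } },
      { "workspace": { "destination": "/Users/user1@databricks.com/my-init.sh" } }
    ]
  }
}
```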
+ }, + "runtime_engine": { + "description": "" + }, + "single_user_name": { + "description": "Single user name if data_security_mode is `SINGLE_USER`" + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "spark_version": { + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n" + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + }, + "workload_type": { + "description": "", + "properties": { + "clients": { + "description": " defined what type of clients can use the cluster. E.g. Notebooks, Jobs", + "properties": { + "jobs": { + "description": "With jobs set, the cluster can be used for jobs" + }, + "notebooks": { + "description": "With notebooks set, this cluster can be used for notebooks" + } + } + } + } + } + } + } + } + } + }, + "max_concurrent_runs": { + "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." + }, + "name": { + "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding." + }, + "notification_settings": { + "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this job.", + "properties": { + "no_alert_for_canceled_runs": { + "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is canceled." 
+ }, + "no_alert_for_skipped_runs": { + "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is skipped." + } + } + }, + "parameters": { + "description": "Job-level parameter definitions", + "items": { + "description": "", + "properties": { + "default": { + "description": "Default value of the parameter." + }, + "name": { + "description": "The name of the defined parameter. May only contain alphanumeric characters, `_`, `-`, and `.`" + } + } + } + }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, + "queue": { + "description": "The queue settings of the job.", + "properties": { + "enabled": { + "description": "If true, enable queueing for the job. This is a required field." + } + } + }, + "run_as": { + "description": "", + "properties": { + "service_principal_name": { + "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role." + }, + "user_name": { + "description": "The email of an active workspace user. Non-admin users can only set this field to their own email." + } + } + }, + "schedule": { + "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "properties": { + "pause_status": { + "description": "Indicate whether this schedule is paused or not." + }, + "quartz_cron_expression": { + "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" + }, + "timezone_id": { + "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n" + } + } + }, + "tags": { + "description": "A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job.", + "additionalproperties": { + "description": "" + } + }, + "tasks": { + "description": "A list of task specifications to be executed by this job.", + "items": { + "description": "", + "properties": { + "compute_key": { + "description": "The key of the compute requirement, specified in `job.settings.compute`, to use for execution of this task." + }, + "condition_task": { + "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.", + "properties": { + "left": { + "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference." + }, + "op": { + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. 
`“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.\n" + }, + "right": { + "description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference." + } + } + }, + "dbt_task": { + "description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", + "properties": { + "catalog": { + "description": "Optional name of the catalog to use. The value is the top level in the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog value can only be specified if a warehouse_id is specified. Requires dbt-databricks \u003e= 1.1.1." + }, + "commands": { + "description": "A list of dbt commands to execute. All commands must start with `dbt`. This parameter must not be empty. A maximum of up to 10 commands can be provided.", + "items": { + "description": "" + } + }, + "profiles_directory": { + "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." + }, + "project_directory": { + "description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used." + }, + "schema": { + "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." + }, + "warehouse_id": { + "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." + } + } + }, + "depends_on": { + "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n", + "items": { + "description": "", + "properties": { + "outcome": { + "description": "Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run." + }, + "task_key": { + "description": "The name of the task this task depends on." + } + } + } + }, + "description": { + "description": "An optional description for this task." + }, + "email_notifications": { + "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", + "properties": { + "on_duration_warning_threshold_exceeded": { + "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", + "items": { + "description": "" + } + }, + "on_failure": { + "description": "A list of email addresses to be notified when a run unsuccessfully completes. 
A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_start": { + "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_success": { + "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + } + } + }, + "existing_cluster_id": { + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + }, + "health": { + "description": "", + "properties": { + "rules": { + "description": "", + "items": { + "description": "", + "properties": { + "metric": { + "description": "" + }, + "op": { + "description": "" + }, + "value": { + "description": "Specifies the threshold value that the health metric should obey to satisfy the health rule." + } + } + } + } + } + }, + "job_cluster_key": { + "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." + }, + "libraries": { + "description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.", + "items": { + "description": "", + "properties": { + "cran": { + "description": "Specification of a CRAN library to be installed as part of the library", + "properties": { + "package": { + "description": "The name of the CRAN package to install." + }, + "repo": { + "description": "The repository where the package can be found. If not specified, the default CRAN repo is used." + } + } + }, + "egg": { + "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + }, + "jar": { + "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + }, + "maven": { + "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", + "properties": { + "coordinates": { + "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\"." + }, + "exclusions": { + "description": "List of dependencies to exclude. 
For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "items": { + "description": "" + } + }, + "repo": { + "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched." + } + } + }, + "pypi": { + "description": "Specification of a PyPi library to be installed. For example:\n`{ \"package\": \"simplejson\" }`", + "properties": { + "package": { + "description": "The name of the pypi package to install. An optional exact version specification is also\nsupported. Examples: \"simplejson\" and \"simplejson==3.8.0\"." + }, + "repo": { + "description": "The repository where the package can be found. If not specified, the default pip index is\nused." + } + } + }, + "whl": { + "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + } + } + } + }, + "max_retries": { + "description": "An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means to retry indefinitely and the value `0` means to never retry." + }, + "min_retry_interval_millis": { + "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." + }, + "new_cluster": { + "description": "If new_cluster, a description of a cluster that is created for each task.", + "properties": { + "apply_policy_default_values": { + "description": "" + }, + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "autotermination_minutes": { + "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination." + }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. 
Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogeneously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." + }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nomitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is omitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. 
For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nThe list of available zones as well as the default value can be found by using the\n`List Zones` method." + } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. 
The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + } + } + }, + "cluster_name": { + "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" + }, + "cluster_source": { + "description": "" + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "data_security_mode": { + "description": "" + }, + "docker_image": { + "description": "", + "properties": { + "basic_auth": { + "description": "", + "properties": { + "password": { + "description": "Password of the user" + }, + "username": { + "description": "Name of the user" + } + } + }, + "url": { + "description": "URL of the docker image." + } + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool to which the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n" + }, + "enable_elastic_disk": { + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details." 
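Pulling the S3 log-delivery options into one place: either region or endpoint is required (endpoint wins if both are set), and `canned_acl` needs `s3:PutObjectAcl` on the destination bucket. A sketch assembled from the example values in the descriptions above:

```json
{
  "cluster_log_conf": {
    "s3": {
      "destination": "s3://cluster_log_bucket/prefix",
      "region": "us-west-2",
      "canned_acl": "bucket-owner-full-control",
      "enable_encryption": true,
      "encryption_type": "sse-s3"
    }
  }
}
```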
+ }, + "enable_local_disk_encryption": { + "description": "Whether to enable LUKS on cluster VMs' local disks" + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + }, + "local_ssd_count": { + "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + } + } + }, + "init_scripts": { + "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "items": { + "description": "", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "file": { + "description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`", + "properties": { + "destination": { + "description": "local file destination, e.g. `file:/my/local/file.sh`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. 
It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + }, + "volumes": { + "description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`" + } + } + }, + "workspace": { + "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`" + } + } + } + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." + }, + "runtime_engine": { + "description": "" + }, + "single_user_name": { + "description": "Single user name if data_security_mode is `SINGLE_USER`" + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. 
This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "spark_version": { + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n" + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + }, + "workload_type": { + "description": "", + "properties": { + "clients": { + "description": "Defines what type of clients can use the cluster. E.g. Notebooks, Jobs", + "properties": { + "jobs": { + "description": "With jobs set, the cluster can be used for jobs" + }, + "notebooks": { + "description": "With notebooks set, this cluster can be used for notebooks" + } + } + } + } + } + } + }, + "notebook_task": { + "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", + "properties": { + "base_parameters": { + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", + "additionalproperties": { + "description": "" + } + }, + "notebook_path": { + "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" + }, + "source": { + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). 
When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + } + } + }, + "notification_settings": { + "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this task.", + "properties": { + "alert_on_last_attempt": { + "description": "If true, do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run." + }, + "no_alert_for_canceled_runs": { + "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is canceled." + }, + "no_alert_for_skipped_runs": { + "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is skipped." + } + } + }, + "pipeline_task": { + "description": "If pipeline_task, indicates that this task must execute a Pipeline.", + "properties": { + "full_refresh": { + "description": "If true, a full refresh will be triggered on the delta live table." + }, + "pipeline_id": { + "description": "The full name of the pipeline task to execute." + } + } + }, + "python_wheel_task": { + "description": "If python_wheel_task, indicates that this job must execute a PythonWheel.", + "properties": { + "entry_point": { + "description": "Named entry point to use, if it does not exist in the metadata of the package it executes the function from the package directly using `$packageName.$entryPoint()`" + }, + "named_parameters": { + "description": "Command-line parameters passed to Python wheel task in the form of `[\"--name=task\", \"--data=dbfs:/path/to/data.json\"]`. Leave it empty if `parameters` is not null.", + "additionalproperties": { + "description": "" + } + }, + "package_name": { + "description": "Name of the package to execute" + }, + "parameters": { + "description": "Command-line parameters passed to Python wheel task. Leave it empty if `named_parameters` is not null.", + "items": { + "description": "" + } + } + } + }, + "retry_on_timeout": { + "description": "An optional policy to specify whether to retry a task when it times out." + }, + "run_if": { + "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: All dependencies have failed\n" + }, + "run_job_task": { + "description": "If run_job_task, indicates that this task must execute another job.", + "properties": { + "job_id": { + "description": "ID of the job to trigger." + }, + "job_parameters": { + "description": "" + } + } + }, + "spark_jar_task": { + "description": "If spark_jar_task, indicates that this task must run a JAR.", + "properties": { + "jar_uri": { + "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. 
For an example, see :method:jobs/create.\n" + }, + "main_class_name": { + "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." + }, + "parameters": { + "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + } + } + }, + "spark_python_task": { + "description": "If spark_python_task, indicates that this task must run a Python file.", + "properties": { + "parameters": { + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + }, + "python_file": { + "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." + }, + "source": { + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + } + } + }, + "spark_submit_task": { + "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n", + "properties": { + "parameters": { + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + } + } + }, + "sql_task": { + "description": "If sql_task, indicates that this job must execute a SQL task.", + "properties": { + "alert": { + "description": "If alert, indicates that this job must refresh a SQL alert.", + "properties": { + "alert_id": { + "description": "The canonical identifier of the SQL alert." + }, + "pause_subscriptions": { + "description": "If true, the alert notifications are not sent to subscribers." 
+ }, + "subscriptions": { + "description": "If specified, alert notifications are sent to subscribers.", + "items": { + "description": "", + "properties": { + "destination_id": { + "description": "The canonical identifier of the destination to receive email notification. This parameter is mutually exclusive with user_name. You cannot set both destination_id and user_name for subscription notifications." + }, + "user_name": { + "description": "The user name to receive the subscription email. This parameter is mutually exclusive with destination_id. You cannot set both destination_id and user_name for subscription notifications." + } + } + } + } + } + }, + "dashboard": { + "description": "If dashboard, indicates that this job must refresh a SQL dashboard.", + "properties": { + "custom_subject": { + "description": "Subject of the email sent to subscribers of this task." + }, + "dashboard_id": { + "description": "The canonical identifier of the SQL dashboard." + }, + "pause_subscriptions": { + "description": "If true, the dashboard snapshot is not taken, and emails are not sent to subscribers." + }, + "subscriptions": { + "description": "If specified, dashboard snapshots are sent to subscriptions.", + "items": { + "description": "", + "properties": { + "destination_id": { + "description": "The canonical identifier of the destination to receive email notification. This parameter is mutually exclusive with user_name. You cannot set both destination_id and user_name for subscription notifications." + }, + "user_name": { + "description": "The user name to receive the subscription email. This parameter is mutually exclusive with destination_id. You cannot set both destination_id and user_name for subscription notifications." + } + } + } + } + } + }, + "file": { + "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", + "properties": { + "path": { + "description": "Relative path of the SQL file in the remote Git repository." + } + } + }, + "parameters": { + "description": "Parameters to be used for each run of this job. The SQL alert task does not support custom parameters.", + "additionalproperties": { + "description": "" + } + }, + "query": { + "description": "If query, indicates that this job must execute a SQL query.", + "properties": { + "query_id": { + "description": "The canonical identifier of the SQL query." + } + } + }, + "warehouse_id": { + "description": "The canonical identifier of the SQL warehouse. Recommended to use with serverless or pro SQL warehouses. Classic SQL warehouses are only supported for SQL alert, dashboard and query tasks and are limited to scheduled single-task jobs." + } + } + }, + "task_key": { + "description": "A unique name for the task. This field is used to refer to this task from other tasks.\nThis field is required and must be unique within its parent job.\nOn Update or Reset, this field is used to reference the tasks to be updated or reset." + }, + "timeout_seconds": { + "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." 
+ }, + "webhook_notifications": { + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "properties": { + "on_duration_warning_threshold_exceeded": { + "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_failure": { + "description": "An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_start": { + "description": "An optional list of system notification IDs to call when the run starts. A maximum of 3 destinations can be specified for the `on_start` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_success": { + "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + } + } + } + } + } + }, + "timeout_seconds": { + "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout." + }, + "trigger": { + "description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "properties": { + "file_arrival": { + "description": "File arrival trigger settings.", + "properties": { + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n" + }, + "url": { + "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location." + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n" + } + } + }, + "pause_status": { + "description": "Indicate whether this schedule is paused or not." + } + } + }, + "webhook_notifications": { + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "properties": { + "on_duration_warning_threshold_exceeded": { + "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_failure": { + "description": "An optional list of system notification IDs to call when the run fails. 
A maximum of 3 destinations can be specified for the `on_failure` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_start": { + "description": "An optional list of system notification IDs to call when the run starts. A maximum of 3 destinations can be specified for the `on_start` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_success": { + "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + } + } + } + } + } + }, + "model_serving_endpoints": { + "description": "List of Model Serving Endpoints", + "additionalproperties": { + "description": "", + "properties": { + "config": { + "description": "The core config of the serving endpoint.", + "properties": { + "served_models": { + "description": "A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.", + "items": { + "description": "", + "properties": { + "environment_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this model.\nNote: this is an experimental feature and subject to change. \nExample model environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", + "additionalproperties": { + "description": "" + } + }, + "instance_profile_arn": { + "description": "ARN of the instance profile that the served model will use to access AWS resources." + }, + "model_name": { + "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, \nin the form of __catalog_name__.__schema_name__.__model_name__.\n" + }, + "model_version": { + "description": "The version of the model in Databricks Model Registry or Unity Catalog to be served." + }, + "name": { + "description": "The name of a served model. It must be unique across an endpoint. If not specified, this field will default to \u003cmodel-name\u003e-\u003cmodel-version\u003e.\nA served model name can consist of alphanumeric characters, dashes, and underscores.\n" + }, + "scale_to_zero_enabled": { + "description": "Whether the compute resources for the served model should scale down to zero." + }, + "workload_size": { + "description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n" + }, + "workload_type": { + "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. 
See documentation for all\noptions.\n" + } + } + } + }, + "traffic_config": { + "description": "The traffic config defining how invocations to the serving endpoint should be routed.", + "properties": { + "routes": { + "description": "The list of routes that define traffic to each served model.", + "items": { + "description": "", + "properties": { + "served_model_name": { + "description": "The name of the served model this route configures traffic for." + }, + "traffic_percentage": { + "description": "The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive." + } + } + } + } + } + } + } + }, + "name": { + "description": "The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.\n" + }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, + "tags": { + "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", + "items": { + "description": "", + "properties": { + "key": { + "description": "Key field for a serving endpoint tag." + }, + "value": { + "description": "Optional value field for a serving endpoint tag." + } + } + } + } + } + } + }, + "models": { + "description": "List of MLflow models", + "additionalproperties": { + "description": "", + "properties": { + "creation_timestamp": { + "description": "Timestamp recorded when this `registered_model` was created." + }, + "description": { + "description": "Description of this `registered_model`." + }, + "last_updated_timestamp": { + "description": "Timestamp recorded when metadata for this `registered_model` was last updated." + }, + "latest_versions": { + "description": "Collection of latest model versions for each stage.\nOnly contains models with current `READY` status.", + "items": { + "description": "", + "properties": { + "creation_timestamp": { + "description": "Timestamp recorded when this `model_version` was created." + }, + "current_stage": { + "description": "Current stage for this `model_version`." + }, + "description": { + "description": "Description of this `model_version`." + }, + "last_updated_timestamp": { + "description": "Timestamp recorded when metadata for this `model_version` was last updated." + }, + "name": { + "description": "Unique name of the model" + }, + "run_id": { + "description": "MLflow run ID used when creating `model_version`, if `source` was generated by an\nexperiment run stored in MLflow tracking server." + }, + "run_link": { + "description": "Run Link: Direct link to the run that generated this version" + }, + "source": { + "description": "URI indicating the location of the source model artifacts, used when creating `model_version`" + }, + "status": { + "description": "Current status of `model_version`" + }, + "status_message": { + "description": "Details on current `status`, if it is pending or failed." + }, + "tags": { + "description": "Tags: Additional metadata key-value pairs for this `model_version`.", + "items": { + "description": "", + "properties": { + "key": { + "description": "The tag key." + }, + "value": { + "description": "The tag value." + } + } + } + }, + "user_id": { + "description": "User that created this `model_version`." 
+ }, + "version": { + "description": "Model's version number." + } + } + } + }, + "name": { + "description": "Unique name for the model." + }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, + "tags": { + "description": "Tags: Additional metadata key-value pairs for this `registered_model`.", + "items": { + "description": "", + "properties": { + "key": { + "description": "The tag key." + }, + "value": { + "description": "The tag value." + } + } + } + }, + "user_id": { + "description": "User that created this `registered_model`" + } + } + } + }, + "pipelines": { + "description": "List of DLT pipelines", + "additionalproperties": { + "description": "", + "properties": { + "catalog": { + "description": "A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog." + }, + "channel": { + "description": "DLT Release Channel that specifies which version to use." + }, + "clusters": { + "description": "Cluster settings for this pipeline deployment.", + "items": { + "description": "", + "properties": { + "apply_policy_default_values": { + "description": "Note: This field won't be persisted. Only API users will check this field." + }, + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." 
+ }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nThe list of available zones as well as the default value can be found by using the\n`List Zones` method." 
+ } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should be \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nOnly dbfs destinations are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.\n", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_acl` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. 
`s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + } + } + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool to use for the driver of the cluster.\nThe cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above." + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + }, + "local_ssd_count": { + "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "label": { + "description": "A label for the cluster specification, either `default` to configure the default cluster, or `maintenance` to configure the maintenance cluster. This field is optional. The default value is `default`." + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. 
A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nSee :method:clusters/create for more details.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + } + } + } + }, + "configuration": { + "description": "String-String configuration for this pipeline execution.", + "additionalproperties": { + "description": "" + } + }, + "continuous": { + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." + }, + "development": { + "description": "Whether the pipeline is in Development mode. Defaults to false." + }, + "edition": { + "description": "Pipeline product edition." + }, + "filters": { + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "properties": { + "exclude": { + "description": "Paths to exclude.", + "items": { + "description": "" + } + }, + "include": { + "description": "Paths to include.", + "items": { + "description": "" + } + } + } + }, + "id": { + "description": "Unique identifier for this pipeline." + }, + "libraries": { + "description": "Libraries or code needed by this deployment.", + "items": { + "description": "", + "properties": { + "file": { + "description": "The path to a file that defines a pipeline and is stored in the Databricks Repos.\n", + "properties": { + "path": { + "description": "The absolute path of the file." + } + } + }, + "jar": { + "description": "URI of the jar to be installed. 
Currently only DBFS is supported.\n" + }, + "maven": { + "description": "Specification of a maven library to be installed.\n", + "properties": { + "coordinates": { + "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\"." + }, + "exclusions": { + "description": "List of dependencies to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "items": { + "description": "" + } + }, + "repo": { + "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched." + } + } + }, + "notebook": { + "description": "The path to a notebook that defines a pipeline and is stored in the \u003cDatabricks\u003e workspace.\n", + "properties": { + "path": { + "description": "The absolute path of the notebook." + } + } + } + } + } + }, + "name": { + "description": "Friendly identifier for this pipeline." + }, + "notifications": { + "description": "List of notification settings for this pipeline.", + "items": { + "description": "", + "properties": { + "alerts": { + "description": "A list of alerts that trigger the sending of notifications to the configured\ndestinations. The supported alerts are:\n\n* `on-update-success`: A pipeline update completes successfully.\n* `on-update-failure`: Each time a pipeline update fails.\n* `on-update-fatal-failure`: A pipeline update fails with a non-retryable (fatal) error.\n* `on-flow-failure`: A single data flow fails.\n", + "items": { + "description": "" + } + }, + "email_recipients": { + "description": "A list of email addresses notified when a configured alert is triggered.\n", + "items": { + "description": "" + } + } + } + } + }, + "permissions": { + "description": "", + "items": { + "description": "", + "properties": { + "group_name": { + "description": "" + }, + "level": { + "description": "" + }, + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + } + }, + "photon": { + "description": "Whether Photon is enabled for this pipeline." + }, + "serverless": { + "description": "Whether serverless compute is enabled for this pipeline." + }, + "storage": { + "description": "DBFS root directory for storing checkpoints and tables." + }, + "target": { + "description": "Target schema (database) to add tables in this pipeline to. If not specified, no data is published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`." + }, + "trigger": { + "description": "Which pipeline trigger to use. 
Deprecated: Use `continuous` instead.", + "properties": { + "cron": { + "description": "", + "properties": { + "quartz_cron_schedule": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "manual": { + "description": "" + } + } + } + } + } + }, + "registered_models": { + "description": "List of Registered Models", + "additionalproperties": { + "description": "", + "properties": { + "catalog_name": { + "description": "The name of the catalog where the schema and the registered model reside" + }, + "comment": { + "description": "The comment attached to the registered model" + }, + "grants": { + "description": "", + "items": { + "description": "", + "properties": { + "principal": { + "description": "" + }, + "privileges": { + "description": "", + "items": { + "description": "" + } + } + } + } + }, + "name": { + "description": "The name of the registered model" + }, + "schema_name": { + "description": "The name of the schema where the registered model resides" + }, + "storage_location": { + "description": "The storage location on the cloud under which model version data files are stored" + } + } + } + } + } + }, + "run_as": { + "description": "", + "properties": { + "service_principal_name": { + "description": "" + }, + "user_name": { + "description": "" + } + } + }, + "sync": { + "description": "", + "properties": { + "exclude": { + "description": "", + "items": { + "description": "" + } + }, + "include": { + "description": "", + "items": { + "description": "" + } + } + } + }, + "variables": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "workspace": { + "description": "Configures which workspace to connect to and locations for files, state, and similar locations within the workspace file tree.", + "properties": { + "artifact_path": { + "description": "The remote path to synchronize build artifacts to. This defaults to `${workspace.root}/artifacts`" + }, + "auth_type": { + "description": "" + }, + "azure_client_id": { + "description": "" + }, + "azure_environment": { + "description": "Azure environment, one of (Public, UsGov, China, Germany)." + }, + "azure_login_app_id": { + "description": "Azure Login Application ID." + }, + "azure_tenant_id": { + "description": "" + }, + "azure_use_msi": { + "description": "" + }, + "azure_workspace_resource_id": { + "description": "Azure Resource Manager ID for Azure Databricks workspace." + }, + "client_id": { + "description": "" + }, + "file_path": { + "description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`" + }, + "google_service_account": { + "description": "" + }, + "host": { + "description": "Host url of the workspace." + }, + "profile": { + "description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg." + }, + "root_path": { + "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`" + }, + "state_path": { + "description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`" + } + } + } } } }, @@ -3640,6 +4445,9 @@ "artifact_path": { "description": "The remote path to synchronize build artifacts to. This defaults to `${workspace.root}/artifacts`" }, + "auth_type": { + "description": "" + }, "azure_client_id": { "description": "" }, @@ -3658,6 +4466,9 @@ "azure_workspace_resource_id": { "description": "Azure Resource Manager ID for Azure Databricks workspace." 
}, + "client_id": { + "description": "" + }, "file_path": { "description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`" }, @@ -3679,4 +4490,4 @@ } } } -} +} \ No newline at end of file diff --git a/bundle/schema/schema.go b/bundle/schema/schema.go index 00dd2719..8b5c36d1 100644 --- a/bundle/schema/schema.go +++ b/bundle/schema/schema.go @@ -17,6 +17,10 @@ const readonlyTag = "readonly" // Fields can be tagged as "internal" to remove them from the generated schema. const internalTag = "internal" +// Annotation for bundle fields that have been deprecated. +// Fields tagged as "deprecated" are removed/omitted from the generated schema. +const deprecatedTag = "deprecated" + // This function translates golang types into json schema. Here is the mapping // between json schema types and golang types // @@ -205,7 +209,9 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*jsonschem required := []string{} for _, child := range children { bundleTag := child.Tag.Get("bundle") - if bundleTag == readonlyTag || bundleTag == internalTag { + // Fields marked as "readonly", "internal" or "deprecated" are skipped + // while generating the schema + if bundleTag == readonlyTag || bundleTag == internalTag || bundleTag == deprecatedTag { continue } diff --git a/cmd/bundle/schema.go b/cmd/bundle/schema.go index ec817037..f516695c 100644 --- a/cmd/bundle/schema.go +++ b/cmd/bundle/schema.go @@ -2,7 +2,6 @@ package bundle import ( "encoding/json" - "os" "reflect" "github.com/databricks/cli/bundle/config" @@ -16,47 +15,24 @@ func newSchemaCommand() *cobra.Command { Short: "Generate JSON Schema for bundle configuration", } - var openapi string - var outputFile string - var onlyDocs bool - cmd.Flags().StringVar(&openapi, "openapi", "", "path to a databricks openapi spec") - cmd.Flags().BoolVar(&onlyDocs, "only-docs", false, "only generate descriptions for the schema") - cmd.Flags().StringVar(&outputFile, "output-file", "", "File path to write the schema to. If not specified, the schema will be written to stdout.") - cmd.RunE = func(cmd *cobra.Command, args []string) error { - // If no openapi spec is provided, try to use the environment variable. - // This environment variable is set during CLI code generation. - if openapi == "" { - openapi = os.Getenv("DATABRICKS_OPENAPI_SPEC") - } - docs, err := schema.BundleDocs(openapi) + // Load embedded schema descriptions. + docs, err := schema.LoadBundleDescriptions() if err != nil { return err } + + // Generate the JSON schema from the bundle configuration struct in Go. schema, err := schema.New(reflect.TypeOf(config.Root{}), docs) if err != nil { return err } + + // Print the JSON schema to stdout. result, err := json.MarshalIndent(schema, "", " ") if err != nil { return err } - if onlyDocs { - result, err = json.MarshalIndent(docs, "", " ") - if err != nil { - return err - } - } - - // If outputFile is provided, write to that file. - if outputFile != "" { - f, err := os.OpenFile(outputFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) - if err != nil { - return err - } - defer f.Close() - cmd.SetOut(f) - } cmd.OutOrStdout().Write(result) return nil } From 2d829678a091e4872b9935a95c78da03c71a14f0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 6 Dec 2023 15:37:38 +0100 Subject: [PATCH 280/310] Release v0.210.2 (#1044) CLI: * Add documentation for positional args in commands generated from the Databricks OpenAPI specification ([#1033](https://github.com/databricks/cli/pull/1033)). 
* Ask for host when .databrickscfg doesn't exist ([#1041](https://github.com/databricks/cli/pull/1041)). * Add list of supported values for flags that represent an enum field ([#1036](https://github.com/databricks/cli/pull/1036)). Bundles: * Fix panic when bundle auth resolution fails ([#1002](https://github.com/databricks/cli/pull/1002)). * Add versioning for bundle templates ([#972](https://github.com/databricks/cli/pull/972)). * Add support for conditional prompting in bundle init ([#971](https://github.com/databricks/cli/pull/971)). * Pass parameters to task when run with `--python-params` and `python_wheel_wrapper` is true ([#1037](https://github.com/databricks/cli/pull/1037)). * Change default_python template to auto-update version on each wheel build ([#1034](https://github.com/databricks/cli/pull/1034)). Internal: * Rewrite the friendly log handler ([#1038](https://github.com/databricks/cli/pull/1038)). * Move bundle schema update to an internal module ([#1012](https://github.com/databricks/cli/pull/1012)). Dependency updates: * Bump github.com/databricks/databricks-sdk-go from 0.26.0 to 0.26.1 ([#1040](https://github.com/databricks/cli/pull/1040)). --- CHANGELOG.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cc418d1..22d535ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,27 @@ # Version changelog +## 0.210.2 + +CLI: + * Add documentation for positional args in commands generated from the Databricks OpenAPI specification ([#1033](https://github.com/databricks/cli/pull/1033)). + * Ask for host when .databrickscfg doesn't exist ([#1041](https://github.com/databricks/cli/pull/1041)). + * Add list of supported values for flags that represent an enum field ([#1036](https://github.com/databricks/cli/pull/1036)). + +Bundles: + * Fix panic when bundle auth resolution fails ([#1002](https://github.com/databricks/cli/pull/1002)). + * Add versioning for bundle templates ([#972](https://github.com/databricks/cli/pull/972)). + * Add support for conditional prompting in bundle init ([#971](https://github.com/databricks/cli/pull/971)). + * Pass parameters to task when run with `--python-params` and `python_wheel_wrapper` is true ([#1037](https://github.com/databricks/cli/pull/1037)). + * Change default_python template to auto-update version on each wheel build ([#1034](https://github.com/databricks/cli/pull/1034)). + +Internal: + * Rewrite the friendly log handler ([#1038](https://github.com/databricks/cli/pull/1038)). + * Move bundle schema update to an internal module ([#1012](https://github.com/databricks/cli/pull/1012)). + + +Dependency updates: + * Bump github.com/databricks/databricks-sdk-go from 0.26.0 to 0.26.1 ([#1040](https://github.com/databricks/cli/pull/1040)). + ## 0.210.1 This is a bugfix release to address issues with v0.210.0. 
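As context for the `deprecated` bundle tag added to `bundle/schema/schema.go` in the patch above: it behaves exactly like the existing `readonly` and `internal` tags during schema generation. Below is a minimal, self-contained sketch of that skip logic; the struct and its field names are hypothetical, not the CLI's actual configuration.

```go
package main

import (
	"fmt"
	"reflect"
)

// Hypothetical config struct: "lock" is readonly and "compute" is
// deprecated, so neither should appear in the generated JSON schema.
type exampleRoot struct {
	Name    string `json:"name"`
	Lock    string `json:"lock" bundle:"readonly"`
	Compute string `json:"compute" bundle:"deprecated"`
}

func main() {
	t := reflect.TypeOf(exampleRoot{})
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("bundle")
		// Mirrors the check in toSchema: fields tagged "readonly",
		// "internal" or "deprecated" are skipped.
		if tag == "readonly" || tag == "internal" || tag == "deprecated" {
			continue
		}
		fmt.Println("schema includes:", t.Field(i).Tag.Get("json"))
	}
}
```

Running this prints only `schema includes: name`, mirroring how tagged fields are omitted from the generated JSON schema.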
From 42c06267eb64b2ccbe55985f4a426335e404cd3b Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Mon, 11 Dec 2023 08:30:19 -0800 Subject: [PATCH 281/310] Stub out Python virtual environment installation for `labs` commands (#1057) This PR removes 15 seconds from `make test` runtime --- cmd/labs/project/installer_test.go | 10 +++++++++- libs/process/stub.go | 22 +++++++++++++++++++--- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 60af43c6..709e14f2 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -21,6 +21,7 @@ import ( "github.com/databricks/cli/cmd/labs/project" "github.com/databricks/cli/internal" "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/process" "github.com/databricks/cli/libs/python" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/iam" @@ -194,6 +195,13 @@ func TestInstallerWorksForReleases(t *testing.T) { ctx := installerContext(t, server) + ctx, stub := process.WithStub(ctx) + stub.WithStdoutFor(`python[\S]+ --version`, "Python 3.10.5") + // on Unix, we call `python3`, but on Windows it is `python.exe` + stub.WithStderrFor(`python[\S]+ -m venv .*/.databricks/labs/blueprint/state/venv`, "[mock venv create]") + stub.WithStderrFor(`python[\S]+ -m pip install .`, "[mock pip install]") + stub.WithStdoutFor(`python[\S]+ install.py`, "setting up important infrastructure") + // simulate the case of GitHub Actions ctx = env.Set(ctx, "DATABRICKS_HOST", server.URL) ctx = env.Set(ctx, "DATABRICKS_TOKEN", "...") @@ -228,7 +236,7 @@ func TestInstallerWorksForReleases(t *testing.T) { // │ │ │ └── site-packages // │ │ │ ├── ... // │ │ │ ├── distutils-precedence.pth - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", "blueprint") + r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", "blueprint", "--debug") r.RunAndExpectOutput("setting up important infrastructure") } diff --git a/libs/process/stub.go b/libs/process/stub.go index 280a9a8a..8472f65d 100644 --- a/libs/process/stub.go +++ b/libs/process/stub.go @@ -2,9 +2,11 @@ package process import ( "context" + "errors" "fmt" "os/exec" "path/filepath" + "regexp" "strings" ) @@ -128,14 +130,24 @@ func (s *processStub) normCmd(v *exec.Cmd) string { // "/var/folders/bc/7qf8yghj6v14t40096pdcqy40000gp/T/tmp.03CAcYcbOI/python3" becomes "python3". // Use [processStub.WithCallback] if you need to match against the full executable path. 
binaryName := filepath.Base(v.Path) - args := strings.Join(v.Args[1:], " ") + var unixArgs []string + for _, arg := range v.Args[1:] { + unixArgs = append(unixArgs, filepath.ToSlash(arg)) + } + args := strings.Join(unixArgs, " ") return fmt.Sprintf("%s %s", binaryName, args) } +var ErrStubContinue = errors.New("continue executing the stub after callback") + func (s *processStub) run(cmd *exec.Cmd) error { s.calls = append(s.calls, cmd) - resp, ok := s.responses[s.normCmd(cmd)] - if ok { + for pattern, resp := range s.responses { + re := regexp.MustCompile(pattern) + norm := s.normCmd(cmd) + if !re.MatchString(norm) { + continue + } if resp.stdout != "" { cmd.Stdout.Write([]byte(resp.stdout)) } @@ -147,6 +159,10 @@ func (s *processStub) run(cmd *exec.Cmd) error { if s.callback != nil { return s.callback(cmd) } + var zeroStub reponseStub + if s.reponseStub == zeroStub { + return fmt.Errorf("no default process stub") + } if s.reponseStub.stdout != "" { cmd.Stdout.Write([]byte(s.reponseStub.stdout)) } From 8b9930a49a0ce64973c8bcfecdfe306d2e9d5170 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 11 Dec 2023 20:13:14 +0100 Subject: [PATCH 282/310] Improve default template (#1046) ## Changes - Tweak strings, documentation in template - Extend requirements-dev.txt with setuptools/wheel for building whl files - Clarify what the "_job.yml" file is for for users who are only interested in DLT pipelines (answering a question that came up recently) ## Tests Existing tests exercise this template --- .../databricks_template_schema.json | 6 +-- .../template/{{.project_name}}/.gitignore | 1 - .../template/{{.project_name}}/README.md.tmpl | 2 +- .../{{.project_name}}/databricks.yml.tmpl | 38 +++++++------------ .../requirements-dev.txt.tmpl | 4 ++ .../resources/{{.project_name}}_job.yml.tmpl | 9 ++++- 6 files changed, 30 insertions(+), 30 deletions(-) diff --git a/libs/template/templates/default-python/databricks_template_schema.json b/libs/template/templates/default-python/databricks_template_schema.json index 8d5afb57..d53bad91 100644 --- a/libs/template/templates/default-python/databricks_template_schema.json +++ b/libs/template/templates/default-python/databricks_template_schema.json @@ -1,10 +1,10 @@ { - "welcome_message": "\nWelcome to the sample Databricks Asset Bundle template! Please enter the following information to initialize your sample DAB.\n", + "welcome_message": "\nWelcome to the default Python template for Databricks Asset Bundles!", "properties": { "project_name": { "type": "string", "default": "my_project", - "description": "Unique name for this project", + "description": "Please provide the following details to tailor the template to your preferences.\n\nUnique name for this project", "order": 1, "pattern": "^[A-Za-z0-9_]+$", "pattern_match_failure_message": "Name must consist of letters, numbers, and underscores." @@ -31,5 +31,5 @@ "order": 4 } }, - "success_message": "\n✨ Your new project has been created in the '{{.project_name}}' directory!\n\nPlease refer to the README.md of your project for further instructions on getting started.\nOr read the documentation on Databricks Asset Bundles at https://docs.databricks.com/dev-tools/bundles/index.html." 
+ "success_message": "Workspace to use (auto-detected, edit in '{{.project_name}}/databricks.yml'): {{workspace_host}}\n\n✨ Your new project has been created in the '{{.project_name}}' directory!\n\nPlease refer to the README.md file for \"getting started\" instructions.\nSee also the documentation at https://docs.databricks.com/dev-tools/bundles/index.html." } diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.gitignore b/libs/template/templates/default-python/template/{{.project_name}}/.gitignore index aa87f019..0dab7f49 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/.gitignore +++ b/libs/template/templates/default-python/template/{{.project_name}}/.gitignore @@ -1,4 +1,3 @@ - .databricks/ build/ dist/ diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl index b451d03b..476c1cd6 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl @@ -28,7 +28,7 @@ The '{{.project_name}}' project was generated by using the default-python templa $ databricks bundle deploy --target prod ``` -5. To run a job or pipeline, use the "run" comand: +5. To run a job or pipeline, use the "run" command: ``` $ databricks bundle run ``` diff --git a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl index 7fbf4da4..7860b32b 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl @@ -7,46 +7,36 @@ include: - resources/*.yml targets: - # The 'dev' target, used for development purposes. - # Whenever a developer deploys using 'dev', they get their own copy. + # The 'dev' target, for development purposes. This target is the default. dev: - # We use 'mode: development' to make sure everything deployed to this target gets a prefix - # like '[dev my_user_name]'. Setting this mode also disables any schedules and - # automatic triggers for jobs and enables the 'development' mode for Delta Live Tables pipelines. + # We use 'mode: development' to indicate this is a personal development copy: + # - Deployed resources get prefixed with '[dev my_user_name]' + # - Any job schedules and triggers are paused by default + # - The 'development' mode is used for Delta Live Tables pipelines mode: development default: true workspace: host: {{workspace_host}} - # Optionally, there could be a 'staging' target here. - # (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/index.html.) + ## Optionally, there could be a 'staging' target here. + ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/index.html.) # # staging: - # workspace: - # host: {{workspace_host}} + # workspace: + # host: {{workspace_host}} # The 'prod' target, used for production deployment. prod: - # For production deployments, we only have a single copy, so we override the - # workspace.root_path default of - # /Users/${workspace.current_user.userName}/.bundle/${bundle.target}/${bundle.name} - # to a path that is not specific to the current user. - {{- /* - Explaining 'mode: production' isn't as pressing as explaining 'mode: development'. 
- As we already talked about the other mode above, users can just - look at documentation or ask the assistant about 'mode: production'. - # - # By making use of 'mode: production' we enable strict checks - # to make sure we have correctly configured this target. - */}} + # We use 'mode: production' to indicate this is a production deployment. + # Doing so enables strict verification of the settings below. mode: production workspace: host: {{workspace_host}} + # We only have a single deployment copy for production, so we use a shared path. root_path: /Shared/.bundle/prod/${bundle.name} {{- if not is_service_principal}} run_as: - # This runs as {{user_name}} in production. Alternatively, - # a service principal could be used here using service_principal_name - # (see Databricks documentation). + # This runs as {{user_name}} in production. We could also use a service principal here + # using service_principal_name (see https://docs.databricks.com/dev-tools/bundles/permissions.html). user_name: {{user_name}} {{end -}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl index 2d4c0f64..6da40321 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl @@ -6,6 +6,10 @@ ## pytest is the default package used for testing pytest +## Dependencies for building wheel files +setuptools +wheel + ## databricks-connect can be used to run parts of this project locally. ## See https://docs.databricks.com/dev-tools/databricks-connect.html. ## diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl index 23bdee49..dc79e3a1 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -1,10 +1,17 @@ -# The main job for {{.project_name}} +# The main job for {{.project_name}}. + +{{- /* Clarify what this job is for for DLT-only users. */}} +{{if and (eq .include_dlt "yes") (and (eq .include_notebook "no") (eq .include_python "no")) -}} +# This job runs {{.project_name}}_pipeline on a schedule. +{{end -}} + resources: jobs: {{.project_name}}_job: name: {{.project_name}}_job schedule: + # Run every day at 8:37 AM quartz_cron_expression: '44 37 8 * * ?' 
timezone_id: Europe/Amsterdam From b479a7cf6796c9e34ce759e5698b09ae84249d91 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 12 Dec 2023 03:23:21 +0530 Subject: [PATCH 283/310] Upgrade Terraform schema version to v1.31.1 (#1055) Co-authored-by: Pieter Noordhuis --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../tf/schema/data_source_current_config.go | 12 +++++ .../schema/data_source_instance_profiles.go | 15 +++++++ bundle/internal/tf/schema/data_source_job.go | 37 +++++++++++++--- .../tf/schema/data_source_mlflow_model.go | 39 ++++++++++++++++ bundle/internal/tf/schema/data_sources.go | 6 +++ .../resource_catalog_workspace_binding.go | 9 ++-- .../tf/schema/resource_cluster_policy.go | 44 +++++++++++++++---- .../resource_default_namespace_setting.go | 14 ++++++ bundle/internal/tf/schema/resource_job.go | 37 +++++++++++++--- .../internal/tf/schema/resource_metastore.go | 2 +- .../schema/resource_metastore_data_access.go | 6 ++- .../tf/schema/resource_mlflow_model.go | 13 +++--- .../tf/schema/resource_mws_credentials.go | 2 +- .../resource_mws_customer_managed_keys.go | 2 +- .../internal/tf/schema/resource_recipient.go | 1 + .../tf/schema/resource_sql_dashboard.go | 1 + .../internal/tf/schema/resource_sql_query.go | 8 ++-- .../tf/schema/resource_storage_credential.go | 4 +- .../tf/schema/resource_workspace_file.go | 1 + bundle/internal/tf/schema/resources.go | 2 + bundle/internal/tf/schema/root.go | 2 +- 22 files changed, 213 insertions(+), 46 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_current_config.go create mode 100644 bundle/internal/tf/schema/data_source_instance_profiles.go create mode 100644 bundle/internal/tf/schema/data_source_mlflow_model.go create mode 100644 bundle/internal/tf/schema/resource_default_namespace_setting.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 3269a971..d141592a 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.29.0" +const ProviderVersion = "1.31.1" diff --git a/bundle/internal/tf/schema/data_source_current_config.go b/bundle/internal/tf/schema/data_source_current_config.go new file mode 100644 index 00000000..52be33fc --- /dev/null +++ b/bundle/internal/tf/schema/data_source_current_config.go @@ -0,0 +1,12 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceCurrentConfig struct { + AccountId string `json:"account_id,omitempty"` + AuthType string `json:"auth_type,omitempty"` + CloudType string `json:"cloud_type,omitempty"` + Host string `json:"host,omitempty"` + Id string `json:"id,omitempty"` + IsAccount bool `json:"is_account,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_instance_profiles.go b/bundle/internal/tf/schema/data_source_instance_profiles.go new file mode 100644 index 00000000..fa2d014d --- /dev/null +++ b/bundle/internal/tf/schema/data_source_instance_profiles.go @@ -0,0 +1,15 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceInstanceProfilesInstanceProfiles struct { + Arn string `json:"arn,omitempty"` + IsMeta bool `json:"is_meta,omitempty"` + Name string `json:"name,omitempty"` + RoleArn string `json:"role_arn,omitempty"` +} + +type DataSourceInstanceProfiles struct { + Id string `json:"id,omitempty"` + InstanceProfiles []DataSourceInstanceProfilesInstanceProfiles `json:"instance_profiles,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index 49be8f01..75d3672b 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -30,7 +30,6 @@ type DataSourceJobJobSettingsSettingsDeployment struct { } type DataSourceJobJobSettingsSettingsEmailNotifications struct { - AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` @@ -492,8 +491,6 @@ type DataSourceJobJobSettingsSettingsTaskDependsOn struct { } type DataSourceJobJobSettingsSettingsTaskEmailNotifications struct { - AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` - NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` @@ -777,6 +774,29 @@ type DataSourceJobJobSettingsSettingsTaskSqlTask struct { Query *DataSourceJobJobSettingsSettingsTaskSqlTaskQuery `json:"query,omitempty"` } +type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { + Id string `json:"id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnFailure struct { + Id string `json:"id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart struct { + Id string `json:"id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess struct { + Id string `json:"id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskWebhookNotifications struct { + OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` + OnStart []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnSuccess []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` +} + type DataSourceJobJobSettingsSettingsTask struct { ComputeKey string `json:"compute_key,omitempty"` Description string `json:"description,omitempty"` @@ -804,6 +824,7 @@ type DataSourceJobJobSettingsSettingsTask struct { SparkPythonTask *DataSourceJobJobSettingsSettingsTaskSparkPythonTask `json:"spark_python_task,omitempty"` SparkSubmitTask *DataSourceJobJobSettingsSettingsTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` SqlTask *DataSourceJobJobSettingsSettingsTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *DataSourceJobJobSettingsSettingsTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type DataSourceJobJobSettingsSettingsTriggerFileArrival struct { @@ -818,19 
+839,19 @@ type DataSourceJobJobSettingsSettingsTrigger struct { } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id"` + Id string `json:"id,omitempty"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure struct { - Id string `json:"id"` + Id string `json:"id,omitempty"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart struct { - Id string `json:"id"` + Id string `json:"id,omitempty"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess struct { - Id string `json:"id"` + Id string `json:"id,omitempty"` } type DataSourceJobJobSettingsSettingsWebhookNotifications struct { @@ -841,6 +862,8 @@ type DataSourceJobJobSettingsSettingsWebhookNotifications struct { } type DataSourceJobJobSettingsSettings struct { + Description string `json:"description,omitempty"` + EditMode string `json:"edit_mode,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"` Format string `json:"format,omitempty"` MaxConcurrentRuns int `json:"max_concurrent_runs,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_mlflow_model.go b/bundle/internal/tf/schema/data_source_mlflow_model.go new file mode 100644 index 00000000..a7f26d7c --- /dev/null +++ b/bundle/internal/tf/schema/data_source_mlflow_model.go @@ -0,0 +1,39 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceMlflowModelLatestVersionsTags struct { + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` +} + +type DataSourceMlflowModelLatestVersions struct { + CreationTimestamp int `json:"creation_timestamp,omitempty"` + CurrentStage string `json:"current_stage,omitempty"` + Description string `json:"description,omitempty"` + LastUpdatedTimestamp int `json:"last_updated_timestamp,omitempty"` + Name string `json:"name,omitempty"` + RunId string `json:"run_id,omitempty"` + RunLink string `json:"run_link,omitempty"` + Source string `json:"source,omitempty"` + Status string `json:"status,omitempty"` + StatusMessage string `json:"status_message,omitempty"` + UserId string `json:"user_id,omitempty"` + Version string `json:"version,omitempty"` + Tags []DataSourceMlflowModelLatestVersionsTags `json:"tags,omitempty"` +} + +type DataSourceMlflowModelTags struct { + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` +} + +type DataSourceMlflowModel struct { + Description string `json:"description,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + PermissionLevel string `json:"permission_level,omitempty"` + UserId string `json:"user_id,omitempty"` + LatestVersions []DataSourceMlflowModelLatestVersions `json:"latest_versions,omitempty"` + Tags []DataSourceMlflowModelTags `json:"tags,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 79658298..c61ab909 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -10,16 +10,19 @@ type DataSources struct { Cluster map[string]*DataSourceCluster `json:"databricks_cluster,omitempty"` ClusterPolicy map[string]*DataSourceClusterPolicy `json:"databricks_cluster_policy,omitempty"` Clusters map[string]*DataSourceClusters `json:"databricks_clusters,omitempty"` + CurrentConfig map[string]*DataSourceCurrentConfig `json:"databricks_current_config,omitempty"` CurrentUser map[string]*DataSourceCurrentUser `json:"databricks_current_user,omitempty"` DbfsFile 
map[string]*DataSourceDbfsFile `json:"databricks_dbfs_file,omitempty"` DbfsFilePaths map[string]*DataSourceDbfsFilePaths `json:"databricks_dbfs_file_paths,omitempty"` Directory map[string]*DataSourceDirectory `json:"databricks_directory,omitempty"` Group map[string]*DataSourceGroup `json:"databricks_group,omitempty"` InstancePool map[string]*DataSourceInstancePool `json:"databricks_instance_pool,omitempty"` + InstanceProfiles map[string]*DataSourceInstanceProfiles `json:"databricks_instance_profiles,omitempty"` Job map[string]*DataSourceJob `json:"databricks_job,omitempty"` Jobs map[string]*DataSourceJobs `json:"databricks_jobs,omitempty"` Metastore map[string]*DataSourceMetastore `json:"databricks_metastore,omitempty"` Metastores map[string]*DataSourceMetastores `json:"databricks_metastores,omitempty"` + MlflowModel map[string]*DataSourceMlflowModel `json:"databricks_mlflow_model,omitempty"` MwsCredentials map[string]*DataSourceMwsCredentials `json:"databricks_mws_credentials,omitempty"` MwsWorkspaces map[string]*DataSourceMwsWorkspaces `json:"databricks_mws_workspaces,omitempty"` NodeType map[string]*DataSourceNodeType `json:"databricks_node_type,omitempty"` @@ -49,16 +52,19 @@ func NewDataSources() *DataSources { Cluster: make(map[string]*DataSourceCluster), ClusterPolicy: make(map[string]*DataSourceClusterPolicy), Clusters: make(map[string]*DataSourceClusters), + CurrentConfig: make(map[string]*DataSourceCurrentConfig), CurrentUser: make(map[string]*DataSourceCurrentUser), DbfsFile: make(map[string]*DataSourceDbfsFile), DbfsFilePaths: make(map[string]*DataSourceDbfsFilePaths), Directory: make(map[string]*DataSourceDirectory), Group: make(map[string]*DataSourceGroup), InstancePool: make(map[string]*DataSourceInstancePool), + InstanceProfiles: make(map[string]*DataSourceInstanceProfiles), Job: make(map[string]*DataSourceJob), Jobs: make(map[string]*DataSourceJobs), Metastore: make(map[string]*DataSourceMetastore), Metastores: make(map[string]*DataSourceMetastores), + MlflowModel: make(map[string]*DataSourceMlflowModel), MwsCredentials: make(map[string]*DataSourceMwsCredentials), MwsWorkspaces: make(map[string]*DataSourceMwsWorkspaces), NodeType: make(map[string]*DataSourceNodeType), diff --git a/bundle/internal/tf/schema/resource_catalog_workspace_binding.go b/bundle/internal/tf/schema/resource_catalog_workspace_binding.go index 40bced98..f828de8f 100644 --- a/bundle/internal/tf/schema/resource_catalog_workspace_binding.go +++ b/bundle/internal/tf/schema/resource_catalog_workspace_binding.go @@ -3,7 +3,10 @@ package schema type ResourceCatalogWorkspaceBinding struct { - CatalogName string `json:"catalog_name"` - Id string `json:"id,omitempty"` - WorkspaceId string `json:"workspace_id"` + BindingType string `json:"binding_type,omitempty"` + CatalogName string `json:"catalog_name,omitempty"` + Id string `json:"id,omitempty"` + SecurableName string `json:"securable_name,omitempty"` + SecurableType string `json:"securable_type,omitempty"` + WorkspaceId int `json:"workspace_id,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_cluster_policy.go b/bundle/internal/tf/schema/resource_cluster_policy.go index a5d28bcc..637fe645 100644 --- a/bundle/internal/tf/schema/resource_cluster_policy.go +++ b/bundle/internal/tf/schema/resource_cluster_policy.go @@ -2,13 +2,39 @@ package schema -type ResourceClusterPolicy struct { - Definition string `json:"definition,omitempty"` - Description string `json:"description,omitempty"` - Id string `json:"id,omitempty"` - MaxClustersPerUser int 
`json:"max_clusters_per_user,omitempty"` - Name string `json:"name"` - PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"` - PolicyFamilyId string `json:"policy_family_id,omitempty"` - PolicyId string `json:"policy_id,omitempty"` +type ResourceClusterPolicyLibrariesCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceClusterPolicyLibrariesMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceClusterPolicyLibrariesPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceClusterPolicyLibraries struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceClusterPolicyLibrariesCran `json:"cran,omitempty"` + Maven *ResourceClusterPolicyLibrariesMaven `json:"maven,omitempty"` + Pypi *ResourceClusterPolicyLibrariesPypi `json:"pypi,omitempty"` +} + +type ResourceClusterPolicy struct { + Definition string `json:"definition,omitempty"` + Description string `json:"description,omitempty"` + Id string `json:"id,omitempty"` + MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"` + Name string `json:"name"` + PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"` + PolicyFamilyId string `json:"policy_family_id,omitempty"` + PolicyId string `json:"policy_id,omitempty"` + Libraries []ResourceClusterPolicyLibraries `json:"libraries,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_default_namespace_setting.go b/bundle/internal/tf/schema/resource_default_namespace_setting.go new file mode 100644 index 00000000..1fa01194 --- /dev/null +++ b/bundle/internal/tf/schema/resource_default_namespace_setting.go @@ -0,0 +1,14 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceDefaultNamespaceSettingNamespace struct { + Value string `json:"value,omitempty"` +} + +type ResourceDefaultNamespaceSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + Namespace *ResourceDefaultNamespaceSettingNamespace `json:"namespace,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index b4a33bdf..7884efd7 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -30,7 +30,6 @@ type ResourceJobDeployment struct { } type ResourceJobEmailNotifications struct { - AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` @@ -492,8 +491,6 @@ type ResourceJobTaskDependsOn struct { } type ResourceJobTaskEmailNotifications struct { - AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` - NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` @@ -777,6 +774,29 @@ type ResourceJobTaskSqlTask struct { Query *ResourceJobTaskSqlTaskQuery `json:"query,omitempty"` } +type ResourceJobTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { + Id string `json:"id,omitempty"` +} + +type ResourceJobTaskWebhookNotificationsOnFailure struct { + Id string `json:"id,omitempty"` +} + +type ResourceJobTaskWebhookNotificationsOnStart struct { + Id string `json:"id,omitempty"` +} + +type ResourceJobTaskWebhookNotificationsOnSuccess struct { + Id string `json:"id,omitempty"` +} + +type ResourceJobTaskWebhookNotifications struct { + OnDurationWarningThresholdExceeded []ResourceJobTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []ResourceJobTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` + OnStart []ResourceJobTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnSuccess []ResourceJobTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` +} + type ResourceJobTask struct { ComputeKey string `json:"compute_key,omitempty"` Description string `json:"description,omitempty"` @@ -804,6 +824,7 @@ type ResourceJobTask struct { SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTriggerFileArrival struct { @@ -818,19 +839,19 @@ type ResourceJobTrigger struct { } type ResourceJobWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id"` + Id string `json:"id,omitempty"` } type ResourceJobWebhookNotificationsOnFailure struct { - Id string `json:"id"` + Id string `json:"id,omitempty"` } type ResourceJobWebhookNotificationsOnStart struct { - Id string `json:"id"` + Id string `json:"id,omitempty"` } type ResourceJobWebhookNotificationsOnSuccess struct { - Id string `json:"id"` + Id string 
`json:"id,omitempty"` } type ResourceJobWebhookNotifications struct { @@ -843,6 +864,8 @@ type ResourceJobWebhookNotifications struct { type ResourceJob struct { AlwaysRunning bool `json:"always_running,omitempty"` ControlRunState bool `json:"control_run_state,omitempty"` + Description string `json:"description,omitempty"` + EditMode string `json:"edit_mode,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"` Format string `json:"format,omitempty"` Id string `json:"id,omitempty"` diff --git a/bundle/internal/tf/schema/resource_metastore.go b/bundle/internal/tf/schema/resource_metastore.go index 3561d2bf..31535b21 100644 --- a/bundle/internal/tf/schema/resource_metastore.go +++ b/bundle/internal/tf/schema/resource_metastore.go @@ -17,7 +17,7 @@ type ResourceMetastore struct { Name string `json:"name"` Owner string `json:"owner,omitempty"` Region string `json:"region,omitempty"` - StorageRoot string `json:"storage_root"` + StorageRoot string `json:"storage_root,omitempty"` StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"` UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` diff --git a/bundle/internal/tf/schema/resource_metastore_data_access.go b/bundle/internal/tf/schema/resource_metastore_data_access.go index 86df5e4b..ec1395f7 100644 --- a/bundle/internal/tf/schema/resource_metastore_data_access.go +++ b/bundle/internal/tf/schema/resource_metastore_data_access.go @@ -3,7 +3,9 @@ package schema type ResourceMetastoreDataAccessAwsIamRole struct { - RoleArn string `json:"role_arn"` + ExternalId string `json:"external_id,omitempty"` + RoleArn string `json:"role_arn"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` } type ResourceMetastoreDataAccessAzureManagedIdentity struct { @@ -34,7 +36,7 @@ type ResourceMetastoreDataAccess struct { ForceDestroy bool `json:"force_destroy,omitempty"` Id string `json:"id,omitempty"` IsDefault bool `json:"is_default,omitempty"` - MetastoreId string `json:"metastore_id"` + MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name"` Owner string `json:"owner,omitempty"` ReadOnly bool `json:"read_only,omitempty"` diff --git a/bundle/internal/tf/schema/resource_mlflow_model.go b/bundle/internal/tf/schema/resource_mlflow_model.go index 406c124f..41f8e0f3 100644 --- a/bundle/internal/tf/schema/resource_mlflow_model.go +++ b/bundle/internal/tf/schema/resource_mlflow_model.go @@ -8,12 +8,9 @@ type ResourceMlflowModelTags struct { } type ResourceMlflowModel struct { - CreationTimestamp int `json:"creation_timestamp,omitempty"` - Description string `json:"description,omitempty"` - Id string `json:"id,omitempty"` - LastUpdatedTimestamp int `json:"last_updated_timestamp,omitempty"` - Name string `json:"name"` - RegisteredModelId string `json:"registered_model_id,omitempty"` - UserId string `json:"user_id,omitempty"` - Tags []ResourceMlflowModelTags `json:"tags,omitempty"` + Description string `json:"description,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + RegisteredModelId string `json:"registered_model_id,omitempty"` + Tags []ResourceMlflowModelTags `json:"tags,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_mws_credentials.go b/bundle/internal/tf/schema/resource_mws_credentials.go index 265ad3ec..859663ed 100644 --- a/bundle/internal/tf/schema/resource_mws_credentials.go +++ b/bundle/internal/tf/schema/resource_mws_credentials.go @@ -3,7 +3,7 @@ package schema type ResourceMwsCredentials struct { - 
AccountId string `json:"account_id"` + AccountId string `json:"account_id,omitempty"` CreationTime int `json:"creation_time,omitempty"` CredentialsId string `json:"credentials_id,omitempty"` CredentialsName string `json:"credentials_name"` diff --git a/bundle/internal/tf/schema/resource_mws_customer_managed_keys.go b/bundle/internal/tf/schema/resource_mws_customer_managed_keys.go index 6c288479..411602b5 100644 --- a/bundle/internal/tf/schema/resource_mws_customer_managed_keys.go +++ b/bundle/internal/tf/schema/resource_mws_customer_managed_keys.go @@ -3,7 +3,7 @@ package schema type ResourceMwsCustomerManagedKeysAwsKeyInfo struct { - KeyAlias string `json:"key_alias"` + KeyAlias string `json:"key_alias,omitempty"` KeyArn string `json:"key_arn"` KeyRegion string `json:"key_region,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_recipient.go b/bundle/internal/tf/schema/resource_recipient.go index f2bb0e75..47d6de37 100644 --- a/bundle/internal/tf/schema/resource_recipient.go +++ b/bundle/internal/tf/schema/resource_recipient.go @@ -22,6 +22,7 @@ type ResourceRecipient struct { DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` Id string `json:"id,omitempty"` Name string `json:"name"` + Owner string `json:"owner,omitempty"` SharingCode string `json:"sharing_code,omitempty"` IpAccessList *ResourceRecipientIpAccessList `json:"ip_access_list,omitempty"` Tokens []ResourceRecipientTokens `json:"tokens,omitempty"` diff --git a/bundle/internal/tf/schema/resource_sql_dashboard.go b/bundle/internal/tf/schema/resource_sql_dashboard.go index fc97bbde..eb6162d7 100644 --- a/bundle/internal/tf/schema/resource_sql_dashboard.go +++ b/bundle/internal/tf/schema/resource_sql_dashboard.go @@ -8,6 +8,7 @@ type ResourceSqlDashboard struct { Id string `json:"id,omitempty"` Name string `json:"name"` Parent string `json:"parent,omitempty"` + RunAsRole string `json:"run_as_role,omitempty"` Tags []string `json:"tags,omitempty"` UpdatedAt string `json:"updated_at,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_sql_query.go b/bundle/internal/tf/schema/resource_sql_query.go index 27c653fc..6c355a09 100644 --- a/bundle/internal/tf/schema/resource_sql_query.go +++ b/bundle/internal/tf/schema/resource_sql_query.go @@ -45,9 +45,9 @@ type ResourceSqlQueryParameterDatetimesecRange struct { } type ResourceSqlQueryParameterEnumMultiple struct { - Prefix string `json:"prefix"` + Prefix string `json:"prefix,omitempty"` Separator string `json:"separator"` - Suffix string `json:"suffix"` + Suffix string `json:"suffix,omitempty"` } type ResourceSqlQueryParameterEnum struct { @@ -62,9 +62,9 @@ type ResourceSqlQueryParameterNumber struct { } type ResourceSqlQueryParameterQueryMultiple struct { - Prefix string `json:"prefix"` + Prefix string `json:"prefix,omitempty"` Separator string `json:"separator"` - Suffix string `json:"suffix"` + Suffix string `json:"suffix,omitempty"` } type ResourceSqlQueryParameterQuery struct { diff --git a/bundle/internal/tf/schema/resource_storage_credential.go b/bundle/internal/tf/schema/resource_storage_credential.go index 1687c79a..1f103023 100644 --- a/bundle/internal/tf/schema/resource_storage_credential.go +++ b/bundle/internal/tf/schema/resource_storage_credential.go @@ -3,7 +3,9 @@ package schema type ResourceStorageCredentialAwsIamRole struct { - RoleArn string `json:"role_arn"` + ExternalId string `json:"external_id,omitempty"` + RoleArn string `json:"role_arn"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` 
} type ResourceStorageCredentialAzureManagedIdentity struct { diff --git a/bundle/internal/tf/schema/resource_workspace_file.go b/bundle/internal/tf/schema/resource_workspace_file.go index 053085a2..f3ff5f88 100644 --- a/bundle/internal/tf/schema/resource_workspace_file.go +++ b/bundle/internal/tf/schema/resource_workspace_file.go @@ -10,4 +10,5 @@ type ResourceWorkspaceFile struct { Path string `json:"path"` Source string `json:"source,omitempty"` Url string `json:"url,omitempty"` + WorkspacePath string `json:"workspace_path,omitempty"` } diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 9a04be7e..4519a568 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -15,6 +15,7 @@ type Resources struct { ClusterPolicy map[string]*ResourceClusterPolicy `json:"databricks_cluster_policy,omitempty"` Connection map[string]*ResourceConnection `json:"databricks_connection,omitempty"` DbfsFile map[string]*ResourceDbfsFile `json:"databricks_dbfs_file,omitempty"` + DefaultNamespaceSetting map[string]*ResourceDefaultNamespaceSetting `json:"databricks_default_namespace_setting,omitempty"` Directory map[string]*ResourceDirectory `json:"databricks_directory,omitempty"` Entitlements map[string]*ResourceEntitlements `json:"databricks_entitlements,omitempty"` ExternalLocation map[string]*ResourceExternalLocation `json:"databricks_external_location,omitempty"` @@ -99,6 +100,7 @@ func NewResources() *Resources { ClusterPolicy: make(map[string]*ResourceClusterPolicy), Connection: make(map[string]*ResourceConnection), DbfsFile: make(map[string]*ResourceDbfsFile), + DefaultNamespaceSetting: make(map[string]*ResourceDefaultNamespaceSetting), Directory: make(map[string]*ResourceDirectory), Entitlements: make(map[string]*ResourceEntitlements), ExternalLocation: make(map[string]*ResourceExternalLocation), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 3ad8cf4d..937182d4 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -25,7 +25,7 @@ func NewRoot() *Root { "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ "source": "databricks/databricks", - "version": "1.29.0", + "version": "1.31.1", }, }, }, From 37671d9f545cacfa3ab31ca73628d57949c4209a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 12 Dec 2023 12:36:06 +0100 Subject: [PATCH 284/310] Fix passthrough of pipeline notifications (#1058) ## Changes Notifications weren't passed along because of a plural vs singular mismatch. ## Tests * Added unit test coverage. * Manually confirmed it now works in an example bundle. 
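To make the plural-vs-singular mismatch concrete, here is a trimmed-down, hypothetical version of the mapping; the real types live in `bundle/config/resources` and `bundle/internal/tf/schema`, so treat the names below as stand-ins. Without the copy loop, configured notifications never reach the Terraform representation:

```go
package main

import (
	"encoding/json"
	"os"
)

// Stand-ins for the real types; field names simplified for illustration.
type notification struct {
	Alerts          []string `json:"alerts,omitempty"`
	EmailRecipients []string `json:"email_recipients,omitempty"`
}

type bundlePipeline struct {
	Name          string         `json:"name"`
	Notifications []notification `json:"notifications,omitempty"` // plural in bundle config
}

type tfPipeline struct {
	Name         string         `json:"name"`
	Notification []notification `json:"notification,omitempty"` // singular repeated block in Terraform
}

func main() {
	src := bundlePipeline{
		Name: "my pipeline",
		Notifications: []notification{
			{Alerts: []string{"on-update-fatal-failure"}, EmailRecipients: []string{"jane@doe.com"}},
		},
	}
	dst := tfPipeline{Name: src.Name}
	// The fix: copy every plural config entry into the singular TF block.
	dst.Notification = append(dst.Notification, src.Notifications...)
	json.NewEncoder(os.Stdout).Encode(dst)
}
```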
--- bundle/deploy/terraform/convert.go | 6 ++++++ bundle/deploy/terraform/convert_test.go | 26 +++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 71385881..8d51a375 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -140,6 +140,12 @@ func BundleToTerraform(config *config.Root) *schema.Root { conv(v, &l) dst.Cluster = append(dst.Cluster, l) } + + for _, v := range src.Notifications { + var l schema.ResourcePipelineNotification + conv(v, &l) + dst.Notification = append(dst.Notification, l) + } } tfroot.Resource.Pipeline[k] = &dst diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index bb5a63ec..00086c76 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -139,6 +139,26 @@ func TestConvertPipeline(t *testing.T) { }, }, }, + Notifications: []pipelines.Notifications{ + { + Alerts: []string{ + "on-update-fatal-failure", + }, + EmailRecipients: []string{ + "jane@doe.com", + }, + }, + { + Alerts: []string{ + "on-update-failure", + "on-flow-failure", + }, + EmailRecipients: []string{ + "jane@doe.com", + "john@doe.com", + }, + }, + }, }, } @@ -153,6 +173,12 @@ func TestConvertPipeline(t *testing.T) { out := BundleToTerraform(&config) assert.Equal(t, "my pipeline", out.Resource.Pipeline["my_pipeline"].Name) assert.Len(t, out.Resource.Pipeline["my_pipeline"].Library, 2) + notifs := out.Resource.Pipeline["my_pipeline"].Notification + assert.Len(t, notifs, 2) + assert.Equal(t, notifs[0].Alerts, []string{"on-update-fatal-failure"}) + assert.Equal(t, notifs[0].EmailRecipients, []string{"jane@doe.com"}) + assert.Equal(t, notifs[1].Alerts, []string{"on-update-failure", "on-flow-failure"}) + assert.Equal(t, notifs[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"}) assert.Nil(t, out.Data) } From e6bc4c9876c5a9244c96688c7b57ed7a0f01d8bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 11:12:50 +0100 Subject: [PATCH 285/310] Bump github.com/hashicorp/hc-install from 0.6.1 to 0.6.2 (#1054) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/hc-install](https://github.com/hashicorp/hc-install) from 0.6.1 to 0.6.2.
Release notes

Sourced from github.com/hashicorp/hc-install's releases.

v0.6.2

Full Changelog: https://github.com/hashicorp/hc-install/compare/v0.6.1...v0.6.2

Commits
  • b00cdaf Set VERSION to 0.6.2
  • 9bbc98c ci: Add release workflow (#99)
  • b22ec09 go: bump version to 1.21.4 (#169)
  • fd6075b build(deps): bump github.com/go-git/go-git/v5 from 5.10.0 to 5.10.1 (#168)
  • 9de7b57 Result of `tsccr-helper -log-level=info -pin-all-workflows .` (#167)
  • 1626fa4 github: Disable dependabot for GHA (#166)
  • 0ee87ea build(deps): bump golang.org/x/mod from 0.13.0 to 0.14.0 (#165)
  • ed6709c go: bump version to 1.21.3 (#164)
  • 0e6b3da build(deps): bump actions/checkout from 4.1.0 to 4.1.1 (#162)
  • cb4ec80 build(deps): bump github.com/go-git/go-git/v5 from 5.9.0 to 5.10.0 (#163)
  • Additional commits viewable in compare view
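
For background, `hc-install` is the library the CLI relies on to download a pinned Terraform binary for bundle deployments. A hedged sketch of that general usage pattern follows; the version number is illustrative and the API names are per the hc-install v0.6 line, not a quote of the CLI's own code:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
	"github.com/hashicorp/hc-install/product"
	"github.com/hashicorp/hc-install/releases"
)

func main() {
	// Pin and download a specific Terraform build; Install returns the
	// path to the downloaded executable.
	installer := &releases.ExactVersion{
		Product: product.Terraform,
		Version: version.Must(version.NewVersion("1.5.5")),
	}
	execPath, err := installer.Install(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("terraform binary at:", execPath)
}
```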

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- go.mod | 2 +- go.sum | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 099c94aa..fabb998a 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.4.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 - github.com/hashicorp/hc-install v0.6.1 // MPL 2.0 + github.com/hashicorp/hc-install v0.6.2 // MPL 2.0 github.com/hashicorp/terraform-exec v0.19.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.18.0 // MPL 2.0 github.com/imdario/mergo v0.3.15 // BSD-3-Clause diff --git a/go.sum b/go.sum index 7ce8ed2a..5350d7c9 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,6 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= -github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= @@ -51,8 +49,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= -github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0= +github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= +github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -96,8 +94,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAtGG2mY= -github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE= +github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= +github.com/hashicorp/hc-install v0.6.2/go.mod 
h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= github.com/hashicorp/terraform-json v0.18.0 h1:pCjgJEqqDESv4y0Tzdqfxr/edOIGkjs8keY42xfNBwU= @@ -134,8 +132,8 @@ github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDj github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= -github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= From 8ac47d0b1d7b8bc8ff76d034c695dfac4e2352de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 11:13:02 +0100 Subject: [PATCH 286/310] Bump github.com/databricks/databricks-sdk-go from 0.26.1 to 0.26.2 (#1053) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.26.1 to 0.26.2.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.26.2

This is a bugfix release, including a fix correcting issues with OAuth flows, due to a bug with the propagation of the response status in `httpclient`'s `RoundTrip()` implementation. This fixes the `failed during request visitor: token: oauth2: cannot fetch token: Response: {...}` error (see the sketch after the fix list below).

All fixes:

  • Migrate Azure MSI & Metadata Service token sources to `httpclient` and add 100% test coverage (#709).
  • Added `config.NewAzureCliTokenSource` and `config.NewAzureMsiTokenSource` constructors (#727).
  • Use per-config refresh context for OAuth tokens (#728).
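
The "propagation of the response status" point is easier to picture with a minimal custom `http.RoundTripper`. This is a generic illustration, not the SDK's actual `httpclient` code: downstream token logic branches on the status code (for example a 401 triggering a refresh), so a transport must hand back the response it received unchanged.

```go
package main

import "net/http"

// statusPreservingTransport wraps a base RoundTripper. The crucial detail
// is returning the underlying *http.Response untouched: OAuth-style token
// logic inspects resp.StatusCode to decide whether to refresh credentials,
// so swallowing or rewriting the status in a transport breaks that logic.
type statusPreservingTransport struct {
	base http.RoundTripper
}

func (t *statusPreservingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.base.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	return resp, nil // propagate status and body as-is
}

func main() {
	client := &http.Client{Transport: &statusPreservingTransport{base: http.DefaultTransport}}
	_ = client // construct only; no network call in this sketch
}
```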
Commits
  • ebb3ea4 Release v0.26.2 (#732)
  • 2dbf06a Use per-config refresh context for OAuth tokens (#728)
  • 964cdc1 Added config.NewAzureCliTokenSource and config.NewAzureMsiTokenSource con...
  • 82d089a Migrate Azure MSI & Metadata Service token sources to httpclient and add 10...
  • 8b28282 Add account level MSI credentials test (#726)
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fabb998a..e76191c1 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.26.1 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.26.2 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.4.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 5350d7c9..01b2cdc0 100644 --- a/go.sum +++ b/go.sum @@ -29,8 +29,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.26.1 h1:Wumg1H1K7Y3bNSRWERLE+9+BbCGljZAEwv/xc+xhT6s= -github.com/databricks/databricks-sdk-go v0.26.1/go.mod h1:cyFYsqaDiIdaKPdNAuh+YsMUL1k9Lt02JB/72+zgCxg= +github.com/databricks/databricks-sdk-go v0.26.2 h1:OcA8aOpwCqCs+brATOuOR6BmqCK/Boye21+1rYw2MOg= +github.com/databricks/databricks-sdk-go v0.26.2/go.mod h1:cyFYsqaDiIdaKPdNAuh+YsMUL1k9Lt02JB/72+zgCxg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From dc12b3f1cf1d398b7c6d608dd9a5a9376953d7e0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 13 Dec 2023 16:43:03 +0100 Subject: [PATCH 287/310] Release v0.210.3 (#1062) Bundles: * Improve default template ([#1046](https://github.com/databricks/cli/pull/1046)). * Fix passthrough of pipeline notifications ([#1058](https://github.com/databricks/cli/pull/1058)). Internal: * Stub out Python virtual environment installation for `labs` commands ([#1057](https://github.com/databricks/cli/pull/1057)). * Upgrade Terraform schema version to v1.31.1 ([#1055](https://github.com/databricks/cli/pull/1055)). Dependency updates: * Bump github.com/hashicorp/hc-install from 0.6.1 to 0.6.2 ([#1054](https://github.com/databricks/cli/pull/1054)). * Bump github.com/databricks/databricks-sdk-go from 0.26.1 to 0.26.2 ([#1053](https://github.com/databricks/cli/pull/1053)). --- CHANGELOG.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 22d535ac..051494c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Version changelog +## 0.210.3 + +Bundles: + * Improve default template ([#1046](https://github.com/databricks/cli/pull/1046)). + * Fix passthrough of pipeline notifications ([#1058](https://github.com/databricks/cli/pull/1058)). + +Internal: + * Stub out Python virtual environment installation for `labs` commands ([#1057](https://github.com/databricks/cli/pull/1057)). + * Upgrade Terraform schema version to v1.31.1 ([#1055](https://github.com/databricks/cli/pull/1055)). + + +Dependency updates: + * Bump github.com/hashicorp/hc-install from 0.6.1 to 0.6.2 ([#1054](https://github.com/databricks/cli/pull/1054)). 
+ * Bump github.com/databricks/databricks-sdk-go from 0.26.1 to 0.26.2 ([#1053](https://github.com/databricks/cli/pull/1053)). + ## 0.210.2 CLI: From a6ec9ac08b87db34453b2a48108df423dd922303 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 14 Dec 2023 09:15:00 +0100 Subject: [PATCH 288/310] Upgrade Go SDK to 0.27.0 (#1064) ## Changes Upgrade Go SDK to 0.27.0 --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 - bundle/schema/docs/bundle_descriptions.json | 372 +++++++++++++++++- cmd/account/cmd.go | 2 - cmd/account/network-policy/network-policy.go | 257 ------------ cmd/account/private-access/private-access.go | 4 +- cmd/workspace/catalogs/catalogs.go | 4 +- cmd/workspace/clean-rooms/clean-rooms.go | 1 - cmd/workspace/connections/connections.go | 2 + .../external-locations/external-locations.go | 2 +- cmd/workspace/metastores/metastores.go | 1 + cmd/workspace/providers/providers.go | 8 +- cmd/workspace/recipients/recipients.go | 8 +- .../registered-models/registered-models.go | 1 + cmd/workspace/schemas/schemas.go | 1 + .../serving-endpoints/serving-endpoints.go | 102 ++++- cmd/workspace/shares/shares.go | 4 +- .../storage-credentials.go | 11 +- cmd/workspace/volumes/volumes.go | 1 + go.mod | 4 +- go.sum | 8 +- 21 files changed, 490 insertions(+), 306 deletions(-) delete mode 100755 cmd/account/network-policy/network-policy.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 4343d612..0d79c490 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -22f09783eb8a84d52026f856be3b2068f9498db3 \ No newline at end of file +63caa3cb0c05045e81d3dcf2451fa990d8670f36 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index f3389320..ac552297 100755 --- a/.gitattributes +++ b/.gitattributes @@ -11,7 +11,6 @@ cmd/account/log-delivery/log-delivery.go linguist-generated=true cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true cmd/account/metastores/metastores.go linguist-generated=true cmd/account/network-connectivity/network-connectivity.go linguist-generated=true -cmd/account/network-policy/network-policy.go linguist-generated=true cmd/account/networks/networks.go linguist-generated=true cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true cmd/account/private-access/private-access.go linguist-generated=true diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 09462fb0..a53a5274 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -1275,7 +1275,10 @@ "description": "ID of the job to trigger." }, "job_parameters": { - "description": "" + "description": "Job-level parameters used to trigger the job.", + "additionalproperties": { + "description": "" + } } } }, @@ -1414,7 +1417,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. 
A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -1491,7 +1494,7 @@ } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -1550,8 +1553,162 @@ "config": { "description": "The core config of the serving endpoint.", "properties": { + "auto_capture_config": { + "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.", + "properties": { + "catalog_name": { + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set." + }, + "enabled": { + "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." + }, + "schema_name": { + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set." + }, + "table_name_prefix": { + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set." + } + } + }, + "served_entities": { + "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities.", + "items": { + "description": "", + "properties": { + "entity_name": { + "description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC),\nor a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of\n__catalog_name__.__schema_name__.__model_name__.\n" + }, + "entity_version": { + "description": "The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC." + }, + "environment_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", + "additionalproperties": { + "description": "" + } + }, + "external_model": { + "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. 
For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n", + "properties": { + "config": { + "description": "The config for the external model, which must match the provider.", + "properties": { + "ai21labs_config": { + "description": "AI21Labs Config", + "properties": { + "ai21labs_api_key": { + "description": "The Databricks secret key reference for an AI21Labs API key." + } + } + }, + "anthropic_config": { + "description": "Anthropic Config", + "properties": { + "anthropic_api_key": { + "description": "The Databricks secret key reference for an Anthropic API key." + } + } + }, + "aws_bedrock_config": { + "description": "AWS Bedrock Config", + "properties": { + "aws_access_key_id": { + "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." + }, + "aws_region": { + "description": "The AWS region to use. Bedrock has to be enabled there." + }, + "aws_secret_access_key": { + "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." + }, + "bedrock_provider": { + "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + } + } + }, + "cohere_config": { + "description": "Cohere Config", + "properties": { + "cohere_api_key": { + "description": "The Databricks secret key reference for a Cohere API key." + } + } + }, + "databricks_model_serving_config": { + "description": "Databricks Model Serving Config", + "properties": { + "databricks_api_token": { + "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" + }, + "databricks_workspace_url": { + "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" + } + } + }, + "openai_config": { + "description": "OpenAI Config", + "properties": { + "openai_api_base": { + "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" + }, + "openai_api_key": { + "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." + }, + "openai_api_type": { + "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. 
For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" + }, + "openai_api_version": { + "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" + }, + "openai_deployment_name": { + "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" + }, + "openai_organization": { + "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" + } + } + }, + "palm_config": { + "description": "PaLM Config", + "properties": { + "palm_api_key": { + "description": "The Databricks secret key reference for a PaLM API key." + } + } + } + } + }, + "name": { + "description": "The name of the external model." + }, + "provider": { + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" + }, + "task": { + "description": "The task type of the external model." + } + } + }, + "instance_profile_arn": { + "description": "ARN of the instance profile that the served entity uses to access AWS resources." + }, + "name": { + "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n" + }, + "scale_to_zero_enabled": { + "description": "Whether the compute resources for the served entity should scale down to zero." + }, + "workload_size": { + "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.\n" + }, + "workload_type": { + "description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n" + } + } + } + }, "served_models": { - "description": "A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.", + "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.", "items": { "description": "", "properties": { @@ -1580,7 +1737,7 @@ "description": "The workload size of the served model. 
The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n" }, "workload_type": { - "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See documentation for all\noptions.\n" + "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n" } } } @@ -1589,7 +1746,7 @@ "description": "The traffic config defining how invocations to the serving endpoint should be routed.", "properties": { "routes": { - "description": "The list of routes that define traffic to each served model.", + "description": "The list of routes that define traffic to each served entity.", "items": { "description": "", "properties": { @@ -1629,6 +1786,23 @@ } } }, + "rate_limits": { + "description": "Rate limits to be applied to the serving endpoint. NOTE: only external and foundation model endpoints are supported as of now.", + "items": { + "description": "", + "properties": { + "calls": { + "description": "Used to specify how many calls are allowed for a key within the renewal_period." + }, + "key": { + "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified." + }, + "renewal_period": { + "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported." + } + } + } + }, "tags": { "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", "items": { @@ -3460,7 +3634,10 @@ "description": "ID of the job to trigger." }, "job_parameters": { - "description": "" + "description": "Job-level parameters used to trigger the job.", + "additionalproperties": { + "description": "" + } } } }, @@ -3599,7 +3776,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. 
A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -3676,7 +3853,7 @@ } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -3735,8 +3912,162 @@ "config": { "description": "The core config of the serving endpoint.", "properties": { + "auto_capture_config": { + "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.", + "properties": { + "catalog_name": { + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set." + }, + "enabled": { + "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." + }, + "schema_name": { + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set." + }, + "table_name_prefix": { + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set." + } + } + }, + "served_entities": { + "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities.", + "items": { + "description": "", + "properties": { + "entity_name": { + "description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC),\nor a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of\n__catalog_name__.__schema_name__.__model_name__.\n" + }, + "entity_version": { + "description": "The version of the model in Databricks Model Registry to be served or empty if the entity is a FEATURE_SPEC." + }, + "environment_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity.\nNote: this is an experimental feature and subject to change. \nExample entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", + "additionalproperties": { + "description": "" + } + }, + "external_model": { + "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. 
For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n", + "properties": { + "config": { + "description": "The config for the external model, which must match the provider.", + "properties": { + "ai21labs_config": { + "description": "AI21Labs Config", + "properties": { + "ai21labs_api_key": { + "description": "The Databricks secret key reference for an AI21Labs API key." + } + } + }, + "anthropic_config": { + "description": "Anthropic Config", + "properties": { + "anthropic_api_key": { + "description": "The Databricks secret key reference for an Anthropic API key." + } + } + }, + "aws_bedrock_config": { + "description": "AWS Bedrock Config", + "properties": { + "aws_access_key_id": { + "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." + }, + "aws_region": { + "description": "The AWS region to use. Bedrock has to be enabled there." + }, + "aws_secret_access_key": { + "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." + }, + "bedrock_provider": { + "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + } + } + }, + "cohere_config": { + "description": "Cohere Config", + "properties": { + "cohere_api_key": { + "description": "The Databricks secret key reference for a Cohere API key." + } + } + }, + "databricks_model_serving_config": { + "description": "Databricks Model Serving Config", + "properties": { + "databricks_api_token": { + "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" + }, + "databricks_workspace_url": { + "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" + } + } + }, + "openai_config": { + "description": "OpenAI Config", + "properties": { + "openai_api_base": { + "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" + }, + "openai_api_key": { + "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." + }, + "openai_api_type": { + "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. 
For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" + }, + "openai_api_version": { + "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" + }, + "openai_deployment_name": { + "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" + }, + "openai_organization": { + "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" + } + } + }, + "palm_config": { + "description": "PaLM Config", + "properties": { + "palm_api_key": { + "description": "The Databricks secret key reference for a PaLM API key." + } + } + } + } + }, + "name": { + "description": "The name of the external model." + }, + "provider": { + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" + }, + "task": { + "description": "The task type of the external model." + } + } + }, + "instance_profile_arn": { + "description": "ARN of the instance profile that the served entity uses to access AWS resources." + }, + "name": { + "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n" + }, + "scale_to_zero_enabled": { + "description": "Whether the compute resources for the served entity should scale down to zero." + }, + "workload_size": { + "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.\n" + }, + "workload_type": { + "description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n" + } + } + } + }, "served_models": { - "description": "A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.", + "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.", "items": { "description": "", "properties": { @@ -3765,7 +4096,7 @@ "description": "The workload size of the served model. 
The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n" }, "workload_type": { - "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See documentation for all\noptions.\n" + "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n" } } } @@ -3774,7 +4105,7 @@ "description": "The traffic config defining how invocations to the serving endpoint should be routed.", "properties": { "routes": { - "description": "The list of routes that define traffic to each served model.", + "description": "The list of routes that define traffic to each served entity.", "items": { "description": "", "properties": { @@ -3814,6 +4145,23 @@ } } }, + "rate_limits": { + "description": "Rate limits to be applied to the serving endpoint. NOTE: only external and foundation model endpoints are supported as of now.", + "items": { + "description": "", + "properties": { + "calls": { + "description": "Used to specify how many calls are allowed for a key within the renewal_period." + }, + "key": { + "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified." + }, + "renewal_period": { + "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported." 
+ } + } + } + }, "tags": { "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", "items": { diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 72bf9107..627d6d59 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -17,7 +17,6 @@ import ( account_metastore_assignments "github.com/databricks/cli/cmd/account/metastore-assignments" account_metastores "github.com/databricks/cli/cmd/account/metastores" network_connectivity "github.com/databricks/cli/cmd/account/network-connectivity" - account_network_policy "github.com/databricks/cli/cmd/account/network-policy" networks "github.com/databricks/cli/cmd/account/networks" o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps" private_access "github.com/databricks/cli/cmd/account/private-access" @@ -51,7 +50,6 @@ func New() *cobra.Command { cmd.AddCommand(account_metastore_assignments.New()) cmd.AddCommand(account_metastores.New()) cmd.AddCommand(network_connectivity.New()) - cmd.AddCommand(account_network_policy.New()) cmd.AddCommand(networks.New()) cmd.AddCommand(o_auth_published_apps.New()) cmd.AddCommand(private_access.New()) diff --git a/cmd/account/network-policy/network-policy.go b/cmd/account/network-policy/network-policy.go deleted file mode 100755 index c89b53ee..00000000 --- a/cmd/account/network-policy/network-policy.go +++ /dev/null @@ -1,257 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package network_policy - -import ( - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/settings" - "github.com/spf13/cobra" -) - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cmdOverrides []func(*cobra.Command) - -func New() *cobra.Command { - cmd := &cobra.Command{ - Use: "network-policy", - Short: `Network policy is a set of rules that defines what can be accessed from your Databricks network.`, - Long: `Network policy is a set of rules that defines what can be accessed from your - Databricks network. E.g.: You can choose to block your SQL UDF to access - internet from your Databricks serverless clusters. - - There is only one instance of this setting per account. Since this setting has - a default value, this setting is present on all accounts even though it's - never set on a given account. Deletion reverts the value of the setting back - to the default value.`, - GroupID: "settings", - Annotations: map[string]string{ - "package": "settings", - }, - - // This service is being previewed; hide from help output. - Hidden: true, - } - - // Apply optional overrides to this command. - for _, fn := range cmdOverrides { - fn(cmd) - } - - return cmd -} - -// start delete-account-network-policy command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var deleteAccountNetworkPolicyOverrides []func( - *cobra.Command, - *settings.DeleteAccountNetworkPolicyRequest, -) - -func newDeleteAccountNetworkPolicy() *cobra.Command { - cmd := &cobra.Command{} - - var deleteAccountNetworkPolicyReq settings.DeleteAccountNetworkPolicyRequest - - // TODO: short flags - - cmd.Use = "delete-account-network-policy ETAG" - cmd.Short = `Delete Account Network Policy.` - cmd.Long = `Delete Account Network Policy. - - Reverts back all the account network policies back to default. - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - deleteAccountNetworkPolicyReq.Etag = args[0] - - response, err := a.NetworkPolicy.DeleteAccountNetworkPolicy(ctx, deleteAccountNetworkPolicyReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteAccountNetworkPolicyOverrides { - fn(cmd, &deleteAccountNetworkPolicyReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteAccountNetworkPolicy()) - }) -} - -// start read-account-network-policy command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var readAccountNetworkPolicyOverrides []func( - *cobra.Command, - *settings.ReadAccountNetworkPolicyRequest, -) - -func newReadAccountNetworkPolicy() *cobra.Command { - cmd := &cobra.Command{} - - var readAccountNetworkPolicyReq settings.ReadAccountNetworkPolicyRequest - - // TODO: short flags - - cmd.Use = "read-account-network-policy ETAG" - cmd.Short = `Get Account Network Policy.` - cmd.Long = `Get Account Network Policy. - - Gets the value of Account level Network Policy. - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. 
- That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - readAccountNetworkPolicyReq.Etag = args[0] - - response, err := a.NetworkPolicy.ReadAccountNetworkPolicy(ctx, readAccountNetworkPolicyReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range readAccountNetworkPolicyOverrides { - fn(cmd, &readAccountNetworkPolicyReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReadAccountNetworkPolicy()) - }) -} - -// start update-account-network-policy command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var updateAccountNetworkPolicyOverrides []func( - *cobra.Command, - *settings.UpdateAccountNetworkPolicyRequest, -) - -func newUpdateAccountNetworkPolicy() *cobra.Command { - cmd := &cobra.Command{} - - var updateAccountNetworkPolicyReq settings.UpdateAccountNetworkPolicyRequest - var updateAccountNetworkPolicyJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateAccountNetworkPolicyJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().BoolVar(&updateAccountNetworkPolicyReq.AllowMissing, "allow-missing", updateAccountNetworkPolicyReq.AllowMissing, `This should always be set to true for Settings RPCs.`) - // TODO: complex arg: setting - - cmd.Use = "update-account-network-policy" - cmd.Short = `Update Account Network Policy.` - cmd.Long = `Update Account Network Policy. - - Updates the policy content of Account level Network Policy.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - if cmd.Flags().Changed("json") { - err = updateAccountNetworkPolicyJson.Unmarshal(&updateAccountNetworkPolicyReq) - if err != nil { - return err - } - } - - response, err := a.NetworkPolicy.UpdateAccountNetworkPolicy(ctx, updateAccountNetworkPolicyReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. 
- for _, fn := range updateAccountNetworkPolicyOverrides { - fn(cmd, &updateAccountNetworkPolicyReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateAccountNetworkPolicy()) - }) -} - -// end service AccountNetworkPolicy diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 8470415c..458ff827 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -163,7 +163,7 @@ func newDelete() *cobra.Command { is accessed over [AWS PrivateLink]. Before configuring PrivateLink, read the [Databricks article about - PrivateLink]. + PrivateLink].", [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html @@ -246,7 +246,7 @@ func newGet() *cobra.Command { accessed over [AWS PrivateLink]. Before configuring PrivateLink, read the [Databricks article about - PrivateLink]. + PrivateLink].", [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index f66934da..6ffe4a39 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -338,7 +338,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT]`) cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATED, OPEN]`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of catalog.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the catalog.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of catalog.`) // TODO: map via StringToStringVar: properties @@ -351,7 +351,7 @@ func newUpdate() *cobra.Command { of the catalog). 
Arguments: - NAME: Name of catalog.` + NAME: The name of the catalog.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index aa2e9f3c..0e6bd0df 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -327,7 +327,6 @@ func newUpdate() *cobra.Command { // TODO: array: catalog_updates cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the clean room.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of clean room.`) cmd.Use = "update NAME_ARG" diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index e32830f9..99161613 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -336,6 +336,8 @@ func newUpdate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the connection.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the connection.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the connection.`) cmd.Use = "update" diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 0d3682bb..ed302b0b 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -351,7 +351,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`) // TODO: complex arg: encryption_details cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the external location.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`) cmd.Flags().BoolVar(&updateReq.SkipValidation, "skip-validation", updateReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`) diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 2f5d2195..a0e03ad0 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -620,6 +620,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Int64Var(&updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, "delta-sharing-recipient-token-lifetime-in-seconds", updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, `The lifetime of delta sharing recipient token in seconds.`) cmd.Flags().Var(&updateReq.DeltaSharingScope, "delta-sharing-scope", `The scope of Delta Sharing enabled for the metastore. 
Supported values: [INTERNAL, INTERNAL_AND_EXTERNAL]`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The user-specified name of the metastore.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the metastore.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the metastore.`) cmd.Flags().StringVar(&updateReq.PrivilegeModelVersion, "privilege-model-version", updateReq.PrivilegeModelVersion, `Privilege model version of the metastore, of the form major.minor (e.g., 1.0).`) cmd.Flags().StringVar(&updateReq.StorageRootCredentialId, "storage-root-credential-id", updateReq.StorageRootCredentialId, `UUID of storage credential to access the metastore storage_root.`) diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 38612089..851c668a 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -445,7 +445,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Description about the provider.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the Provider.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the provider.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of Provider owner.`) cmd.Flags().StringVar(&updateReq.RecipientProfileStr, "recipient-profile-str", updateReq.RecipientProfileStr, `This field is required when the __authentication_type__ is **TOKEN** or not provided.`) @@ -459,7 +459,7 @@ func newUpdate() *cobra.Command { provider. Arguments: - NAME: The name of the Provider.` + NAME: Name of the provider.` cmd.Annotations = make(map[string]string) @@ -482,14 +482,14 @@ func newUpdate() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Providers drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "The name of the Provider") + id, err := cmdio.Select(ctx, names, "Name of the provider") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have the name of the provider") + return fmt.Errorf("expected to have name of the provider") } updateReq.Name = args[0] diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 7498e5cb..463d7985 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -554,7 +554,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Description about the recipient.`) // TODO: complex arg: ip_access_list - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of Recipient.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the recipient.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of the recipient owner.`) // TODO: complex arg: properties_kvpairs @@ -567,7 +567,7 @@ func newUpdate() *cobra.Command { the user must be both a metastore admin and the owner of the recipient. 
Arguments: - NAME: Name of Recipient.` + NAME: Name of the recipient.` cmd.Annotations = make(map[string]string) @@ -590,14 +590,14 @@ func newUpdate() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Recipients drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "Name of Recipient") + id, err := cmdio.Select(ctx, names, "Name of the recipient") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have name of recipient") + return fmt.Errorf("expected to have name of the recipient") } updateReq.Name = args[0] diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index ddb47e55..774859f1 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -588,6 +588,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the registered model.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the registered model.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the registered model.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the registered model.`) cmd.Use = "update FULL_NAME" diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index eefb4b38..b7863514 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -374,6 +374,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT]`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of schema, relative to parent catalog.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the schema.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`) // TODO: map via StringToStringVar: properties diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 8f8349a8..a174bc45 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -29,11 +29,11 @@ func New() *cobra.Command { scalable REST API endpoints using serverless compute. This means the endpoints and associated compute resources are fully managed by Databricks and will not appear in your cloud account. A serving endpoint can consist of one or more - MLflow models from the Databricks Model Registry, called served models. A - serving endpoint can have at most ten served models. You can configure traffic - settings to define how requests should be routed to your served models behind - an endpoint. Additionally, you can configure the scale of resources that - should be applied to each served model.`, + MLflow models from the Databricks Model Registry, called served entities. A + serving endpoint can have at most ten served entities. 
You can configure + traffic settings to define how requests should be routed to your served + entities behind an endpoint. Additionally, you can configure the scale of + resources that should be applied to each served entity.`, GroupID: "serving", Annotations: map[string]string{ "package": "serving", @@ -140,6 +140,7 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: array: rate_limits // TODO: array: tags cmd.Use = "create" @@ -713,6 +714,82 @@ func init() { }) } +// start put command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var putOverrides []func( + *cobra.Command, + *serving.PutRequest, +) + +func newPut() *cobra.Command { + cmd := &cobra.Command{} + + var putReq serving.PutRequest + var putJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&putJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: rate_limits + + cmd.Use = "put NAME" + cmd.Short = `Update the rate limits of a serving endpoint.` + cmd.Long = `Update the rate limits of a serving endpoint. + + Used to update the rate limits of a serving endpoint. NOTE: only external and + foundation model endpoints are supported as of now. + + Arguments: + NAME: The name of the serving endpoint whose rate limits are being updated. This + field is required.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = putJson.Unmarshal(&putReq) + if err != nil { + return err + } + } + putReq.Name = args[0] + + response, err := w.ServingEndpoints.Put(ctx, putReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range putOverrides { + fn(cmd, &putReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newPut()) + }) +} + // start query command // Slice with functions to override default command behavior. 
@@ -733,8 +810,17 @@ func newQuery() *cobra.Command { // TODO: array: dataframe_records // TODO: complex arg: dataframe_split + // TODO: map via StringToStringVar: extra_params + // TODO: any: input // TODO: any: inputs // TODO: array: instances + cmd.Flags().IntVar(&queryReq.MaxTokens, "max-tokens", queryReq.MaxTokens, `The max tokens field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`) + // TODO: array: messages + cmd.Flags().IntVar(&queryReq.N, "n", queryReq.N, `The n (number of candidates) field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`) + // TODO: any: prompt + // TODO: array: stop + cmd.Flags().BoolVar(&queryReq.Stream, "stream", queryReq.Stream, `The stream field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`) + cmd.Flags().Float64Var(&queryReq.Temperature, "temperature", queryReq.Temperature, `The temperature field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`) cmd.Use = "query NAME" cmd.Short = `Query a serving endpoint with provided model input.` @@ -886,14 +972,16 @@ func newUpdateConfig() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updateConfigJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: complex arg: auto_capture_config + // TODO: array: served_models // TODO: complex arg: traffic_config cmd.Use = "update-config" cmd.Short = `Update a serving endpoint with a new config.` cmd.Long = `Update a serving endpoint with a new config. - Updates any combination of the serving endpoint's served models, the compute - configuration of those served models, and the endpoint's traffic config. An + Updates any combination of the serving endpoint's served entities, the compute + configuration of those served entities, and the endpoint's traffic config. An endpoint that already has an update in progress can not be updated until the current update completes or fails.` diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index 8b983c4e..7cb85abf 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -390,7 +390,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the share.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the share.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of share.`) // TODO: array: updates @@ -414,7 +414,7 @@ func newUpdate() *cobra.Command { Table removals through **update** do not require additional privileges. 
Arguments: - NAME: Name of the share.` + NAME: The name of the share.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 2c6efd82..9a03cf7d 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -68,6 +68,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: aws_iam_role // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal + // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Comment associated with the credential.`) // TODO: output-only field cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) @@ -366,10 +367,11 @@ func newUpdate() *cobra.Command { // TODO: complex arg: aws_iam_role // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal + // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) // TODO: output-only field cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The credential name.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) cmd.Flags().BoolVar(&updateReq.SkipValidation, "skip-validation", updateReq.SkipValidation, `Supplying true to this argument skips validation of the updated credential.`) @@ -381,7 +383,7 @@ func newUpdate() *cobra.Command { Updates a storage credential on the metastore. Arguments: - NAME: The credential name. The name must be unique within the metastore.` + NAME: Name of the storage credential.` cmd.Annotations = make(map[string]string) @@ -404,14 +406,14 @@ func newUpdate() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. 
Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "The credential name") + id, err := cmdio.Select(ctx, names, "Name of the storage credential") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have the credential name") + return fmt.Errorf("expected to have name of the storage credential") } updateReq.Name = args[0] @@ -461,6 +463,7 @@ func newValidate() *cobra.Command { // TODO: complex arg: aws_iam_role // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal + // TODO: complex arg: cloudflare_api_token // TODO: output-only field cmd.Flags().StringVar(&validateReq.ExternalLocationName, "external-location-name", validateReq.ExternalLocationName, `The name of an existing external location to validate.`) cmd.Flags().BoolVar(&validateReq.ReadOnly, "read-only", validateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 427bdb58..77b60181 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -418,6 +418,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the volume.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the volume.`) + cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the volume.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the volume.`) cmd.Use = "update FULL_NAME_ARG" diff --git a/go.mod b/go.mod index e76191c1..d8ab6cb4 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.26.2 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.27.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.4.0 // BSD-3-Clause @@ -58,7 +58,7 @@ require ( golang.org/x/net v0.19.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.152.0 // indirect + google.golang.org/api v0.153.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect google.golang.org/grpc v1.59.0 // indirect diff --git a/go.sum b/go.sum index 01b2cdc0..946b5dd6 100644 --- a/go.sum +++ b/go.sum @@ -29,8 +29,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.26.2 h1:OcA8aOpwCqCs+brATOuOR6BmqCK/Boye21+1rYw2MOg= -github.com/databricks/databricks-sdk-go v0.26.2/go.mod h1:cyFYsqaDiIdaKPdNAuh+YsMUL1k9Lt02JB/72+zgCxg= +github.com/databricks/databricks-sdk-go v0.27.0 h1:JJ9CxVE7Js08Ug/gafM1gGYx+u/je2g2I4bSYeMPPaY= +github.com/databricks/databricks-sdk-go v0.27.0/go.mod h1:AGzQDmVUcf/J9ARx2FgObcRI5RO2VZ1jehhxFM6tA60= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -246,8 +246,8 @@ golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.152.0 h1:t0r1vPnfMc260S2Ci+en7kfCZaLOPs5KI0sVV/6jZrY= -google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= +google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= +google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= From b17e845d44b9c4df9181b1b5952b523328f46a6e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 18 Dec 2023 10:57:07 +0100 Subject: [PATCH 289/310] Skip profile resolution if `DATABRICKS_AUTH_TYPE` is set (#1068) ## Changes If a user configures a workspace host in a bundle and wants to use the "azure-cli" authentication type, we would still run profile resolution. If the databrickscfg has a matching profile, we still load it, even though it should be a fallback. ## Tests * Unit test. * Manually confirmed that setting `DATABRICKS_AUTH_TYPE=azure-cli` now works as expected. --- libs/databrickscfg/loader.go | 5 ++++- libs/databrickscfg/loader_test.go | 17 +++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/libs/databrickscfg/loader.go b/libs/databrickscfg/loader.go index a7985390..1dc2a945 100644 --- a/libs/databrickscfg/loader.go +++ b/libs/databrickscfg/loader.go @@ -108,6 +108,7 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { } func (l profileFromHostLoader) isAnyAuthConfigured(cfg *config.Config) bool { + // If any of the auth-specific attributes are set, we can skip profile resolution. for _, a := range config.ConfigAttributes { if a.Auth == "" { continue @@ -116,5 +117,7 @@ func (l profileFromHostLoader) isAnyAuthConfigured(cfg *config.Config) bool { return true } } - return false + // If the auth type is set, we can skip profile resolution. + // For example, to force "azure-cli", only the host and the auth type will be set. 
+ return cfg.AuthType != "" } diff --git a/libs/databrickscfg/loader_test.go b/libs/databrickscfg/loader_test.go index 0677687f..4525115e 100644 --- a/libs/databrickscfg/loader_test.go +++ b/libs/databrickscfg/loader_test.go @@ -32,6 +32,23 @@ func TestLoaderSkipsExistingAuth(t *testing.T) { assert.NoError(t, err) } +func TestLoaderSkipsExplicitAuthType(t *testing.T) { + cfg := config.Config{ + Loaders: []config.Loader{ + ResolveProfileFromHost, + }, + ConfigFile: "testdata/databrickscfg", + Host: "https://default", + AuthType: "azure-cli", + } + + err := cfg.EnsureResolved() + assert.NoError(t, err) + assert.Equal(t, "azure-cli", cfg.AuthType) + assert.Empty(t, cfg.Profile) + assert.Empty(t, cfg.Token) +} + func TestLoaderSkipsNonExistingConfigFile(t *testing.T) { cfg := config.Config{ Loaders: []config.Loader{ From cee70a53c849054027d5f77b436def08a168ea66 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 18 Dec 2023 12:22:22 +0100 Subject: [PATCH 290/310] Test existing behavior when loading non-string spark conf values (#1071) ## Changes This test is expected to fail when we enable the custom YAML loader. --- .../tests/job_with_spark_conf/databricks.yml | 25 +++++++++++++++++++ bundle/tests/job_with_spark_conf_test.go | 22 ++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 bundle/tests/job_with_spark_conf/databricks.yml create mode 100644 bundle/tests/job_with_spark_conf_test.go diff --git a/bundle/tests/job_with_spark_conf/databricks.yml b/bundle/tests/job_with_spark_conf/databricks.yml new file mode 100644 index 00000000..9b568cf9 --- /dev/null +++ b/bundle/tests/job_with_spark_conf/databricks.yml @@ -0,0 +1,25 @@ +resources: + jobs: + job_with_spark_conf: + name: Test job + max_concurrent_runs: 1 + + job_clusters: + - job_cluster_key: test_cluster + new_cluster: + spark_version: 14.2.x-scala2.12 + node_type_id: i3.xlarge + num_workers: 2 + spark_conf: + + # Test behavior if non-string values are specified. + spark.string: string + spark.int: 1 + spark.bool: true + spark.float: 1.2 + + tasks: + - task_key: test_task + job_cluster_key: test_cluster + spark_python_task: + python_file: test.py diff --git a/bundle/tests/job_with_spark_conf_test.go b/bundle/tests/job_with_spark_conf_test.go new file mode 100644 index 00000000..a2c04c5e --- /dev/null +++ b/bundle/tests/job_with_spark_conf_test.go @@ -0,0 +1,22 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestJobWithSparkConf(t *testing.T) { + b := loadTarget(t, "./job_with_spark_conf", "default") + assert.Len(t, b.Config.Resources.Jobs, 1) + + job := b.Config.Resources.Jobs["job_with_spark_conf"] + assert.Len(t, job.JobClusters, 1) + assert.Equal(t, "test_cluster", job.JobClusters[0].JobClusterKey) + + // Existing behavior is such that including non-string values + // in the spark_conf map will cause the job to fail to load. + // This is expected to be solved once we switch to the custom YAML loader. + tasks := job.Tasks + assert.Len(t, tasks, 0, "see https://github.com/databricks/cli/issues/992") +} From 4765493f187138c15c40cd00b5f43211a2181237 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 12:54:28 +0100 Subject: [PATCH 291/310] Bump github.com/google/uuid from 1.4.0 to 1.5.0 (#1073) Bumps [github.com/google/uuid](https://github.com/google/uuid) from 1.4.0 to 1.5.0.
Release notes (sourced from github.com/google/uuid's releases and changelog):

v1.5.0 (2023-12-12)

Features

  • Validate UUID without creating new UUID (#141) (9ee7366)
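To illustrate the feature above: a minimal sketch of allocation-free validation, assuming the `uuid.Validate(string) error` helper described in the v1.5.0 release notes (the input string is just an example):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	s := "123e4567-e89b-12d3-a456-426614174000"

	// Validate reports whether s is a well-formed UUID without
	// constructing a uuid.UUID value, unlike uuid.Parse.
	if err := uuid.Validate(s); err != nil {
		fmt.Println("invalid UUID:", err)
		return
	}
	fmt.Println("valid UUID")
}
```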
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d8ab6cb4..0f859790 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/databricks/databricks-sdk-go v0.27.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE - github.com/google/uuid v1.4.0 // BSD-3-Clause + github.com/google/uuid v1.5.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.2 // MPL 2.0 github.com/hashicorp/terraform-exec v0.19.0 // MPL 2.0 diff --git a/go.sum b/go.sum index 946b5dd6..2112cacb 100644 --- a/go.sum +++ b/go.sum @@ -84,8 +84,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= From 6dd6899b527e697ee12aca14be6030ccfd3ed4cf Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 18 Dec 2023 16:01:59 +0100 Subject: [PATCH 292/310] Do not allow input prompts in Git Bash terminal (#1069) ## Changes Likely due to fact that Git Bash does not correctly support ANSI escape sequences, we cannot use `promptui` package there. See known issues: - https://github.com/manifoldco/promptui/issues/208 - https://github.com/chzyer/readline/issues/191 --- cmd/bundle/init.go | 2 +- cmd/bundle/run.go | 2 +- cmd/labs/project/installer.go | 2 +- cmd/labs/project/login.go | 8 ++++---- cmd/root/auth.go | 4 ++-- libs/cmdio/io.go | 25 +++++++++++++++++++++++++ libs/cmdio/io_test.go | 21 +++++++++++++++++++++ libs/template/config.go | 2 +- 8 files changed, 56 insertions(+), 10 deletions(-) create mode 100644 libs/cmdio/io_test.go diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index ac6f49de..18d76db1 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -131,7 +131,7 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf templatePath = args[0] } else { var err error - if !cmdio.IsOutTTY(ctx) || !cmdio.IsInTTY(ctx) { + if !cmdio.IsPromptSupported(ctx) { return errors.New("please specify a template") } templatePath, err = cmdio.AskSelect(ctx, "Template to use", nativeTemplateOptions()) diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index b2766b20..c9e35aa3 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -45,7 +45,7 @@ func newRunCommand() *cobra.Command { } // If no arguments are specified, prompt the user to select something to run. - if len(args) == 0 && cmdio.IsInteractive(ctx) { + if len(args) == 0 && cmdio.IsPromptSupported(ctx) { // Invert completions from KEY -> NAME, to NAME -> KEY. 
inv := make(map[string]string) for k, v := range run.ResourceCompletionMap(b) { diff --git a/cmd/labs/project/installer.go b/cmd/labs/project/installer.go index fa676819..7ba2830e 100644 --- a/cmd/labs/project/installer.go +++ b/cmd/labs/project/installer.go @@ -157,7 +157,7 @@ func (i *installer) recordVersion(ctx context.Context) error { } func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, error) { - if !cmdio.IsInteractive(ctx) { + if !cmdio.IsPromptSupported(ctx) { log.Debugf(ctx, "Skipping workspace profile prompts in non-interactive mode") return nil, nil } diff --git a/cmd/labs/project/login.go b/cmd/labs/project/login.go index dd235064..fc872bcf 100644 --- a/cmd/labs/project/login.go +++ b/cmd/labs/project/login.go @@ -50,7 +50,7 @@ func (lc *loginConfig) askWorkspaceProfile(ctx context.Context, cfg *config.Conf lc.WorkspaceProfile = cfg.Profile return } - if !cmdio.IsInteractive(ctx) { + if !cmdio.IsPromptSupported(ctx) { return ErrNotInTTY } lc.WorkspaceProfile, err = root.AskForWorkspaceProfile(ctx) @@ -66,7 +66,7 @@ func (lc *loginConfig) askCluster(ctx context.Context, w *databricks.WorkspaceCl lc.ClusterID = w.Config.ClusterID return } - if !cmdio.IsInteractive(ctx) { + if !cmdio.IsPromptSupported(ctx) { return ErrNotInTTY } clusterID, err := cfgpickers.AskForCluster(ctx, w, @@ -87,7 +87,7 @@ func (lc *loginConfig) askWarehouse(ctx context.Context, w *databricks.Workspace lc.WarehouseID = w.Config.WarehouseID return } - if !cmdio.IsInteractive(ctx) { + if !cmdio.IsPromptSupported(ctx) { return ErrNotInTTY } lc.WarehouseID, err = cfgpickers.AskForWarehouse(ctx, w, @@ -99,7 +99,7 @@ func (lc *loginConfig) askAccountProfile(ctx context.Context, cfg *config.Config if !lc.HasAccountLevelCommands() { return nil } - if !cmdio.IsInteractive(ctx) { + if !cmdio.IsPromptSupported(ctx) { return ErrNotInTTY } lc.AccountProfile, err = root.AskForAccountProfile(ctx) diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 33f80e1f..2a0cb22e 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -41,7 +41,7 @@ func accountClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt } prompt := false - if allowPrompt && err != nil && cmdio.IsInteractive(ctx) { + if allowPrompt && err != nil && cmdio.IsPromptSupported(ctx) { // Prompt to select a profile if the current configuration is not an account client. prompt = prompt || errors.Is(err, databricks.ErrNotAccountClient) // Prompt to select a profile if the current configuration doesn't resolve to a credential provider. @@ -109,7 +109,7 @@ func workspaceClientOrPrompt(ctx context.Context, cfg *config.Config, allowPromp } prompt := false - if allowPrompt && err != nil && cmdio.IsInteractive(ctx) { + if allowPrompt && err != nil && cmdio.IsPromptSupported(ctx) { // Prompt to select a profile if the current configuration is not a workspace client. prompt = prompt || errors.Is(err, databricks.ErrNotWorkspaceClient) // Prompt to select a profile if the current configuration doesn't resolve to a credential provider. 
diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index cf405a7a..8b421ef5 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -10,6 +10,7 @@ import ( "time" "github.com/briandowns/spinner" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/flags" "github.com/manifoldco/promptui" "github.com/mattn/go-isatty" @@ -88,6 +89,30 @@ func (c *cmdIO) IsTTY() bool { return isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd) } +func IsPromptSupported(ctx context.Context) bool { + // We do not allow prompting in non-interactive mode and in Git Bash on Windows. + // Likely due to fact that Git Bash does not (correctly support ANSI escape sequences, + // we cannot use promptui package there. + // See known issues: + // - https://github.com/manifoldco/promptui/issues/208 + // - https://github.com/chzyer/readline/issues/191 + // We also do not allow prompting in non-interactive mode, + // because it's not possible to read from stdin in non-interactive mode. + return (IsInteractive(ctx) || (IsOutTTY(ctx) && IsInTTY(ctx))) && !IsGitBash(ctx) +} + +func IsGitBash(ctx context.Context) bool { + // Check if the MSYSTEM environment variable is set to "MINGW64" + msystem := env.Get(ctx, "MSYSTEM") + if strings.EqualFold(msystem, "MINGW64") { + // Check for typical Git Bash env variable for prompts + ps1 := env.Get(ctx, "PS1") + return strings.Contains(ps1, "MINGW") || strings.Contains(ps1, "MSYSTEM") + } + + return false +} + func Render(ctx context.Context, v any) error { c := fromContext(ctx) return RenderWithTemplate(ctx, v, c.template) diff --git a/libs/cmdio/io_test.go b/libs/cmdio/io_test.go new file mode 100644 index 00000000..1e474204 --- /dev/null +++ b/libs/cmdio/io_test.go @@ -0,0 +1,21 @@ +package cmdio + +import ( + "context" + "testing" + + "github.com/databricks/cli/libs/env" + "github.com/stretchr/testify/assert" +) + +func TestIsPromptSupportedFalseForGitBash(t *testing.T) { + ctx := context.Background() + ctx, _ = SetupTest(ctx) + + assert.True(t, IsPromptSupported(ctx)) + + ctx = env.Set(ctx, "MSYSTEM", "MINGW64") + ctx = env.Set(ctx, "TERM", "xterm") + ctx = env.Set(ctx, "PS1", "\\[\033]0;$TITLEPREFIX:$PWD\007\\]\n\\[\033[32m\\]\\u@\\h \\[\033[35m\\]$MSYSTEM \\[\033[33m\\]\\w\\[\033[36m\\]`__git_ps1`\\[\033[0m\\]\n$") + assert.False(t, IsPromptSupported(ctx)) +} diff --git a/libs/template/config.go b/libs/template/config.go index 2b4d19d1..b52c0ee8 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -212,7 +212,7 @@ func (c *config) promptForValues(r *renderer) error { // Prompt user for any missing config values. 
Assign default values if // terminal is not TTY func (c *config) promptOrAssignDefaultValues(r *renderer) error { - if cmdio.IsOutTTY(c.ctx) && cmdio.IsInTTY(c.ctx) { + if cmdio.IsPromptSupported(c.ctx) { return c.promptForValues(r) } return c.assignDefaultValues(r) From 5526cd3fb29fc4906f119d268f643f12a9d275cc Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 18 Dec 2023 17:09:11 +0100 Subject: [PATCH 293/310] Added output template for list-secrets command (#1074) ## Changes Fixes #1067 ## Tests ``` andrew.nester@HFW9Y94129 cli % databricks secrets list-secrets "my-test-scope" --output text Key Last Updated Timestamp my-secret 1692805686489 my-test-secret 1692767910771 ``` --- cmd/workspace/secrets/overrides.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/workspace/secrets/overrides.go b/cmd/workspace/secrets/overrides.go index 40c7baba..6e765bf7 100644 --- a/cmd/workspace/secrets/overrides.go +++ b/cmd/workspace/secrets/overrides.go @@ -2,6 +2,7 @@ package secrets import ( "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/spf13/cobra" ) @@ -16,7 +17,15 @@ func listScopesOverride(listScopesCmd *cobra.Command) { {{end}}`) } +func listSecretsOverride(listSecretsCommand *cobra.Command, _ *workspace.ListSecretsRequest) { + listSecretsCommand.Annotations["template"] = cmdio.Heredoc(` + {{header "Key"}} {{header "Last Updated Timestamp"}} + {{range .}}{{.Key|green}} {{.LastUpdatedTimestamp}} + {{end}}`) +} + func init() { cmdOverrides = append(cmdOverrides, cmdOverride) listScopesOverrides = append(listScopesOverrides, listScopesOverride) + listSecretsOverrides = append(listSecretsOverrides, listSecretsOverride) } From 2d93f62f2102326fbd8ec867cd67ded88187cb42 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 19 Dec 2023 13:08:52 +0530 Subject: [PATCH 294/310] Set metadata fields required to enable break-glass UI for jobs (#880) ## Changes This PR sets the following fields for all jobs that are deployed from a DAB 1. `deployment`: This provides the platform with the path to a file to read the metadata from. 2. `edit_mode`: This tells the platform to display the break-glass UI for jobs deployed from a DAB. Setting this is required to re-lock the UI after a user clicks "disconnect from source". 3. `format = MULTI_TASK`. This makes the Terraform provider always use jobs API 2.1 for creating/updating the job. Required because `deployment` and `edit_mode` are only available in API 2.1. ## Tests Unit test and manually. Manually verified that deployments trigger the break glass UI. Manually verified there is no Terraform drift when all three fields are set. 
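For reference, a small sketch that prints the wire form of the three fields, using the same illustrative `/a/b/c` state path as the unit test below; the expected enum values in the trailing comment assume the SDK's usual upper-snake wire names:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// Illustrative state path; real deployments derive it from the
	// bundle's workspace state path.
	s := jobs.JobSettings{
		Deployment: &jobs.JobDeployment{
			Kind:             jobs.JobDeploymentKindBundle,
			MetadataFilePath: "/a/b/c/metadata.json",
		},
		EditMode: jobs.JobSettingsEditModeUiLocked,
		Format:   jobs.FormatMultiTask,
	}

	out, _ := json.MarshalIndent(s, "", "  ")
	fmt.Println(string(out)) // kind "BUNDLE", edit_mode "UI_LOCKED", format "MULTI_TASK"
}
```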
--------- Co-authored-by: Pieter Noordhuis --- bundle/deploy/metadata/annotate_jobs.go | 36 ++++++++++ bundle/deploy/metadata/annotate_jobs_test.go | 72 ++++++++++++++++++++ bundle/phases/initialize.go | 2 + 3 files changed, 110 insertions(+) create mode 100644 bundle/deploy/metadata/annotate_jobs.go create mode 100644 bundle/deploy/metadata/annotate_jobs_test.go diff --git a/bundle/deploy/metadata/annotate_jobs.go b/bundle/deploy/metadata/annotate_jobs.go new file mode 100644 index 00000000..5b9ae5b8 --- /dev/null +++ b/bundle/deploy/metadata/annotate_jobs.go @@ -0,0 +1,36 @@ +package metadata + +import ( + "context" + "path" + + "github.com/databricks/cli/bundle" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +type annotateJobs struct{} + +func AnnotateJobs() bundle.Mutator { + return &annotateJobs{} +} + +func (m *annotateJobs) Name() string { + return "metadata.AnnotateJobs" +} + +func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) error { + for _, job := range b.Config.Resources.Jobs { + if job.JobSettings == nil { + continue + } + + job.JobSettings.Deployment = &jobs.JobDeployment{ + Kind: jobs.JobDeploymentKindBundle, + MetadataFilePath: path.Join(b.Config.Workspace.StatePath, MetadataFileName), + } + job.JobSettings.EditMode = jobs.JobSettingsEditModeUiLocked + job.JobSettings.Format = jobs.FormatMultiTask + } + + return nil +} diff --git a/bundle/deploy/metadata/annotate_jobs_test.go b/bundle/deploy/metadata/annotate_jobs_test.go new file mode 100644 index 00000000..c7a02e75 --- /dev/null +++ b/bundle/deploy/metadata/annotate_jobs_test.go @@ -0,0 +1,72 @@ +package metadata + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestAnnotateJobsMutator(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + StatePath: "/a/b/c", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my-job-1": { + JobSettings: &jobs.JobSettings{ + Name: "My Job One", + }, + }, + "my-job-2": { + JobSettings: &jobs.JobSettings{ + Name: "My Job Two", + }, + }, + }, + }, + }, + } + + err := AnnotateJobs().Apply(context.Background(), b) + assert.NoError(t, err) + + assert.Equal(t, + &jobs.JobDeployment{ + Kind: jobs.JobDeploymentKindBundle, + MetadataFilePath: "/a/b/c/metadata.json", + }, + b.Config.Resources.Jobs["my-job-1"].JobSettings.Deployment) + assert.Equal(t, jobs.JobSettingsEditModeUiLocked, b.Config.Resources.Jobs["my-job-1"].EditMode) + assert.Equal(t, jobs.FormatMultiTask, b.Config.Resources.Jobs["my-job-1"].Format) + + assert.Equal(t, + &jobs.JobDeployment{ + Kind: jobs.JobDeploymentKindBundle, + MetadataFilePath: "/a/b/c/metadata.json", + }, + b.Config.Resources.Jobs["my-job-2"].JobSettings.Deployment) + assert.Equal(t, jobs.JobSettingsEditModeUiLocked, b.Config.Resources.Jobs["my-job-2"].EditMode) + assert.Equal(t, jobs.FormatMultiTask, b.Config.Resources.Jobs["my-job-2"].Format) +} + +func TestAnnotateJobsMutatorJobWithoutSettings(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my-job-1": {}, + }, + }, + }, + } + + err := AnnotateJobs().Apply(context.Background(), b) + assert.NoError(t, err) +} diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 6d84b0e1..d1acdd2f 100644 --- 
a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle/config/interpolation" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/bundle/deploy/metadata" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/permissions" "github.com/databricks/cli/bundle/python" @@ -37,6 +38,7 @@ func Initialize() bundle.Mutator { mutator.TranslatePaths(), python.WrapperWarning(), permissions.ApplyBundlePermissions(), + metadata.AnnotateJobs(), terraform.Initialize(), scripts.Execute(config.ScriptPostInit), }, From 5991e33ca7b606d0710766f610dda6e15c8435a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 10:18:05 +0100 Subject: [PATCH 295/310] Bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#1076) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.16.0 to 0.17.0.
Commits
  • 9d2ee97 ssh: implement strict KEX protocol changes
  • 4e5a261 ssh: close net.Conn on all NewServerConn errors
  • 152cdb1 x509roots/fallback: update bundle
  • fdfe1f8 ssh: defer channel window adjustment
  • b8ffc16 blake2b: drop Go 1.6, Go 1.8 compatibility
  • 7e6fbd8 ssh: wrap errors from client handshake
  • bda2f3f argon2: avoid clobbering BP
  • See full diff in compare view
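Worth noting: the strict KEX commit above is the mitigation for the SSH "Terrapin" attack (CVE-2023-48795). Existing `x/crypto/ssh` callers pick it up on rebuild with no call-site changes; a minimal sketch, assuming a hypothetical host and credentials:

```go
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User:            "demo",                                   // hypothetical
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // hypothetical
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // demo only; verify host keys in real use
	}

	// Strict key exchange is negotiated inside the library; this call
	// site is identical before and after the 0.17.0 upgrade.
	client, err := ssh.Dial("tcp", "example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```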

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0f859790..70224949 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.16.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/net v0.19.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index 2112cacb..4256acbd 100644 --- a/go.sum +++ b/go.sum @@ -161,8 +161,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= From f18094d9437b8ad05a34da70215477c30911c647 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 19 Dec 2023 10:58:46 +0100 Subject: [PATCH 296/310] Revert using IsPromptSupported from promptOrAssignDefaultValues (#1077) ## Changes Fixes nightly test `TestAccBundleInitErrorOnUnknownFields`. `TestAccBundleInitErrorOnUnknownFields` has an interactive shell by default so the test fails on waiting for prompt. This was introduced in #1069. ## Tests Nightly test succeed. --- libs/template/config.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/template/config.go b/libs/template/config.go index b52c0ee8..a54d394d 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -212,7 +212,8 @@ func (c *config) promptForValues(r *renderer) error { // Prompt user for any missing config values. 
Assign default values if // terminal is not TTY func (c *config) promptOrAssignDefaultValues(r *renderer) error { - if cmdio.IsPromptSupported(c.ctx) { + // TODO: replace with IsPromptSupported call (requires fixing TestAccBundleInitErrorOnUnknownFields test) + if cmdio.IsOutTTY(c.ctx) && cmdio.IsInTTY(c.ctx) { return c.promptForValues(r) } return c.assignDefaultValues(r) From 42f21d82fe4267dd6ff818d6763ccdca5db5c07e Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 20 Dec 2023 13:01:53 +0100 Subject: [PATCH 297/310] Do not prompt for template values in Git Bash (#1082) ## Changes Follow up on #1077 --- libs/template/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/template/config.go b/libs/template/config.go index a54d394d..85fa2265 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -213,7 +213,7 @@ func (c *config) promptForValues(r *renderer) error { // terminal is not TTY func (c *config) promptOrAssignDefaultValues(r *renderer) error { // TODO: replace with IsPromptSupported call (requires fixing TestAccBundleInitErrorOnUnknownFields test) - if cmdio.IsOutTTY(c.ctx) && cmdio.IsInTTY(c.ctx) { + if cmdio.IsOutTTY(c.ctx) && cmdio.IsInTTY(c.ctx) && !cmdio.IsGitBash(c.ctx) { return c.promptForValues(r) } return c.assignDefaultValues(r) From 875c9d2db13a1d5649627443b935b73c1c854845 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Thu, 21 Dec 2023 09:00:37 +0100 Subject: [PATCH 298/310] Tune output of bundle deploy command (#1047) ## Changes Update the output of the `deploy` command to be more concise and consistent: ``` $ databricks bundle deploy Building my_project... Uploading my_project-0.0.1+20231207.205106-py3-none-any.whl... Uploading bundle files to /Users/lennart.kats@databricks.com/.bundle/my_project/dev/files... Deploying resources... Updating deployment state... Deployment complete! ``` This does away with the intermediate success messages, makes consistent use of `...`, and only prints the success message at the very end after everything is completed. Below is the original output for comparison: ``` $ databricks bundle deploy Detecting Python wheel project... Found Python wheel project at /tmp/output/my_project Building my_project... Build succeeded Uploading my_project-0.0.1+20231207.205134-py3-none-any.whl... Upload succeeded Starting upload of bundle files Uploaded bundle files at /Users/lennart.kats@databricks.com/.bundle/my_project/dev/files! Starting resource deployment Resource deployment completed! 
``` --- bundle/artifacts/artifacts.go | 5 +++-- bundle/artifacts/whl/autodetect.go | 7 +++---- bundle/artifacts/whl/build.go | 3 ++- bundle/deploy/files/upload.go | 5 +++-- bundle/deploy/terraform/apply.go | 5 +++-- bundle/deploy/terraform/state_push.go | 2 ++ bundle/log_string.go | 27 +++++++++++++++++++++++++++ bundle/phases/deploy.go | 1 + bundle/phases/destroy.go | 1 + 9 files changed, 45 insertions(+), 11 deletions(-) create mode 100644 bundle/log_string.go diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index dd261d3b..76d29f56 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/log" ) type mutatorFactory = func(name string) bundle.Mutator @@ -67,7 +68,7 @@ func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) error { if err != nil { return fmt.Errorf("build for %s failed, error: %w, output: %s", m.name, err, out) } - cmdio.LogString(ctx, "Build succeeded") + log.Infof(ctx, "Build succeeded") return nil } @@ -124,7 +125,7 @@ func uploadArtifact(ctx context.Context, a *config.Artifact, uploadPath string, if err != nil { return err } - cmdio.LogString(ctx, "Upload succeeded") + log.Infof(ctx, "Upload succeeded") f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) } } diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index 7c1c59d4..c858a38c 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -11,7 +11,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/log" ) @@ -32,17 +31,17 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect") return nil } - cmdio.LogString(ctx, "Detecting Python wheel project...") + log.Infof(ctx, "Detecting Python wheel project...") // checking if there is setup.py in the bundle root setupPy := filepath.Join(b.Config.Path, "setup.py") _, err := os.Stat(setupPy) if err != nil { - cmdio.LogString(ctx, "No Python wheel project found at bundle root folder") + log.Infof(ctx, "No Python wheel project found at bundle root folder") return nil } - cmdio.LogString(ctx, fmt.Sprintf("Found Python wheel project at %s", b.Config.Path)) + log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.Config.Path)) module := extractModuleName(setupPy) if b.Config.Artifacts == nil { diff --git a/bundle/artifacts/whl/build.go b/bundle/artifacts/whl/build.go index c1e7e8fa..aeec31a6 100644 --- a/bundle/artifacts/whl/build.go +++ b/bundle/artifacts/whl/build.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/python" ) @@ -44,7 +45,7 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { if err != nil { return fmt.Errorf("build failed %s, error: %w, output: %s", m.name, err, out) } - cmdio.LogString(ctx, "Build succeeded") + log.Infof(ctx, "Build succeeded") wheels := python.FindFilesWithSuffixInPath(distPath, ".whl") if len(wheels) == 0 { diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index 
aebbf6d5..26d1ef4b 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/log" ) type upload struct{} @@ -15,7 +16,7 @@ func (m *upload) Name() string { } func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { - cmdio.LogString(ctx, "Starting upload of bundle files") + cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath)) sync, err := getSync(ctx, b) if err != nil { return err @@ -26,7 +27,7 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - cmdio.LogString(ctx, fmt.Sprintf("Uploaded bundle files at %s!\n", b.Config.Workspace.FilePath)) + log.Infof(ctx, "Uploaded bundle files") return nil } diff --git a/bundle/deploy/terraform/apply.go b/bundle/deploy/terraform/apply.go index ab868f76..117cdfc1 100644 --- a/bundle/deploy/terraform/apply.go +++ b/bundle/deploy/terraform/apply.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/log" "github.com/hashicorp/terraform-exec/tfexec" ) @@ -21,7 +22,7 @@ func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error { return fmt.Errorf("terraform not initialized") } - cmdio.LogString(ctx, "Starting resource deployment") + cmdio.LogString(ctx, "Deploying resources...") err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { @@ -33,7 +34,7 @@ func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error { return fmt.Errorf("terraform apply: %w", err) } - cmdio.LogString(ctx, "Resource deployment completed!") + log.Infof(ctx, "Resource deployment completed") return nil } diff --git a/bundle/deploy/terraform/state_push.go b/bundle/deploy/terraform/state_push.go index 30a43596..a5140329 100644 --- a/bundle/deploy/terraform/state_push.go +++ b/bundle/deploy/terraform/state_push.go @@ -6,6 +6,7 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) @@ -37,6 +38,7 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { defer local.Close() // Upload state file from local cache directory to filer. 
+ cmdio.LogString(ctx, "Updating deployment state...") log.Infof(ctx, "Writing local state file to remote state directory") err = f.Write(ctx, TerraformStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists) if err != nil { diff --git a/bundle/log_string.go b/bundle/log_string.go new file mode 100644 index 00000000..63800d6d --- /dev/null +++ b/bundle/log_string.go @@ -0,0 +1,27 @@ +package bundle + +import ( + "context" + + "github.com/databricks/cli/libs/cmdio" +) + +type LogStringMutator struct { + message string +} + +func (d *LogStringMutator) Name() string { + return "log_string" +} + +func LogString(message string) Mutator { + return &LogStringMutator{ + message: message, + } +} + +func (m *LogStringMutator) Apply(ctx context.Context, b *Bundle) error { + cmdio.LogString(ctx, m.message) + + return nil +} diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 6f0d3a6c..20fe2e41 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -45,6 +45,7 @@ func Deploy() bundle.Mutator { lock.Release(lock.GoalDeploy), ), scripts.Execute(config.ScriptPostDeploy), + bundle.LogString("Deployment complete!"), ) return newPhase( diff --git a/bundle/phases/destroy.go b/bundle/phases/destroy.go index 5841916d..216d2921 100644 --- a/bundle/phases/destroy.go +++ b/bundle/phases/destroy.go @@ -24,6 +24,7 @@ func Destroy() bundle.Mutator { ), lock.Release(lock.GoalDestroy), ), + bundle.LogString("Destroy complete!"), ) return newPhase( From 55732bc6acb9b5d491d7d4fe67d73756ec2d59b7 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 21 Dec 2023 15:23:08 +0530 Subject: [PATCH 299/310] Release v0.211.0 (#1083) CLI: * Upgrade Go SDK to 0.27.0 ([#1064](https://github.com/databricks/cli/pull/1064)). * Skip profile resolution if `DATABRICKS_AUTH_TYPE` is set ([#1068](https://github.com/databricks/cli/pull/1068)). * Do not allow input prompts in Git Bash terminal ([#1069](https://github.com/databricks/cli/pull/1069)). * Added output template for list-secrets command ([#1074](https://github.com/databricks/cli/pull/1074)). Bundles: * Set metadata fields required to enable break-glass UI for jobs ([#880](https://github.com/databricks/cli/pull/880)). * Do not prompt for template values in Git Bash ([#1082](https://github.com/databricks/cli/pull/1082)). * Tune output of bundle deploy command ([#1047](https://github.com/databricks/cli/pull/1047)). API Changes: * Changed `databricks connections update` command with new required argument order. * Changed `databricks serving-endpoints update-config` command with new required argument order. * Added `databricks serving-endpoints put` command. * Removed `databricks account network-policy` command group. OpenAPI commit 63caa3cb0c05045e81d3dcf2451fa990d8670f36 (2023-12-12) Dependency updates: * Bump github.com/google/uuid from 1.4.0 to 1.5.0 ([#1073](https://github.com/databricks/cli/pull/1073)). * Bump golang.org/x/crypto from 0.16.0 to 0.17.0 ([#1076](https://github.com/databricks/cli/pull/1076)). --- CHANGELOG.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 051494c6..03fa1fa6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Version changelog +## 0.211.0 + +CLI: + * Upgrade Go SDK to 0.27.0 ([#1064](https://github.com/databricks/cli/pull/1064)). + * Skip profile resolution if `DATABRICKS_AUTH_TYPE` is set ([#1068](https://github.com/databricks/cli/pull/1068)). 
+ * Do not allow input prompts in Git Bash terminal ([#1069](https://github.com/databricks/cli/pull/1069)). + * Added output template for list-secrets command ([#1074](https://github.com/databricks/cli/pull/1074)). + +Bundles: +* Set metadata fields required to enable break-glass UI for jobs ([#880](https://github.com/databricks/cli/pull/880)). +* Do not prompt for template values in Git Bash ([#1082](https://github.com/databricks/cli/pull/1082)). +* Tune output of bundle deploy command ([#1047](https://github.com/databricks/cli/pull/1047)). + +API Changes: + * Changed `databricks connections update` command with new required argument order. + * Changed `databricks serving-endpoints update-config` command with new required argument order. + * Added `databricks serving-endpoints put` command. + * Removed `databricks account network-policy` command group. + +OpenAPI commit 63caa3cb0c05045e81d3dcf2451fa990d8670f36 (2023-12-12) + +Dependency updates: + * Bump github.com/google/uuid from 1.4.0 to 1.5.0 ([#1073](https://github.com/databricks/cli/pull/1073)). + * Bump golang.org/x/crypto from 0.16.0 to 0.17.0 ([#1076](https://github.com/databricks/cli/pull/1076)). + ## 0.210.3 Bundles: From ac37a592f1ceb6e1b2b4ab913924830369f25529 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 21 Dec 2023 16:45:23 +0100 Subject: [PATCH 300/310] Added exec.NewCommandExecutor to execute commands with correct interpreter (#1075) ## Changes Instead of handling command chaining ourselves, we execute passed commands as-is by storing them, in temp file and passing to correct interpreter (bash or cmd) based on OS. Fixes #1065 ## Tests Added unit tests --- bundle/artifacts/whl/infer.go | 2 +- bundle/config/artifact.go | 21 ++--- bundle/config/artifacts_test.go | 18 +++++ bundle/scripts/scripts.go | 34 +++----- bundle/scripts/scripts_test.go | 6 +- libs/exec/exec.go | 101 ++++++++++++++++++++++++ libs/exec/exec_test.go | 136 ++++++++++++++++++++++++++++++++ libs/exec/interpreter.go | 123 +++++++++++++++++++++++++++++ 8 files changed, 399 insertions(+), 42 deletions(-) create mode 100644 bundle/config/artifacts_test.go create mode 100644 libs/exec/exec.go create mode 100644 libs/exec/exec_test.go create mode 100644 libs/exec/interpreter.go diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index dedecc30..dc2b8e23 100644 --- a/bundle/artifacts/whl/infer.go +++ b/bundle/artifacts/whl/infer.go @@ -33,7 +33,7 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error { // version=datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S"), // ... 
//) - artifact.BuildCommand = fmt.Sprintf("%s setup.py bdist_wheel", py) + artifact.BuildCommand = fmt.Sprintf(`"%s" setup.py bdist_wheel`, py) return nil } diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index 63ab6c48..2a1a92a1 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -1,14 +1,12 @@ package config import ( - "bytes" "context" "fmt" "path" - "strings" "github.com/databricks/cli/bundle/config/paths" - "github.com/databricks/cli/libs/process" + "github.com/databricks/cli/libs/exec" "github.com/databricks/databricks-sdk-go/service/compute" ) @@ -52,20 +50,11 @@ func (a *Artifact) Build(ctx context.Context) ([]byte, error) { return nil, fmt.Errorf("no build property defined") } - out := make([][]byte, 0) - commands := strings.Split(a.BuildCommand, " && ") - for _, command := range commands { - buildParts := strings.Split(command, " ") - var buf bytes.Buffer - _, err := process.Background(ctx, buildParts, - process.WithCombinedOutput(&buf), - process.WithDir(a.Path)) - if err != nil { - return buf.Bytes(), err - } - out = append(out, buf.Bytes()) + e, err := exec.NewCommandExecutor(a.Path) + if err != nil { + return nil, err } - return bytes.Join(out, []byte{}), nil + return e.Exec(ctx, a.BuildCommand) } func (a *Artifact) NormalisePaths() { diff --git a/bundle/config/artifacts_test.go b/bundle/config/artifacts_test.go new file mode 100644 index 00000000..5fa159fd --- /dev/null +++ b/bundle/config/artifacts_test.go @@ -0,0 +1,18 @@ +package config + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestArtifactBuild(t *testing.T) { + artifact := Artifact{ + BuildCommand: "echo 'Hello from build command'", + } + res, err := artifact.Build(context.Background()) + assert.NoError(t, err) + assert.NotNil(t, res) + assert.Equal(t, "Hello from build command\n", string(res)) +} diff --git a/bundle/scripts/scripts.go b/bundle/scripts/scripts.go index 90c1914f..2f13bc19 100644 --- a/bundle/scripts/scripts.go +++ b/bundle/scripts/scripts.go @@ -5,12 +5,12 @@ import ( "context" "fmt" "io" - "os/exec" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/exec" "github.com/databricks/cli/libs/log" ) @@ -29,7 +29,12 @@ func (m *script) Name() string { } func (m *script) Apply(ctx context.Context, b *bundle.Bundle) error { - cmd, out, err := executeHook(ctx, b, m.scriptHook) + executor, err := exec.NewCommandExecutor(b.Config.Path) + if err != nil { + return err + } + + cmd, out, err := executeHook(ctx, executor, b, m.scriptHook) if err != nil { return err } @@ -50,32 +55,18 @@ func (m *script) Apply(ctx context.Context, b *bundle.Bundle) error { return cmd.Wait() } -func executeHook(ctx context.Context, b *bundle.Bundle, hook config.ScriptHook) (*exec.Cmd, io.Reader, error) { +func executeHook(ctx context.Context, executor *exec.Executor, b *bundle.Bundle, hook config.ScriptHook) (exec.Command, io.Reader, error) { command := getCommmand(b, hook) if command == "" { return nil, nil, nil } - interpreter, err := findInterpreter() + cmd, err := executor.StartCommand(ctx, string(command)) if err != nil { return nil, nil, err } - // TODO: switch to process.Background(...) 
- cmd := exec.CommandContext(ctx, interpreter, "-c", string(command)) - cmd.Dir = b.Config.Path - - outPipe, err := cmd.StdoutPipe() - if err != nil { - return nil, nil, err - } - - errPipe, err := cmd.StderrPipe() - if err != nil { - return nil, nil, err - } - - return cmd, io.MultiReader(outPipe, errPipe), cmd.Start() + return cmd, io.MultiReader(cmd.Stdout(), cmd.Stderr()), nil } func getCommmand(b *bundle.Bundle, hook config.ScriptHook) config.Command { @@ -85,8 +76,3 @@ func getCommmand(b *bundle.Bundle, hook config.ScriptHook) config.Command { return b.Config.Experimental.Scripts[hook] } - -func findInterpreter() (string, error) { - // At the moment we just return 'sh' on all platforms and use it to execute scripts - return "sh", nil -} diff --git a/bundle/scripts/scripts_test.go b/bundle/scripts/scripts_test.go index 8b7aa0d1..a8835b59 100644 --- a/bundle/scripts/scripts_test.go +++ b/bundle/scripts/scripts_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/exec" "github.com/stretchr/testify/require" ) @@ -21,7 +22,10 @@ func TestExecutesHook(t *testing.T) { }, }, } - _, out, err := executeHook(context.Background(), b, config.ScriptPreBuild) + + executor, err := exec.NewCommandExecutor(b.Config.Path) + require.NoError(t, err) + _, out, err := executeHook(context.Background(), executor, b, config.ScriptPreBuild) require.NoError(t, err) reader := bufio.NewReader(out) diff --git a/libs/exec/exec.go b/libs/exec/exec.go new file mode 100644 index 00000000..7ef6762b --- /dev/null +++ b/libs/exec/exec.go @@ -0,0 +1,101 @@ +package exec + +import ( + "context" + "io" + "os" + osexec "os/exec" +) + +type Command interface { + // Wait for command to terminate. It must have been previously started. + Wait() error + + // StdinPipe returns a pipe that will be connected to the command's standard input when the command starts. + Stdout() io.ReadCloser + + // StderrPipe returns a pipe that will be connected to the command's standard error when the command starts. + Stderr() io.ReadCloser +} + +type command struct { + cmd *osexec.Cmd + execContext *execContext + stdout io.ReadCloser + stderr io.ReadCloser +} + +func (c *command) Wait() error { + // After the command has finished (cmd.Wait call), remove the temporary script file + defer os.Remove(c.execContext.scriptFile) + + err := c.cmd.Wait() + if err != nil { + return err + } + + return nil +} + +func (c *command) Stdout() io.ReadCloser { + return c.stdout +} + +func (c *command) Stderr() io.ReadCloser { + return c.stderr +} + +type Executor struct { + interpreter interpreter + dir string +} + +func NewCommandExecutor(dir string) (*Executor, error) { + interpreter, err := findInterpreter() + if err != nil { + return nil, err + } + return &Executor{ + interpreter: interpreter, + dir: dir, + }, nil +} + +func (e *Executor) StartCommand(ctx context.Context, command string) (Command, error) { + ec, err := e.interpreter.prepare(command) + if err != nil { + return nil, err + } + return e.start(ctx, ec) +} + +func (e *Executor) start(ctx context.Context, ec *execContext) (Command, error) { + cmd := osexec.CommandContext(ctx, ec.executable, ec.args...) 
+ cmd.Dir = e.dir + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + return &command{cmd, ec, stdout, stderr}, cmd.Start() +} + +func (e *Executor) Exec(ctx context.Context, command string) ([]byte, error) { + cmd, err := e.StartCommand(ctx, command) + if err != nil { + return nil, err + } + + res, err := io.ReadAll(io.MultiReader(cmd.Stdout(), cmd.Stderr())) + if err != nil { + return nil, err + } + + return res, cmd.Wait() +} diff --git a/libs/exec/exec_test.go b/libs/exec/exec_test.go new file mode 100644 index 00000000..a1d8d6ff --- /dev/null +++ b/libs/exec/exec_test.go @@ -0,0 +1,136 @@ +package exec + +import ( + "context" + "fmt" + "io" + "runtime" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestExecutorWithSimpleInput(t *testing.T) { + executor, err := NewCommandExecutor(".") + assert.NoError(t, err) + out, err := executor.Exec(context.Background(), "echo 'Hello'") + assert.NoError(t, err) + assert.NotNil(t, out) + assert.Equal(t, "Hello\n", string(out)) +} + +func TestExecutorWithComplexInput(t *testing.T) { + executor, err := NewCommandExecutor(".") + assert.NoError(t, err) + out, err := executor.Exec(context.Background(), "echo 'Hello' && echo 'World'") + assert.NoError(t, err) + assert.NotNil(t, out) + assert.Equal(t, "Hello\nWorld\n", string(out)) +} + +func TestExecutorWithInvalidCommand(t *testing.T) { + executor, err := NewCommandExecutor(".") + assert.NoError(t, err) + out, err := executor.Exec(context.Background(), "invalid-command") + assert.Error(t, err) + assert.Contains(t, string(out), "invalid-command: command not found") +} + +func TestExecutorWithInvalidCommandWithWindowsLikePath(t *testing.T) { + if runtime.GOOS != "windows" { + t.SkipNow() + } + + executor, err := NewCommandExecutor(".") + assert.NoError(t, err) + out, err := executor.Exec(context.Background(), `"C:\Program Files\invalid-command.exe"`) + assert.Error(t, err) + assert.Contains(t, string(out), "C:\\Program Files\\invalid-command.exe: No such file or directory") +} + +func TestFindBashInterpreterNonWindows(t *testing.T) { + if runtime.GOOS == "windows" { + t.SkipNow() + } + + interpreter, err := findBashInterpreter() + assert.NoError(t, err) + assert.NotEmpty(t, interpreter) + + e, err := NewCommandExecutor(".") + assert.NoError(t, err) + e.interpreter = interpreter + + assert.NoError(t, err) + out, err := e.Exec(context.Background(), `echo "Hello from bash"`) + assert.NoError(t, err) + + assert.Equal(t, "Hello from bash\n", string(out)) +} + +func TestFindCmdInterpreter(t *testing.T) { + if runtime.GOOS != "windows" { + t.SkipNow() + } + + interpreter, err := findCmdInterpreter() + assert.NoError(t, err) + assert.NotEmpty(t, interpreter) + + e, err := NewCommandExecutor(".") + assert.NoError(t, err) + e.interpreter = interpreter + + assert.NoError(t, err) + out, err := e.Exec(context.Background(), `echo "Hello from cmd"`) + assert.NoError(t, err) + + assert.Contains(t, string(out), "Hello from cmd") +} + +func TestExecutorCleanupsTempFiles(t *testing.T) { + executor, err := NewCommandExecutor(".") + assert.NoError(t, err) + + ec, err := executor.interpreter.prepare("echo 'Hello'") + assert.NoError(t, err) + + cmd, err := executor.start(context.Background(), ec) + assert.NoError(t, err) + + fileName := ec.args[1] + assert.FileExists(t, fileName) + + err = cmd.Wait() + assert.NoError(t, err) + assert.NoFileExists(t, fileName) +} + +func 
TestMultipleCommandsRunInParallel(t *testing.T) {
+	executor, err := NewCommandExecutor(".")
+	assert.NoError(t, err)
+
+	const count = 5
+	var wg sync.WaitGroup
+
+	for i := 0; i < count; i++ {
+		wg.Add(1)
+		cmd, err := executor.StartCommand(context.Background(), fmt.Sprintf("echo 'Hello %d'", i))
+		assert.NoError(t, err)
+		go func(cmd Command, i int) {
+			defer wg.Done()
+
+			stdout := cmd.Stdout()
+			out, err := io.ReadAll(stdout)
+			assert.NoError(t, err)
+
+			err = cmd.Wait()
+			assert.NoError(t, err)
+
+			assert.Equal(t, fmt.Sprintf("Hello %d\n", i), string(out))
+		}(cmd, i)
+	}
+
+	wg.Wait()
+}
diff --git a/libs/exec/interpreter.go b/libs/exec/interpreter.go
new file mode 100644
index 00000000..e600e47f
--- /dev/null
+++ b/libs/exec/interpreter.go
@@ -0,0 +1,123 @@
+package exec
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	osexec "os/exec"
+)
+
+type interpreter interface {
+	prepare(string) (*execContext, error)
+}
+
+type execContext struct {
+	executable string
+	args       []string
+	scriptFile string
+}
+
+type bashInterpreter struct {
+	executable string
+}
+
+func (b *bashInterpreter) prepare(command string) (*execContext, error) {
+	filename, err := createTempScript(command, ".sh")
+	if err != nil {
+		return nil, err
+	}
+
+	return &execContext{
+		executable: b.executable,
+		args:       []string{"-e", filename},
+		scriptFile: filename,
+	}, nil
+}
+
+type cmdInterpreter struct {
+	executable string
+}
+
+func (c *cmdInterpreter) prepare(command string) (*execContext, error) {
+	filename, err := createTempScript(command, ".cmd")
+	if err != nil {
+		return nil, err
+	}
+
+	return &execContext{
+		executable: c.executable,
+		args:       []string{"/D", "/E:ON", "/V:OFF", "/S", "/C", fmt.Sprintf(`CALL %s`, filename)},
+		scriptFile: filename,
+	}, nil
+}
+
+func findInterpreter() (interpreter, error) {
+	interpreter, err := findBashInterpreter()
+	if err != nil {
+		return nil, err
+	}
+
+	if interpreter != nil {
+		return interpreter, nil
+	}
+
+	interpreter, err = findCmdInterpreter()
+	if err != nil {
+		return nil, err
+	}
+
+	if interpreter != nil {
+		return interpreter, nil
+	}
+
+	return nil, errors.New("no interpreter found")
+}
+
+func findBashInterpreter() (interpreter, error) {
+	// Look for the bash executable first (Linux, macOS, maybe Windows).
+	out, err := osexec.LookPath("bash")
+	if err != nil && !errors.Is(err, osexec.ErrNotFound) {
+		return nil, err
+	}
+
+	// The bash executable was not found; return early.
+	if out == "" {
+		return nil, nil
+	}
+
+	return &bashInterpreter{executable: out}, nil
+}
+
+func findCmdInterpreter() (interpreter, error) {
+	// Look for the cmd executable (Windows).
+	out, err := osexec.LookPath("cmd")
+	if err != nil && !errors.Is(err, osexec.ErrNotFound) {
+		return nil, err
+	}
+
+	// The cmd executable was not found; return early.
+	if out == "" {
+		return nil, nil
+	}
+
+	return &cmdInterpreter{executable: out}, nil
+}
+
+func createTempScript(command string, extension string) (string, error) {
+	file, err := os.CreateTemp(os.TempDir(), "cli-exec*"+extension)
+	if err != nil {
+		return "", err
+	}
+
+	defer file.Close()
+
+	_, err = io.WriteString(file, command)
+	if err != nil {
+		// Try to remove the file if we failed to write to it.
+		os.Remove(file.Name())
+		return "", err
+	}
+
+	return file.Name(), nil
+}

From a1297d71fd1646051612d7a4170394f93994b72c Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Fri, 22 Dec 2023 11:38:09 +0100
Subject: [PATCH 301/310] Functionality to walk a `config.Value` tree (#1081)

## Changes

This change adds:

* A `config.Walk`
function to walk a configuration tree * A `config.Path` type to represent a value's path inside a tree * Functions to create a `config.Path` from a string, or convert one to a string ## Tests Additional unit tests with full coverage. --- libs/config/path.go | 96 ++++++++++++ libs/config/path_string.go | 89 +++++++++++ libs/config/path_string_test.go | 100 +++++++++++++ libs/config/path_test.go | 76 ++++++++++ libs/config/walk.go | 66 +++++++++ libs/config/walk_test.go | 254 ++++++++++++++++++++++++++++++++ 6 files changed, 681 insertions(+) create mode 100644 libs/config/path.go create mode 100644 libs/config/path_string.go create mode 100644 libs/config/path_string_test.go create mode 100644 libs/config/path_test.go create mode 100644 libs/config/walk.go create mode 100644 libs/config/walk_test.go diff --git a/libs/config/path.go b/libs/config/path.go new file mode 100644 index 00000000..f1abf48c --- /dev/null +++ b/libs/config/path.go @@ -0,0 +1,96 @@ +package config + +import ( + "bytes" + "fmt" +) + +type pathComponent struct { + key string + index int +} + +// Path represents a path to a value in a [Value] configuration tree. +type Path []pathComponent + +// EmptyPath is the empty path. +// It is defined for convenience and clarity. +var EmptyPath = Path{} + +// Key returns a path component for a key. +func Key(k string) pathComponent { + return pathComponent{key: k} +} + +// Index returns a path component for an index. +func Index(i int) pathComponent { + return pathComponent{index: i} +} + +// NewPath returns a new path from the given components. +// The individual components may be created with [Key] or [Index]. +func NewPath(cs ...pathComponent) Path { + return cs +} + +// Join joins the given paths. +func (p Path) Join(qs ...Path) Path { + for _, q := range qs { + p = p.Append(q...) + } + return p +} + +// Append appends the given components to the path. +func (p Path) Append(cs ...pathComponent) Path { + return append(p, cs...) +} + +// Equal returns true if the paths are equal. +func (p Path) Equal(q Path) bool { + pl := len(p) + ql := len(q) + if pl != ql { + return false + } + for i := 0; i < pl; i++ { + if p[i] != q[i] { + return false + } + } + return true +} + +// HasPrefix returns true if the path has the specified prefix. +// The empty path is a prefix of all paths. +func (p Path) HasPrefix(q Path) bool { + pl := len(p) + ql := len(q) + if pl < ql { + return false + } + for i := 0; i < ql; i++ { + if p[i] != q[i] { + return false + } + } + return true +} + +// String returns a string representation of the path. +func (p Path) String() string { + var buf bytes.Buffer + + for i, c := range p { + if i > 0 && c.key != "" { + buf.WriteRune('.') + } + if c.key != "" { + buf.WriteString(c.key) + } else { + buf.WriteString(fmt.Sprintf("[%d]", c.index)) + } + } + + return buf.String() +} diff --git a/libs/config/path_string.go b/libs/config/path_string.go new file mode 100644 index 00000000..9538ad27 --- /dev/null +++ b/libs/config/path_string.go @@ -0,0 +1,89 @@ +package config + +import ( + "fmt" + "strconv" + "strings" +) + +// MustPathFromString is like NewPathFromString but panics on error. +func MustPathFromString(input string) Path { + p, err := NewPathFromString(input) + if err != nil { + panic(err) + } + return p +} + +// NewPathFromString parses a path from a string. +// +// The string must be a sequence of keys and indices separated by dots. +// Indices must be enclosed in square brackets. +// The string may include a leading dot. 
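+// Both the empty string and a lone dot parse to the empty path.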
+// +// Examples: +// - foo.bar +// - foo[1].bar +// - foo.bar[1] +// - foo.bar[1][2] +// - . +func NewPathFromString(input string) (Path, error) { + var path Path + + p := input + + // Trim leading dot. + if p != "" && p[0] == '.' { + p = p[1:] + } + + for p != "" { + // Every component may have a leading dot. + if p != "" && p[0] == '.' { + p = p[1:] + } + + if p == "" { + return nil, fmt.Errorf("invalid path: %s", input) + } + + if p[0] == '[' { + // Find next ] + i := strings.Index(p, "]") + if i < 0 { + return nil, fmt.Errorf("invalid path: %s", input) + } + + // Parse index + j, err := strconv.Atoi(p[1:i]) + if err != nil { + return nil, fmt.Errorf("invalid path: %s", input) + } + + // Append index + path = append(path, Index(j)) + p = p[i+1:] + + // The next character must be a . or [ + if p != "" && strings.IndexAny(p, ".[") != 0 { + return nil, fmt.Errorf("invalid path: %s", input) + } + } else { + // Find next . or [ + i := strings.IndexAny(p, ".[") + if i < 0 { + i = len(p) + } + + if i == 0 { + return nil, fmt.Errorf("invalid path: %s", input) + } + + // Append key + path = append(path, Key(p[:i])) + p = p[i:] + } + } + + return path, nil +} diff --git a/libs/config/path_string_test.go b/libs/config/path_string_test.go new file mode 100644 index 00000000..89e64561 --- /dev/null +++ b/libs/config/path_string_test.go @@ -0,0 +1,100 @@ +package config_test + +import ( + "fmt" + "testing" + + . "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" +) + +func TestNewPathFromString(t *testing.T) { + for _, tc := range []struct { + input string + output Path + err error + }{ + { + input: "", + output: NewPath(), + }, + { + input: ".", + output: NewPath(), + }, + { + input: "foo.bar", + output: NewPath(Key("foo"), Key("bar")), + }, + { + input: "[1]", + output: NewPath(Index(1)), + }, + { + input: "foo[1].bar", + output: NewPath(Key("foo"), Index(1), Key("bar")), + }, + { + input: "foo.bar[1]", + output: NewPath(Key("foo"), Key("bar"), Index(1)), + }, + { + input: "foo.bar[1][2]", + output: NewPath(Key("foo"), Key("bar"), Index(1), Index(2)), + }, + { + input: "foo.bar[1][2][3]", + output: NewPath(Key("foo"), Key("bar"), Index(1), Index(2), Index(3)), + }, + { + input: "foo[1234]", + output: NewPath(Key("foo"), Index(1234)), + }, + { + input: "foo[123", + err: fmt.Errorf("invalid path: foo[123"), + }, + { + input: "foo[123]]", + err: fmt.Errorf("invalid path: foo[123]]"), + }, + { + input: "foo[[123]", + err: fmt.Errorf("invalid path: foo[[123]"), + }, + { + input: "foo[[123]]", + err: fmt.Errorf("invalid path: foo[[123]]"), + }, + { + input: "foo[foo]", + err: fmt.Errorf("invalid path: foo[foo]"), + }, + { + input: "foo..bar", + err: fmt.Errorf("invalid path: foo..bar"), + }, + { + input: "foo.bar.", + err: fmt.Errorf("invalid path: foo.bar."), + }, + { + // Every component may have a leading dot. + input: ".foo.[1].bar", + output: NewPath(Key("foo"), Index(1), Key("bar")), + }, + { + // But after an index there must be a dot. 
+ input: "foo[1]bar", + err: fmt.Errorf("invalid path: foo[1]bar"), + }, + } { + p, err := NewPathFromString(tc.input) + if tc.err != nil { + assert.EqualError(t, err, tc.err.Error(), tc.input) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.output, p) + } + } +} diff --git a/libs/config/path_test.go b/libs/config/path_test.go new file mode 100644 index 00000000..3fdd848e --- /dev/null +++ b/libs/config/path_test.go @@ -0,0 +1,76 @@ +package config_test + +import ( + "testing" + + "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" +) + +func TestPathAppend(t *testing.T) { + p := config.NewPath(config.Key("foo")) + + // Single arg. + p1 := p.Append(config.Key("bar")) + assert.True(t, p1.Equal(config.NewPath(config.Key("foo"), config.Key("bar")))) + + // Multiple args. + p2 := p.Append(config.Key("bar"), config.Index(1)) + assert.True(t, p2.Equal(config.NewPath(config.Key("foo"), config.Key("bar"), config.Index(1)))) +} + +func TestPathJoin(t *testing.T) { + p := config.NewPath(config.Key("foo")) + + // Single arg. + p1 := p.Join(config.NewPath(config.Key("bar"))) + assert.True(t, p1.Equal(config.NewPath(config.Key("foo"), config.Key("bar")))) + + // Multiple args. + p2 := p.Join(config.NewPath(config.Key("bar")), config.NewPath(config.Index(1))) + assert.True(t, p2.Equal(config.NewPath(config.Key("foo"), config.Key("bar"), config.Index(1)))) +} + +func TestPathEqualEmpty(t *testing.T) { + assert.True(t, config.EmptyPath.Equal(config.EmptyPath)) +} + +func TestPathEqual(t *testing.T) { + p1 := config.NewPath(config.Key("foo"), config.Index(1)) + p2 := config.NewPath(config.Key("bar"), config.Index(2)) + assert.False(t, p1.Equal(p2), "expected %q to not equal %q", p1, p2) + + p3 := config.NewPath(config.Key("foo"), config.Index(1)) + assert.True(t, p1.Equal(p3), "expected %q to equal %q", p1, p3) + + p4 := config.NewPath(config.Key("foo"), config.Index(1), config.Key("bar"), config.Index(2)) + assert.False(t, p1.Equal(p4), "expected %q to not equal %q", p1, p4) +} + +func TestPathHasPrefixEmpty(t *testing.T) { + empty := config.EmptyPath + nonEmpty := config.NewPath(config.Key("foo")) + assert.True(t, empty.HasPrefix(empty)) + assert.True(t, nonEmpty.HasPrefix(empty)) + assert.False(t, empty.HasPrefix(nonEmpty)) +} + +func TestPathHasPrefix(t *testing.T) { + p1 := config.NewPath(config.Key("foo"), config.Index(1)) + p2 := config.NewPath(config.Key("bar"), config.Index(2)) + assert.False(t, p1.HasPrefix(p2), "expected %q to not have prefix %q", p1, p2) + + p3 := config.NewPath(config.Key("foo")) + assert.True(t, p1.HasPrefix(p3), "expected %q to have prefix %q", p1, p3) +} + +func TestPathString(t *testing.T) { + p1 := config.NewPath(config.Key("foo"), config.Index(1)) + assert.Equal(t, "foo[1]", p1.String()) + + p2 := config.NewPath(config.Key("bar"), config.Index(2), config.Key("baz")) + assert.Equal(t, "bar[2].baz", p2.String()) + + p3 := config.NewPath(config.Key("foo"), config.Index(1), config.Key("bar"), config.Index(2), config.Key("baz")) + assert.Equal(t, "foo[1].bar[2].baz", p3.String()) +} diff --git a/libs/config/walk.go b/libs/config/walk.go new file mode 100644 index 00000000..ce058338 --- /dev/null +++ b/libs/config/walk.go @@ -0,0 +1,66 @@ +package config + +import "errors" + +// WalkValueFunc is the type of the function called by Walk to traverse the configuration tree. +type WalkValueFunc func(p Path, v Value) (Value, error) + +// ErrDrop may be returned by WalkValueFunc to remove a value from the subtree. 
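+// The dropped value is removed from the map or sequence that contains it.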
+var ErrDrop = errors.New("drop value from subtree") + +// ErrSkip may be returned by WalkValueFunc to skip traversal of a subtree. +var ErrSkip = errors.New("skip traversal of subtree") + +// Walk walks the configuration tree and calls the given function on each node. +// The callback may return ErrDrop to remove a value from the subtree. +// The callback may return ErrSkip to skip traversal of a subtree. +// If the callback returns another error, the walk is aborted, and the error is returned. +func Walk(v Value, fn func(p Path, v Value) (Value, error)) (Value, error) { + return walk(v, EmptyPath, fn) +} + +// Unexported counterpart to Walk. +// It carries the path leading up to the current node, +// such that it can be passed to the WalkValueFunc. +func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, error) { + v, err := fn(p, v) + if err != nil { + if err == ErrSkip { + return v, nil + } + return NilValue, err + } + + switch v.Kind() { + case KindMap: + m := v.MustMap() + out := make(map[string]Value, len(m)) + for k := range m { + nv, err := walk(m[k], p.Append(Key(k)), fn) + if err == ErrDrop { + continue + } + if err != nil { + return NilValue, err + } + out[k] = nv + } + v.v = out + case KindSequence: + s := v.MustSequence() + out := make([]Value, 0, len(s)) + for i := range s { + nv, err := walk(s[i], p.Append(Index(i)), fn) + if err == ErrDrop { + continue + } + if err != nil { + return NilValue, err + } + out = append(out, nv) + } + v.v = out + } + + return v, nil +} diff --git a/libs/config/walk_test.go b/libs/config/walk_test.go new file mode 100644 index 00000000..806ca256 --- /dev/null +++ b/libs/config/walk_test.go @@ -0,0 +1,254 @@ +package config_test + +import ( + "errors" + "testing" + + . "github.com/databricks/cli/libs/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Return values for specific paths. +type walkReturn struct { + path Path + + // Return values. + fn func(Value) Value + err error +} + +// Track the calls to the callback. +type walkCall struct { + path Path + value Value +} + +// Track the calls to the callback. +type walkCallTracker struct { + returns []walkReturn + calls []walkCall +} + +func (w *walkCallTracker) on(path string, fn func(Value) Value, err error) { + w.returns = append(w.returns, walkReturn{MustPathFromString(path), fn, err}) +} + +func (w *walkCallTracker) returnSkip(path string) { + w.on(path, func(v Value) Value { return v }, ErrSkip) +} + +func (w *walkCallTracker) returnDrop(path string) { + w.on(path, func(v Value) Value { return NilValue }, ErrDrop) +} + +func (w *walkCallTracker) track(p Path, v Value) (Value, error) { + w.calls = append(w.calls, walkCall{p, v}) + + // Look for matching return. + for _, r := range w.returns { + if p.Equal(r.path) { + return r.fn(v), r.err + } + } + + return v, nil +} + +func TestWalkEmpty(t *testing.T) { + var tracker walkCallTracker + + value := V(nil) + out, err := Walk(value, tracker.track) + require.NoError(t, err) + assert.Equal(t, value, out) + + // The callback should have been called once. + assert.Len(t, tracker.calls, 1) + + // The call should have been made with the empty path. + assert.Equal(t, EmptyPath, tracker.calls[0].path) + + // The value should be the same as the input. + assert.Equal(t, value, tracker.calls[0].value) +} + +func TestWalkMapSkip(t *testing.T) { + var tracker walkCallTracker + + // Skip traversal of the root value. 
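+	// The callback itself still runs once for the root; ErrSkip only
+	// prevents Walk from descending into the root's children.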
+ tracker.returnSkip(".") + + value := V(map[string]Value{ + "key": V("value"), + }) + out, err := Walk(value, tracker.track) + require.NoError(t, err) + assert.Equal( + t, + V(map[string]Value{ + "key": V("value"), + }), + out, + ) + + // The callback should have been called once. + assert.Len(t, tracker.calls, 1) + + // The call should have been made with the empty path. + assert.Equal(t, EmptyPath, tracker.calls[0].path) + + // The value should be the same as the input. + assert.Equal(t, value, tracker.calls[0].value) +} + +func TestWalkMapDrop(t *testing.T) { + var tracker walkCallTracker + + // Drop the value at key "foo". + tracker.returnDrop(".foo") + + value := V(map[string]Value{ + "foo": V("bar"), + "bar": V("baz"), + }) + out, err := Walk(value, tracker.track) + require.NoError(t, err) + assert.Equal( + t, + V(map[string]Value{ + "bar": V("baz"), + }), + out, + ) + + // The callback should have been called for the root and every key in the map. + assert.Len(t, tracker.calls, 3) + + // Calls 2 and 3 have been made for the keys in the map. + assert.ElementsMatch(t, + []Path{ + tracker.calls[1].path, + tracker.calls[2].path, + }, []Path{ + MustPathFromString(".foo"), + MustPathFromString(".bar"), + }) +} + +func TestWalkMapError(t *testing.T) { + var tracker walkCallTracker + + // Return an error from the callback for key "foo". + cerr := errors.New("error!") + tracker.on(".foo", func(v Value) Value { return v }, cerr) + + value := V(map[string]Value{ + "foo": V("bar"), + }) + out, err := Walk(value, tracker.track) + assert.Equal(t, cerr, err) + assert.Equal(t, NilValue, out) + + // The callback should have been called twice. + assert.Len(t, tracker.calls, 2) + + // The second call was for the value at key "foo". + assert.Equal(t, MustPathFromString(".foo"), tracker.calls[1].path) +} + +func TestWalkSequenceSkip(t *testing.T) { + var tracker walkCallTracker + + // Skip traversal of the root value. + tracker.returnSkip(".") + + value := V([]Value{ + V("foo"), + V("bar"), + }) + out, err := Walk(value, tracker.track) + require.NoError(t, err) + assert.Equal( + t, + V([]Value{ + V("foo"), + V("bar"), + }), + out, + ) + + // The callback should have been called once. + assert.Len(t, tracker.calls, 1) + + // The call should have been made with the empty path. + assert.Equal(t, EmptyPath, tracker.calls[0].path) + + // The value should be the same as the input. + assert.Equal(t, value, tracker.calls[0].value) +} + +func TestWalkSequenceDrop(t *testing.T) { + var tracker walkCallTracker + + // Drop the value at index 1. + tracker.returnDrop(".[1]") + + value := V([]Value{ + V("foo"), + V("bar"), + V("baz"), + }) + out, err := Walk(value, tracker.track) + require.NoError(t, err) + assert.Equal( + t, + V([]Value{ + V("foo"), + V("baz"), + }), + out, + ) + + // The callback should have been called for the root and every value in the sequence. + assert.Len(t, tracker.calls, 4) + + // The second call was for the value at index 0. + assert.Equal(t, MustPathFromString(".[0]"), tracker.calls[1].path) + assert.Equal(t, V("foo"), tracker.calls[1].value) + + // The third call was for the value at index 1. + assert.Equal(t, MustPathFromString(".[1]"), tracker.calls[2].path) + assert.Equal(t, V("bar"), tracker.calls[2].value) + + // The fourth call was for the value at index 2. 
+ assert.Equal(t, MustPathFromString(".[2]"), tracker.calls[3].path) + assert.Equal(t, V("baz"), tracker.calls[3].value) +} + +func TestWalkSequenceError(t *testing.T) { + var tracker walkCallTracker + + // Return an error from the callback for index 1. + cerr := errors.New("error!") + tracker.on(".[1]", func(v Value) Value { return v }, cerr) + + value := V([]Value{ + V("foo"), + V("bar"), + }) + out, err := Walk(value, tracker.track) + assert.Equal(t, cerr, err) + assert.Equal(t, NilValue, out) + + // The callback should have been called three times. + assert.Len(t, tracker.calls, 3) + + // The second call was for the value at index 0. + assert.Equal(t, MustPathFromString(".[0]"), tracker.calls[1].path) + assert.Equal(t, V("foo"), tracker.calls[1].value) + + // The third call was for the value at index 1. + assert.Equal(t, MustPathFromString(".[1]"), tracker.calls[2].path) + assert.Equal(t, V("bar"), tracker.calls[2].value) +} From 938eb1600c9682b33b8de0b74273faf0687b9192 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 22 Dec 2023 14:20:45 +0100 Subject: [PATCH 302/310] Rename libs/config -> libs/dyn (#1086) ## Changes The name "dynamic value", or "dyn" for short, is more descriptive than the opaque "config". Also, it conveniently does not alias with other packages in the repository, or (popular ones) elsewhere. (discussed with @andrewnester) ## Tests n/a --- libs/config/path_test.go | 76 ------- libs/diag/diagnostic.go | 4 +- .../convert/end_to_end_test.go | 4 +- libs/{config => dyn}/convert/error.go | 4 +- libs/{config => dyn}/convert/from_typed.go | 108 +++++----- .../convert/from_typed_test.go | 186 +++++++++--------- libs/{config => dyn}/convert/normalize.go | 96 ++++----- .../{config => dyn}/convert/normalize_test.go | 132 ++++++------- libs/{config => dyn}/convert/struct_info.go | 14 +- .../convert/struct_info_test.go | 8 +- libs/{config => dyn}/convert/to_typed.go | 54 ++--- libs/{config => dyn}/convert/to_typed_test.go | 126 ++++++------ libs/{config => dyn}/kind.go | 2 +- libs/{config => dyn}/location.go | 2 +- libs/{config => dyn}/location_test.go | 6 +- libs/{config => dyn}/merge/merge.go | 40 ++-- libs/{config => dyn}/merge/merge_test.go | 76 +++---- libs/{config => dyn}/path.go | 2 +- libs/{config => dyn}/path_string.go | 2 +- libs/{config => dyn}/path_string_test.go | 4 +- libs/dyn/path_test.go | 76 +++++++ libs/{config => dyn}/value.go | 2 +- libs/{config => dyn}/value_test.go | 22 +-- libs/{config => dyn}/walk.go | 2 +- libs/{config => dyn}/walk_test.go | 4 +- libs/{config => dyn}/yamlloader/loader.go | 80 ++++---- .../yamlloader/testdata/anchor_01.yml | 0 .../yamlloader/testdata/anchor_02.yml | 0 .../yamlloader/testdata/anchor_03.yml | 0 .../yamlloader/testdata/anchor_04.yml | 0 .../yamlloader/testdata/anchor_05.yml | 0 .../yamlloader/testdata/anchor_06.yml | 0 .../yamlloader/testdata/anchor_07.yml | 0 .../yamlloader/testdata/anchor_08.yml | 0 .../yamlloader/testdata/empty.yml | 0 .../yamlloader/testdata/error_01.yml | 0 .../yamlloader/testdata/error_02.yml | 0 .../yamlloader/testdata/error_03.yml | 0 .../yamlloader/testdata/mix_01.yml | 0 .../yamlloader/testdata/mix_02.yml | 0 libs/{config => dyn}/yamlloader/yaml.go | 8 +- .../yamlloader/yaml_anchor_test.go | 44 ++--- .../yamlloader/yaml_error_test.go | 2 +- .../yamlloader/yaml_mix_test.go | 6 +- libs/{config => dyn}/yamlloader/yaml_test.go | 8 +- 45 files changed, 600 insertions(+), 600 deletions(-) delete mode 100644 libs/config/path_test.go rename libs/{config => dyn}/convert/end_to_end_test.go (93%) 
rename libs/{config => dyn}/convert/error.go (73%) rename libs/{config => dyn}/convert/from_typed.go (58%) rename libs/{config => dyn}/convert/from_typed_test.go (60%) rename libs/{config => dyn}/convert/normalize.go (58%) rename libs/{config => dyn}/convert/normalize_test.go (74%) rename libs/{config => dyn}/convert/struct_info.go (87%) rename libs/{config => dyn}/convert/struct_info_test.go (97%) rename libs/{config => dyn}/convert/to_typed.go (81%) rename libs/{config => dyn}/convert/to_typed_test.go (76%) rename libs/{config => dyn}/kind.go (98%) rename libs/{config => dyn}/location.go (92%) rename libs/{config => dyn}/location_test.go (54%) rename libs/{config => dyn}/merge/merge.go (60%) rename libs/{config => dyn}/merge/merge_test.go (67%) rename libs/{config => dyn}/path.go (99%) rename libs/{config => dyn}/path_string.go (99%) rename libs/{config => dyn}/path_string_test.go (96%) create mode 100644 libs/dyn/path_test.go rename libs/{config => dyn}/value.go (99%) rename libs/{config => dyn}/value_test.go (55%) rename libs/{config => dyn}/walk.go (99%) rename libs/{config => dyn}/walk_test.go (98%) rename libs/{config => dyn}/yamlloader/loader.go (61%) rename libs/{config => dyn}/yamlloader/testdata/anchor_01.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/anchor_02.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/anchor_03.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/anchor_04.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/anchor_05.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/anchor_06.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/anchor_07.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/anchor_08.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/empty.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/error_01.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/error_02.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/error_03.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/mix_01.yml (100%) rename libs/{config => dyn}/yamlloader/testdata/mix_02.yml (100%) rename libs/{config => dyn}/yamlloader/yaml.go (56%) rename libs/{config => dyn}/yamlloader/yaml_anchor_test.go (61%) rename libs/{config => dyn}/yamlloader/yaml_error_test.go (94%) rename libs/{config => dyn}/yamlloader/yaml_mix_test.go (79%) rename libs/{config => dyn}/yamlloader/yaml_test.go (76%) diff --git a/libs/config/path_test.go b/libs/config/path_test.go deleted file mode 100644 index 3fdd848e..00000000 --- a/libs/config/path_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package config_test - -import ( - "testing" - - "github.com/databricks/cli/libs/config" - "github.com/stretchr/testify/assert" -) - -func TestPathAppend(t *testing.T) { - p := config.NewPath(config.Key("foo")) - - // Single arg. - p1 := p.Append(config.Key("bar")) - assert.True(t, p1.Equal(config.NewPath(config.Key("foo"), config.Key("bar")))) - - // Multiple args. - p2 := p.Append(config.Key("bar"), config.Index(1)) - assert.True(t, p2.Equal(config.NewPath(config.Key("foo"), config.Key("bar"), config.Index(1)))) -} - -func TestPathJoin(t *testing.T) { - p := config.NewPath(config.Key("foo")) - - // Single arg. - p1 := p.Join(config.NewPath(config.Key("bar"))) - assert.True(t, p1.Equal(config.NewPath(config.Key("foo"), config.Key("bar")))) - - // Multiple args. 
- p2 := p.Join(config.NewPath(config.Key("bar")), config.NewPath(config.Index(1))) - assert.True(t, p2.Equal(config.NewPath(config.Key("foo"), config.Key("bar"), config.Index(1)))) -} - -func TestPathEqualEmpty(t *testing.T) { - assert.True(t, config.EmptyPath.Equal(config.EmptyPath)) -} - -func TestPathEqual(t *testing.T) { - p1 := config.NewPath(config.Key("foo"), config.Index(1)) - p2 := config.NewPath(config.Key("bar"), config.Index(2)) - assert.False(t, p1.Equal(p2), "expected %q to not equal %q", p1, p2) - - p3 := config.NewPath(config.Key("foo"), config.Index(1)) - assert.True(t, p1.Equal(p3), "expected %q to equal %q", p1, p3) - - p4 := config.NewPath(config.Key("foo"), config.Index(1), config.Key("bar"), config.Index(2)) - assert.False(t, p1.Equal(p4), "expected %q to not equal %q", p1, p4) -} - -func TestPathHasPrefixEmpty(t *testing.T) { - empty := config.EmptyPath - nonEmpty := config.NewPath(config.Key("foo")) - assert.True(t, empty.HasPrefix(empty)) - assert.True(t, nonEmpty.HasPrefix(empty)) - assert.False(t, empty.HasPrefix(nonEmpty)) -} - -func TestPathHasPrefix(t *testing.T) { - p1 := config.NewPath(config.Key("foo"), config.Index(1)) - p2 := config.NewPath(config.Key("bar"), config.Index(2)) - assert.False(t, p1.HasPrefix(p2), "expected %q to not have prefix %q", p1, p2) - - p3 := config.NewPath(config.Key("foo")) - assert.True(t, p1.HasPrefix(p3), "expected %q to have prefix %q", p1, p3) -} - -func TestPathString(t *testing.T) { - p1 := config.NewPath(config.Key("foo"), config.Index(1)) - assert.Equal(t, "foo[1]", p1.String()) - - p2 := config.NewPath(config.Key("bar"), config.Index(2), config.Key("baz")) - assert.Equal(t, "bar[2].baz", p2.String()) - - p3 := config.NewPath(config.Key("foo"), config.Index(1), config.Key("bar"), config.Index(2), config.Key("baz")) - assert.Equal(t, "foo[1].bar[2].baz", p3.String()) -} diff --git a/libs/diag/diagnostic.go b/libs/diag/diagnostic.go index c5757a58..02d2e7c1 100644 --- a/libs/diag/diagnostic.go +++ b/libs/diag/diagnostic.go @@ -3,7 +3,7 @@ package diag import ( "fmt" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" ) type Diagnostic struct { @@ -19,7 +19,7 @@ type Diagnostic struct { // Location is a source code location associated with the diagnostic message. // It may be zero if there is no associated location. - Location config.Location + Location dyn.Location } // Errorf creates a new error diagnostic. 
diff --git a/libs/config/convert/end_to_end_test.go b/libs/dyn/convert/end_to_end_test.go similarity index 93% rename from libs/config/convert/end_to_end_test.go rename to libs/dyn/convert/end_to_end_test.go index c06830e8..fbb84336 100644 --- a/libs/config/convert/end_to_end_test.go +++ b/libs/dyn/convert/end_to_end_test.go @@ -3,13 +3,13 @@ package convert import ( "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func assertFromTypedToTypedEqual[T any](t *testing.T, src T) { - nv, err := FromTyped(src, config.NilValue) + nv, err := FromTyped(src, dyn.NilValue) require.NoError(t, err) var dst T diff --git a/libs/config/convert/error.go b/libs/dyn/convert/error.go similarity index 73% rename from libs/config/convert/error.go rename to libs/dyn/convert/error.go index b55668d6..d3770d82 100644 --- a/libs/config/convert/error.go +++ b/libs/dyn/convert/error.go @@ -3,11 +3,11 @@ package convert import ( "fmt" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" ) type TypeError struct { - value config.Value + value dyn.Value msg string } diff --git a/libs/config/convert/from_typed.go b/libs/dyn/convert/from_typed.go similarity index 58% rename from libs/config/convert/from_typed.go rename to libs/dyn/convert/from_typed.go index e3911a9e..0659d1cd 100644 --- a/libs/config/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -4,18 +4,18 @@ import ( "fmt" "reflect" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" ) // FromTyped converts changes made in the typed structure w.r.t. the configuration value // back to the configuration value, retaining existing location information where possible. -func FromTyped(src any, ref config.Value) (config.Value, error) { +func FromTyped(src any, ref dyn.Value) (dyn.Value, error) { srcv := reflect.ValueOf(src) // Dereference pointer if necessary for srcv.Kind() == reflect.Pointer { if srcv.IsNil() { - return config.NilValue, nil + return dyn.NilValue, nil } srcv = srcv.Elem() } @@ -37,53 +37,53 @@ func FromTyped(src any, ref config.Value) (config.Value, error) { return fromTypedFloat(srcv, ref) } - return config.NilValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) + return dyn.NilValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) } -func fromTypedStruct(src reflect.Value, ref config.Value) (config.Value, error) { +func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { - case config.KindMap, config.KindNil: + case dyn.KindMap, dyn.KindNil: default: - return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) } - out := make(map[string]config.Value) + out := make(map[string]dyn.Value) info := getStructInfo(src.Type()) for k, v := range info.FieldValues(src) { // Convert the field taking into account the reference value (may be equal to config.NilValue). nv, err := FromTyped(v.Interface(), ref.Get(k)) if err != nil { - return config.Value{}, err + return dyn.Value{}, err } - if nv != config.NilValue { + if nv != dyn.NilValue { out[k] = nv } } // If the struct was equal to its zero value, emit a nil. 
if len(out) == 0 { - return config.NilValue, nil + return dyn.NilValue, nil } - return config.NewValue(out, ref.Location()), nil + return dyn.NewValue(out, ref.Location()), nil } -func fromTypedMap(src reflect.Value, ref config.Value) (config.Value, error) { +func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { - case config.KindMap, config.KindNil: + case dyn.KindMap, dyn.KindNil: default: - return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) } // Return nil if the map is nil. if src.IsNil() { - return config.NilValue, nil + return dyn.NilValue, nil } - out := make(map[string]config.Value) + out := make(map[string]dyn.Value) iter := src.MapRange() for iter.Next() { k := iter.Key().String() @@ -92,7 +92,7 @@ func fromTypedMap(src reflect.Value, ref config.Value) (config.Value, error) { // Convert entry taking into account the reference value (may be equal to config.NilValue). nv, err := FromTyped(v.Interface(), ref.Get(k)) if err != nil { - return config.Value{}, err + return dyn.Value{}, err } // Every entry is represented, even if it is a nil. @@ -100,115 +100,115 @@ func fromTypedMap(src reflect.Value, ref config.Value) (config.Value, error) { out[k] = nv } - return config.NewValue(out, ref.Location()), nil + return dyn.NewValue(out, ref.Location()), nil } -func fromTypedSlice(src reflect.Value, ref config.Value) (config.Value, error) { +func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { - case config.KindSequence, config.KindNil: + case dyn.KindSequence, dyn.KindNil: default: - return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) } // Return nil if the slice is nil. if src.IsNil() { - return config.NilValue, nil + return dyn.NilValue, nil } - out := make([]config.Value, src.Len()) + out := make([]dyn.Value, src.Len()) for i := 0; i < src.Len(); i++ { v := src.Index(i) // Convert entry taking into account the reference value (may be equal to config.NilValue). nv, err := FromTyped(v.Interface(), ref.Index(i)) if err != nil { - return config.Value{}, err + return dyn.Value{}, err } out[i] = nv } - return config.NewValue(out, ref.Location()), nil + return dyn.NewValue(out, ref.Location()), nil } -func fromTypedString(src reflect.Value, ref config.Value) (config.Value, error) { +func fromTypedString(src reflect.Value, ref dyn.Value) (dyn.Value, error) { switch ref.Kind() { - case config.KindString: + case dyn.KindString: value := src.String() if value == ref.MustString() { return ref, nil } - return config.V(value), nil - case config.KindNil: + return dyn.V(value), nil + case dyn.KindNil: // This field is not set in the reference, so we only include it if it has a non-zero value. // Otherwise, we would always include all zero valued fields. 
if src.IsZero() { - return config.NilValue, nil + return dyn.NilValue, nil } - return config.V(src.String()), nil + return dyn.V(src.String()), nil } - return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) } -func fromTypedBool(src reflect.Value, ref config.Value) (config.Value, error) { +func fromTypedBool(src reflect.Value, ref dyn.Value) (dyn.Value, error) { switch ref.Kind() { - case config.KindBool: + case dyn.KindBool: value := src.Bool() if value == ref.MustBool() { return ref, nil } - return config.V(value), nil - case config.KindNil: + return dyn.V(value), nil + case dyn.KindNil: // This field is not set in the reference, so we only include it if it has a non-zero value. // Otherwise, we would always include all zero valued fields. if src.IsZero() { - return config.NilValue, nil + return dyn.NilValue, nil } - return config.V(src.Bool()), nil + return dyn.V(src.Bool()), nil } - return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) } -func fromTypedInt(src reflect.Value, ref config.Value) (config.Value, error) { +func fromTypedInt(src reflect.Value, ref dyn.Value) (dyn.Value, error) { switch ref.Kind() { - case config.KindInt: + case dyn.KindInt: value := src.Int() if value == ref.MustInt() { return ref, nil } - return config.V(value), nil - case config.KindNil: + return dyn.V(value), nil + case dyn.KindNil: // This field is not set in the reference, so we only include it if it has a non-zero value. // Otherwise, we would always include all zero valued fields. if src.IsZero() { - return config.NilValue, nil + return dyn.NilValue, nil } - return config.V(src.Int()), nil + return dyn.V(src.Int()), nil } - return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) } -func fromTypedFloat(src reflect.Value, ref config.Value) (config.Value, error) { +func fromTypedFloat(src reflect.Value, ref dyn.Value) (dyn.Value, error) { switch ref.Kind() { - case config.KindFloat: + case dyn.KindFloat: value := src.Float() if value == ref.MustFloat() { return ref, nil } - return config.V(value), nil - case config.KindNil: + return dyn.V(value), nil + case dyn.KindNil: // This field is not set in the reference, so we only include it if it has a non-zero value. // Otherwise, we would always include all zero valued fields. 
if src.IsZero() { - return config.NilValue, nil + return dyn.NilValue, nil } - return config.V(src.Float()), nil + return dyn.V(src.Float()), nil } - return config.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) } diff --git a/libs/config/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go similarity index 60% rename from libs/config/convert/from_typed_test.go rename to libs/dyn/convert/from_typed_test.go index 2b28f549..0e9b9c7c 100644 --- a/libs/config/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -3,7 +3,7 @@ package convert import ( "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,11 +15,11 @@ func TestFromTypedStructZeroFields(t *testing.T) { } src := Tmp{} - ref := config.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NilValue, nv) + assert.Equal(t, dyn.NilValue, nv) } func TestFromTypedStructSetFields(t *testing.T) { @@ -33,12 +33,12 @@ func TestFromTypedStructSetFields(t *testing.T) { Bar: "bar", } - ref := config.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(map[string]config.Value{ - "foo": config.V("foo"), - "bar": config.V("bar"), + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.V("foo"), + "bar": dyn.V("bar"), }), nv) } @@ -53,45 +53,45 @@ func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { Bar: "qux", } - ref := config.V(map[string]config.Value{ - "foo": config.NewValue("bar", config.Location{File: "foo"}), - "bar": config.NewValue("baz", config.Location{File: "bar"}), + ref := dyn.V(map[string]dyn.Value{ + "foo": dyn.NewValue("bar", dyn.Location{File: "foo"}), + "bar": dyn.NewValue("baz", dyn.Location{File: "bar"}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) // Assert foo has retained its location. - assert.Equal(t, config.NewValue("bar", config.Location{File: "foo"}), nv.Get("foo")) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) // Assert bar lost its location (because it was overwritten). 
- assert.Equal(t, config.NewValue("qux", config.Location{}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) } func TestFromTypedMapNil(t *testing.T) { var src map[string]string = nil - ref := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + ref := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NilValue, nv) + assert.Equal(t, dyn.NilValue, nv) } func TestFromTypedMapEmpty(t *testing.T) { var src = map[string]string{} - ref := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + ref := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(map[string]config.Value{}), nv) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) } func TestFromTypedMapNonEmpty(t *testing.T) { @@ -100,12 +100,12 @@ func TestFromTypedMapNonEmpty(t *testing.T) { "bar": "bar", } - ref := config.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(map[string]config.Value{ - "foo": config.V("foo"), - "bar": config.V("bar"), + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.V("foo"), + "bar": dyn.V("bar"), }), nv) } @@ -115,19 +115,19 @@ func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { "bar": "qux", } - ref := config.V(map[string]config.Value{ - "foo": config.NewValue("bar", config.Location{File: "foo"}), - "bar": config.NewValue("baz", config.Location{File: "bar"}), + ref := dyn.V(map[string]dyn.Value{ + "foo": dyn.NewValue("bar", dyn.Location{File: "foo"}), + "bar": dyn.NewValue("baz", dyn.Location{File: "bar"}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) // Assert foo has retained its location. - assert.Equal(t, config.NewValue("bar", config.Location{File: "foo"}), nv.Get("foo")) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) // Assert bar lost its location (because it was overwritten). 
- assert.Equal(t, config.NewValue("qux", config.Location{}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { @@ -135,38 +135,38 @@ func TestFromTypedMapFieldWithZeroValue(t *testing.T) { "foo": "", } - ref := config.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(map[string]config.Value{ - "foo": config.NilValue, + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.NilValue, }), nv) } func TestFromTypedSliceNil(t *testing.T) { var src []string = nil - ref := config.V([]config.Value{ - config.V("bar"), - config.V("baz"), + ref := dyn.V([]dyn.Value{ + dyn.V("bar"), + dyn.V("baz"), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NilValue, nv) + assert.Equal(t, dyn.NilValue, nv) } func TestFromTypedSliceEmpty(t *testing.T) { var src = []string{} - ref := config.V([]config.Value{ - config.V("bar"), - config.V("baz"), + ref := dyn.V([]dyn.Value{ + dyn.V("bar"), + dyn.V("baz"), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V([]config.Value{}), nv) + assert.Equal(t, dyn.V([]dyn.Value{}), nv) } func TestFromTypedSliceNonEmpty(t *testing.T) { @@ -175,12 +175,12 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { "bar", } - ref := config.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V([]config.Value{ - config.V("foo"), - config.V("bar"), + assert.Equal(t, dyn.V([]dyn.Value{ + dyn.V("foo"), + dyn.V("bar"), }), nv) } @@ -190,205 +190,205 @@ func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { "bar", } - ref := config.V([]config.Value{ - config.NewValue("foo", config.Location{File: "foo"}), - config.NewValue("baz", config.Location{File: "baz"}), + ref := dyn.V([]dyn.Value{ + dyn.NewValue("foo", dyn.Location{File: "foo"}), + dyn.NewValue("baz", dyn.Location{File: "baz"}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) // Assert foo has retained its location. - assert.Equal(t, config.NewValue("foo", config.Location{File: "foo"}), nv.Index(0)) + assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv.Index(0)) // Assert bar lost its location (because it was overwritten). 
- assert.Equal(t, config.NewValue("bar", config.Location{}), nv.Index(1)) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{}), nv.Index(1)) } func TestFromTypedStringEmpty(t *testing.T) { var src string - var ref = config.NilValue + var ref = dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NilValue, nv) + assert.Equal(t, dyn.NilValue, nv) } func TestFromTypedStringEmptyOverwrite(t *testing.T) { var src string - var ref = config.V("old") + var ref = dyn.V("old") nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(""), nv) + assert.Equal(t, dyn.V(""), nv) } func TestFromTypedStringNonEmpty(t *testing.T) { var src string = "new" - var ref = config.NilValue + var ref = dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V("new"), nv) + assert.Equal(t, dyn.V("new"), nv) } func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { var src string = "new" - var ref = config.V("old") + var ref = dyn.V("old") nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V("new"), nv) + assert.Equal(t, dyn.V("new"), nv) } func TestFromTypedStringRetainsLocationsIfUnchanged(t *testing.T) { var src string = "foo" - var ref = config.NewValue("foo", config.Location{File: "foo"}) + var ref = dyn.NewValue("foo", dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NewValue("foo", config.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv) } func TestFromTypedStringTypeError(t *testing.T) { var src string = "foo" - var ref = config.V(1234) + var ref = dyn.V(1234) _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedBoolEmpty(t *testing.T) { var src bool - var ref = config.NilValue + var ref = dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NilValue, nv) + assert.Equal(t, dyn.NilValue, nv) } func TestFromTypedBoolEmptyOverwrite(t *testing.T) { var src bool - var ref = config.V(true) + var ref = dyn.V(true) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(false), nv) + assert.Equal(t, dyn.V(false), nv) } func TestFromTypedBoolNonEmpty(t *testing.T) { var src bool = true - var ref = config.NilValue + var ref = dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(true), nv) + assert.Equal(t, dyn.V(true), nv) } func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { var src bool = true - var ref = config.V(false) + var ref = dyn.V(false) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(true), nv) + assert.Equal(t, dyn.V(true), nv) } func TestFromTypedBoolRetainsLocationsIfUnchanged(t *testing.T) { var src bool = true - var ref = config.NewValue(true, config.Location{File: "foo"}) + var ref = dyn.NewValue(true, dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NewValue(true, config.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(true, dyn.Location{File: "foo"}), nv) } func TestFromTypedBoolTypeError(t *testing.T) { var src bool = true - var ref = config.V("string") + var ref = dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedIntEmpty(t *testing.T) { var src int - var ref = config.NilValue + var ref = dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NilValue, nv) + 
assert.Equal(t, dyn.NilValue, nv) } func TestFromTypedIntEmptyOverwrite(t *testing.T) { var src int - var ref = config.V(1234) + var ref = dyn.V(1234) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(int64(0)), nv) + assert.Equal(t, dyn.V(int64(0)), nv) } func TestFromTypedIntNonEmpty(t *testing.T) { var src int = 1234 - var ref = config.NilValue + var ref = dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(int64(1234)), nv) + assert.Equal(t, dyn.V(int64(1234)), nv) } func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { var src int = 1234 - var ref = config.V(1233) + var ref = dyn.V(1233) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(int64(1234)), nv) + assert.Equal(t, dyn.V(int64(1234)), nv) } func TestFromTypedIntRetainsLocationsIfUnchanged(t *testing.T) { var src int = 1234 - var ref = config.NewValue(1234, config.Location{File: "foo"}) + var ref = dyn.NewValue(1234, dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NewValue(1234, config.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(1234, dyn.Location{File: "foo"}), nv) } func TestFromTypedIntTypeError(t *testing.T) { var src int = 1234 - var ref = config.V("string") + var ref = dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedFloatEmpty(t *testing.T) { var src float64 - var ref = config.NilValue + var ref = dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NilValue, nv) + assert.Equal(t, dyn.NilValue, nv) } func TestFromTypedFloatEmptyOverwrite(t *testing.T) { var src float64 - var ref = config.V(1.23) + var ref = dyn.V(1.23) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(0.0), nv) + assert.Equal(t, dyn.V(0.0), nv) } func TestFromTypedFloatNonEmpty(t *testing.T) { var src float64 = 1.23 - var ref = config.NilValue + var ref = dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(1.23), nv) + assert.Equal(t, dyn.V(1.23), nv) } func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { var src float64 = 1.23 - var ref = config.V(1.24) + var ref = dyn.V(1.24) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.V(1.23), nv) + assert.Equal(t, dyn.V(1.23), nv) } func TestFromTypedFloatRetainsLocationsIfUnchanged(t *testing.T) { var src float64 = 1.23 - var ref = config.NewValue(1.23, config.Location{File: "foo"}) + var ref = dyn.NewValue(1.23, dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, config.NewValue(1.23, config.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(1.23, dyn.Location{File: "foo"}), nv) } func TestFromTypedFloatTypeError(t *testing.T) { var src float64 = 1.23 - var ref = config.V("string") + var ref = dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } diff --git a/libs/config/convert/normalize.go b/libs/dyn/convert/normalize.go similarity index 58% rename from libs/config/convert/normalize.go rename to libs/dyn/convert/normalize.go index d7d2b1df..7a652cbc 100644 --- a/libs/config/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -5,15 +5,15 @@ import ( "reflect" "strconv" - "github.com/databricks/cli/libs/config" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" ) -func Normalize(dst any, src config.Value) (config.Value, diag.Diagnostics) { 
+func Normalize(dst any, src dyn.Value) (dyn.Value, diag.Diagnostics) { return normalizeType(reflect.TypeOf(dst), src) } -func normalizeType(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { +func normalizeType(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { for typ.Kind() == reflect.Pointer { typ = typ.Elem() } @@ -35,10 +35,10 @@ func normalizeType(typ reflect.Type, src config.Value) (config.Value, diag.Diagn return normalizeFloat(typ, src) } - return config.NilValue, diag.Errorf("unsupported type: %s", typ.Kind()) + return dyn.NilValue, diag.Errorf("unsupported type: %s", typ.Kind()) } -func typeMismatch(expected config.Kind, src config.Value) diag.Diagnostic { +func typeMismatch(expected dyn.Kind, src dyn.Value) diag.Diagnostic { return diag.Diagnostic{ Severity: diag.Error, Summary: fmt.Sprintf("expected %s, found %s", expected, src.Kind()), @@ -46,12 +46,12 @@ func typeMismatch(expected config.Kind, src config.Value) diag.Diagnostic { } } -func normalizeStruct(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { +func normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { - case config.KindMap: - out := make(map[string]config.Value) + case dyn.KindMap: + out := make(map[string]dyn.Value) info := getStructInfo(typ) for k, v := range src.MustMap() { index, ok := info.Fields[k] @@ -77,20 +77,20 @@ func normalizeStruct(typ reflect.Type, src config.Value) (config.Value, diag.Dia out[k] = v } - return config.NewValue(out, src.Location()), diags - case config.KindNil: + return dyn.NewValue(out, src.Location()), diags + case dyn.KindNil: return src, diags } - return config.NilValue, diags.Append(typeMismatch(config.KindMap, src)) + return dyn.NilValue, diags.Append(typeMismatch(dyn.KindMap, src)) } -func normalizeMap(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { +func normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { - case config.KindMap: - out := make(map[string]config.Value) + case dyn.KindMap: + out := make(map[string]dyn.Value) for k, v := range src.MustMap() { // Normalize the value according to the map element type. v, err := normalizeType(typ.Elem(), v) @@ -105,20 +105,20 @@ func normalizeMap(typ reflect.Type, src config.Value) (config.Value, diag.Diagno out[k] = v } - return config.NewValue(out, src.Location()), diags - case config.KindNil: + return dyn.NewValue(out, src.Location()), diags + case dyn.KindNil: return src, diags } - return config.NilValue, diags.Append(typeMismatch(config.KindMap, src)) + return dyn.NilValue, diags.Append(typeMismatch(dyn.KindMap, src)) } -func normalizeSlice(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { +func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { - case config.KindSequence: - out := make([]config.Value, 0, len(src.MustSequence())) + case dyn.KindSequence: + out := make([]dyn.Value, 0, len(src.MustSequence())) for _, v := range src.MustSequence() { // Normalize the value according to the slice element type. 
v, err := normalizeType(typ.Elem(), v) @@ -133,42 +133,42 @@ func normalizeSlice(typ reflect.Type, src config.Value) (config.Value, diag.Diag out = append(out, v) } - return config.NewValue(out, src.Location()), diags - case config.KindNil: + return dyn.NewValue(out, src.Location()), diags + case dyn.KindNil: return src, diags } - return config.NilValue, diags.Append(typeMismatch(config.KindSequence, src)) + return dyn.NilValue, diags.Append(typeMismatch(dyn.KindSequence, src)) } -func normalizeString(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { +func normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out string switch src.Kind() { - case config.KindString: + case dyn.KindString: out = src.MustString() - case config.KindBool: + case dyn.KindBool: out = strconv.FormatBool(src.MustBool()) - case config.KindInt: + case dyn.KindInt: out = strconv.FormatInt(src.MustInt(), 10) - case config.KindFloat: + case dyn.KindFloat: out = strconv.FormatFloat(src.MustFloat(), 'f', -1, 64) default: - return config.NilValue, diags.Append(typeMismatch(config.KindString, src)) + return dyn.NilValue, diags.Append(typeMismatch(dyn.KindString, src)) } - return config.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Location()), diags } -func normalizeBool(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { +func normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out bool switch src.Kind() { - case config.KindBool: + case dyn.KindBool: out = src.MustBool() - case config.KindString: + case dyn.KindString: // See https://github.com/go-yaml/yaml/blob/f6f7691b1fdeb513f56608cd2c32c51f8194bf51/decode.go#L684-L693. switch src.MustString() { case "true", "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": @@ -177,59 +177,59 @@ func normalizeBool(typ reflect.Type, src config.Value) (config.Value, diag.Diagn out = false default: // Cannot interpret as a boolean. 
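			// For example, "maybe" or "${var.foo}" fall through here and
			// surface as an "expected bool, found string" diagnostic.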
- return config.NilValue, diags.Append(typeMismatch(config.KindBool, src)) + return dyn.NilValue, diags.Append(typeMismatch(dyn.KindBool, src)) } default: - return config.NilValue, diags.Append(typeMismatch(config.KindBool, src)) + return dyn.NilValue, diags.Append(typeMismatch(dyn.KindBool, src)) } - return config.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Location()), diags } -func normalizeInt(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { +func normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out int64 switch src.Kind() { - case config.KindInt: + case dyn.KindInt: out = src.MustInt() - case config.KindString: + case dyn.KindString: var err error out, err = strconv.ParseInt(src.MustString(), 10, 64) if err != nil { - return config.NilValue, diags.Append(diag.Diagnostic{ + return dyn.NilValue, diags.Append(diag.Diagnostic{ Severity: diag.Error, Summary: fmt.Sprintf("cannot parse %q as an integer", src.MustString()), Location: src.Location(), }) } default: - return config.NilValue, diags.Append(typeMismatch(config.KindInt, src)) + return dyn.NilValue, diags.Append(typeMismatch(dyn.KindInt, src)) } - return config.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Location()), diags } -func normalizeFloat(typ reflect.Type, src config.Value) (config.Value, diag.Diagnostics) { +func normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out float64 switch src.Kind() { - case config.KindFloat: + case dyn.KindFloat: out = src.MustFloat() - case config.KindString: + case dyn.KindString: var err error out, err = strconv.ParseFloat(src.MustString(), 64) if err != nil { - return config.NilValue, diags.Append(diag.Diagnostic{ + return dyn.NilValue, diags.Append(diag.Diagnostic{ Severity: diag.Error, Summary: fmt.Sprintf("cannot parse %q as a floating point number", src.MustString()), Location: src.Location(), }) } default: - return config.NilValue, diags.Append(typeMismatch(config.KindFloat, src)) + return dyn.NilValue, diags.Append(typeMismatch(dyn.KindFloat, src)) } - return config.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Location()), diags } diff --git a/libs/config/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go similarity index 74% rename from libs/config/convert/normalize_test.go rename to libs/dyn/convert/normalize_test.go index 9c4b10bb..13b1ed52 100644 --- a/libs/config/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -3,8 +3,8 @@ package convert import ( "testing" - "github.com/databricks/cli/libs/config" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" ) @@ -15,9 +15,9 @@ func TestNormalizeStruct(t *testing.T) { } var typ Tmp - vin := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) vout, err := Normalize(typ, vin) @@ -32,9 +32,9 @@ func TestNormalizeStructElementDiagnostic(t *testing.T) { } var typ Tmp - vin := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V(map[string]config.Value{"an": config.V("error")}), + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V(map[string]dyn.Value{"an": dyn.V("error")}), }) vout, err := Normalize(typ, vin) @@ -42,7 +42,7 @@ func TestNormalizeStructElementDiagnostic(t 
*testing.T) { assert.Equal(t, diag.Diagnostic{ Severity: diag.Error, Summary: `expected string, found map`, - Location: config.Location{}, + Location: dyn.Location{}, }, err[0]) // Elements that encounter an error during normalization are dropped. @@ -57,9 +57,9 @@ func TestNormalizeStructUnknownField(t *testing.T) { } var typ Tmp - vin := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) vout, err := Normalize(typ, vin) @@ -82,7 +82,7 @@ func TestNormalizeStructNil(t *testing.T) { } var typ Tmp - vin := config.NilValue + vin := dyn.NilValue vout, err := Normalize(typ, vin) assert.Empty(t, err) assert.Equal(t, vin, vout) @@ -94,7 +94,7 @@ func TestNormalizeStructError(t *testing.T) { } var typ Tmp - vin := config.V("string") + vin := dyn.V("string") _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -106,9 +106,9 @@ func TestNormalizeStructError(t *testing.T) { func TestNormalizeMap(t *testing.T) { var typ map[string]string - vin := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) vout, err := Normalize(typ, vin) @@ -118,9 +118,9 @@ func TestNormalizeMap(t *testing.T) { func TestNormalizeMapElementDiagnostic(t *testing.T) { var typ map[string]string - vin := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V(map[string]config.Value{"an": config.V("error")}), + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V(map[string]dyn.Value{"an": dyn.V("error")}), }) vout, err := Normalize(typ, vin) @@ -128,7 +128,7 @@ func TestNormalizeMapElementDiagnostic(t *testing.T) { assert.Equal(t, diag.Diagnostic{ Severity: diag.Error, Summary: `expected string, found map`, - Location: config.Location{}, + Location: dyn.Location{}, }, err[0]) // Elements that encounter an error during normalization are dropped. 
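Aside (not part of the patch): a minimal, self-contained sketch of the renamed convert.Normalize API, mirroring the element-diagnostic tests above. The behavior shown — failing elements dropped, a diagnostic recorded — comes from this diff; the main() wrapper is illustrative only.

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
)

type Tmp struct {
	Foo string `json:"foo"`
	Bar string `json:"bar"`
}

func main() {
	// "foo" normalizes cleanly; "bar" holds a map where a string is
	// expected, so it is dropped and reported as a diagnostic.
	vin := dyn.V(map[string]dyn.Value{
		"foo": dyn.V("bar"),
		"bar": dyn.V(map[string]dyn.Value{"an": dyn.V("error")}),
	})

	vout, diags := convert.Normalize(Tmp{}, vin)
	for _, d := range diags {
		fmt.Println(d.Summary) // expected string, found map
	}
	fmt.Println(vout.AsAny()) // map[foo:bar]
}

Note that Normalize only consults the type of its first argument (via reflect.TypeOf); it never writes to it.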
@@ -139,7 +139,7 @@ func TestNormalizeMapElementDiagnostic(t *testing.T) { func TestNormalizeMapNil(t *testing.T) { var typ map[string]string - vin := config.NilValue + vin := dyn.NilValue vout, err := Normalize(typ, vin) assert.Empty(t, err) assert.Equal(t, vin, vout) @@ -147,7 +147,7 @@ func TestNormalizeMapNil(t *testing.T) { func TestNormalizeMapError(t *testing.T) { var typ map[string]string - vin := config.V("string") + vin := dyn.V("string") _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -159,9 +159,9 @@ func TestNormalizeMapError(t *testing.T) { func TestNormalizeSlice(t *testing.T) { var typ []string - vin := config.V([]config.Value{ - config.V("foo"), - config.V("bar"), + vin := dyn.V([]dyn.Value{ + dyn.V("foo"), + dyn.V("bar"), }) vout, err := Normalize(typ, vin) @@ -171,10 +171,10 @@ func TestNormalizeSlice(t *testing.T) { func TestNormalizeSliceElementDiagnostic(t *testing.T) { var typ []string - vin := config.V([]config.Value{ - config.V("foo"), - config.V("bar"), - config.V(map[string]config.Value{"an": config.V("error")}), + vin := dyn.V([]dyn.Value{ + dyn.V("foo"), + dyn.V("bar"), + dyn.V(map[string]dyn.Value{"an": dyn.V("error")}), }) vout, err := Normalize(typ, vin) @@ -182,7 +182,7 @@ func TestNormalizeSliceElementDiagnostic(t *testing.T) { assert.Equal(t, diag.Diagnostic{ Severity: diag.Error, Summary: `expected string, found map`, - Location: config.Location{}, + Location: dyn.Location{}, }, err[0]) // Elements that encounter an error during normalization are dropped. @@ -191,7 +191,7 @@ func TestNormalizeSliceElementDiagnostic(t *testing.T) { func TestNormalizeSliceNil(t *testing.T) { var typ []string - vin := config.NilValue + vin := dyn.NilValue vout, err := Normalize(typ, vin) assert.Empty(t, err) assert.Equal(t, vin, vout) @@ -199,7 +199,7 @@ func TestNormalizeSliceNil(t *testing.T) { func TestNormalizeSliceError(t *testing.T) { var typ []string - vin := config.V("string") + vin := dyn.V("string") _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -211,7 +211,7 @@ func TestNormalizeSliceError(t *testing.T) { func TestNormalizeString(t *testing.T) { var typ string - vin := config.V("string") + vin := dyn.V("string") vout, err := Normalize(&typ, vin) assert.Empty(t, err) assert.Equal(t, vin, vout) @@ -219,7 +219,7 @@ func TestNormalizeString(t *testing.T) { func TestNormalizeStringNil(t *testing.T) { var typ string - vin := config.NewValue(nil, config.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -231,51 +231,51 @@ func TestNormalizeStringNil(t *testing.T) { func TestNormalizeStringFromBool(t *testing.T) { var typ string - vin := config.NewValue(true, config.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(true, dyn.Location{File: "file", Line: 1, Column: 1}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.NewValue("true", vin.Location()), vout) + assert.Equal(t, dyn.NewValue("true", vin.Location()), vout) } func TestNormalizeStringFromInt(t *testing.T) { var typ string - vin := config.NewValue(123, config.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(123, dyn.Location{File: "file", Line: 1, Column: 1}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.NewValue("123", vin.Location()), vout) + assert.Equal(t, dyn.NewValue("123", 
vin.Location()), vout) } func TestNormalizeStringFromFloat(t *testing.T) { var typ string - vin := config.NewValue(1.20, config.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(1.20, dyn.Location{File: "file", Line: 1, Column: 1}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.NewValue("1.2", vin.Location()), vout) + assert.Equal(t, dyn.NewValue("1.2", vin.Location()), vout) } func TestNormalizeStringError(t *testing.T) { var typ string - vin := config.V(map[string]config.Value{"an": config.V("error")}) + vin := dyn.V(map[string]dyn.Value{"an": dyn.V("error")}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Error, Summary: `expected string, found map`, - Location: config.Location{}, + Location: dyn.Location{}, }, err[0]) } func TestNormalizeBool(t *testing.T) { var typ bool - vin := config.V(true) + vin := dyn.V(true) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.V(true), vout) + assert.Equal(t, dyn.V(true), vout) } func TestNormalizeBoolNil(t *testing.T) { var typ bool - vin := config.NewValue(nil, config.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -299,16 +299,16 @@ func TestNormalizeBoolFromString(t *testing.T) { {"on", true}, {"off", false}, } { - vin := config.V(c.Input) + vin := dyn.V(c.Input) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.V(c.Output), vout) + assert.Equal(t, dyn.V(c.Output), vout) } } func TestNormalizeBoolFromStringError(t *testing.T) { var typ bool - vin := config.V("abc") + vin := dyn.V("abc") _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -320,27 +320,27 @@ func TestNormalizeBoolFromStringError(t *testing.T) { func TestNormalizeBoolError(t *testing.T) { var typ bool - vin := config.V(map[string]config.Value{"an": config.V("error")}) + vin := dyn.V(map[string]dyn.Value{"an": dyn.V("error")}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Error, Summary: `expected bool, found map`, - Location: config.Location{}, + Location: dyn.Location{}, }, err[0]) } func TestNormalizeInt(t *testing.T) { var typ int - vin := config.V(123) + vin := dyn.V(123) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.V(int64(123)), vout) + assert.Equal(t, dyn.V(int64(123)), vout) } func TestNormalizeIntNil(t *testing.T) { var typ int - vin := config.NewValue(nil, config.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -352,15 +352,15 @@ func TestNormalizeIntNil(t *testing.T) { func TestNormalizeIntFromString(t *testing.T) { var typ int - vin := config.V("123") + vin := dyn.V("123") vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.V(int64(123)), vout) + assert.Equal(t, dyn.V(int64(123)), vout) } func TestNormalizeIntFromStringError(t *testing.T) { var typ int - vin := config.V("abc") + vin := dyn.V("abc") _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -372,27 +372,27 @@ func TestNormalizeIntFromStringError(t *testing.T) { func TestNormalizeIntError(t *testing.T) { var typ int - vin := 
config.V(map[string]config.Value{"an": config.V("error")}) + vin := dyn.V(map[string]dyn.Value{"an": dyn.V("error")}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Error, Summary: `expected int, found map`, - Location: config.Location{}, + Location: dyn.Location{}, }, err[0]) } func TestNormalizeFloat(t *testing.T) { var typ float64 - vin := config.V(1.2) + vin := dyn.V(1.2) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.V(1.2), vout) + assert.Equal(t, dyn.V(1.2), vout) } func TestNormalizeFloatNil(t *testing.T) { var typ float64 - vin := config.NewValue(nil, config.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -404,15 +404,15 @@ func TestNormalizeFloatNil(t *testing.T) { func TestNormalizeFloatFromString(t *testing.T) { var typ float64 - vin := config.V("1.2") + vin := dyn.V("1.2") vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, config.V(1.2), vout) + assert.Equal(t, dyn.V(1.2), vout) } func TestNormalizeFloatFromStringError(t *testing.T) { var typ float64 - vin := config.V("abc") + vin := dyn.V("abc") _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -424,12 +424,12 @@ func TestNormalizeFloatFromStringError(t *testing.T) { func TestNormalizeFloatError(t *testing.T) { var typ float64 - vin := config.V(map[string]config.Value{"an": config.V("error")}) + vin := dyn.V(map[string]dyn.Value{"an": dyn.V("error")}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Error, Summary: `expected float, found map`, - Location: config.Location{}, + Location: dyn.Location{}, }, err[0]) } diff --git a/libs/config/convert/struct_info.go b/libs/dyn/convert/struct_info.go similarity index 87% rename from libs/config/convert/struct_info.go rename to libs/dyn/convert/struct_info.go index 80cfabb6..dc3ed4da 100644 --- a/libs/config/convert/struct_info.go +++ b/libs/dyn/convert/struct_info.go @@ -5,16 +5,16 @@ import ( "strings" "sync" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" ) // structInfo holds the type information we need to efficiently -// convert data from a [config.Value] to a Go struct. +// convert data from a [dyn.Value] to a Go struct. type structInfo struct { // Fields maps the JSON-name of the field to the field's index for use with [FieldByIndex]. Fields map[string][]int - // ValueField maps to the field with a [config.Value]. + // ValueField maps to the field with a [dyn.Value]. // The underlying type is expected to only have one of these. ValueField []int } @@ -74,10 +74,10 @@ func buildStructInfo(typ reflect.Type) structInfo { continue } - // If this field has type [config.Value], we populate it with the source [config.Value] from [ToTyped]. + // If this field has type [dyn.Value], we populate it with the source [dyn.Value] from [ToTyped]. if sf.IsExported() && sf.Type == configValueType { if out.ValueField != nil { - panic("multiple config.Value fields") + panic("multiple dyn.Value fields") } out.ValueField = append(prefix, sf.Index...) continue @@ -129,5 +129,5 @@ func (s *structInfo) FieldValues(v reflect.Value) map[string]reflect.Value { return out } -// Type of [config.Value]. -var configValueType = reflect.TypeOf((*config.Value)(nil)).Elem() +// Type of [dyn.Value]. 
+var configValueType = reflect.TypeOf((*dyn.Value)(nil)).Elem() diff --git a/libs/config/convert/struct_info_test.go b/libs/dyn/convert/struct_info_test.go similarity index 97% rename from libs/config/convert/struct_info_test.go rename to libs/dyn/convert/struct_info_test.go index 685679ae..08be3c47 100644 --- a/libs/config/convert/struct_info_test.go +++ b/libs/dyn/convert/struct_info_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" ) @@ -207,7 +207,7 @@ func TestStructInfoValueFieldAbsent(t *testing.T) { func TestStructInfoValueFieldPresent(t *testing.T) { type Tmp struct { - Foo config.Value + Foo dyn.Value } si := getStructInfo(reflect.TypeOf(Tmp{})) @@ -216,8 +216,8 @@ func TestStructInfoValueFieldPresent(t *testing.T) { func TestStructInfoValueFieldMultiple(t *testing.T) { type Tmp struct { - Foo config.Value - Bar config.Value + Foo dyn.Value + Bar dyn.Value } assert.Panics(t, func() { diff --git a/libs/config/convert/to_typed.go b/libs/dyn/convert/to_typed.go similarity index 81% rename from libs/config/convert/to_typed.go rename to libs/dyn/convert/to_typed.go index 8c43d974..209de12c 100644 --- a/libs/config/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -5,17 +5,17 @@ import ( "reflect" "strconv" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" ) -func ToTyped(dst any, src config.Value) error { +func ToTyped(dst any, src dyn.Value) error { dstv := reflect.ValueOf(dst) // Dereference pointer if necessary for dstv.Kind() == reflect.Pointer { // If the source value is nil and the destination is a settable pointer, // set the destination to nil. Also see `end_to_end_test.go`. - if dstv.CanSet() && src == config.NilValue { + if dstv.CanSet() && src == dyn.NilValue { dstv.SetZero() return nil } @@ -50,9 +50,9 @@ func ToTyped(dst any, src config.Value) error { return fmt.Errorf("unsupported type: %s", dstv.Kind()) } -func toTypedStruct(dst reflect.Value, src config.Value) error { +func toTypedStruct(dst reflect.Value, src dyn.Value) error { switch src.Kind() { - case config.KindMap: + case dyn.KindMap: info := getStructInfo(dst.Type()) for k, v := range src.MustMap() { index, ok := info.Fields[k] @@ -83,14 +83,14 @@ func toTypedStruct(dst reflect.Value, src config.Value) error { } } - // Populate field(s) for [config.Value], if any. + // Populate field(s) for [dyn.Value], if any. if info.ValueField != nil { vv := dst.FieldByIndex(info.ValueField) vv.Set(reflect.ValueOf(src)) } return nil - case config.KindNil: + case dyn.KindNil: dst.SetZero() return nil } @@ -101,9 +101,9 @@ func toTypedStruct(dst reflect.Value, src config.Value) error { } } -func toTypedMap(dst reflect.Value, src config.Value) error { +func toTypedMap(dst reflect.Value, src dyn.Value) error { switch src.Kind() { - case config.KindMap: + case dyn.KindMap: m := src.MustMap() // Always overwrite. @@ -118,7 +118,7 @@ func toTypedMap(dst reflect.Value, src config.Value) error { dst.SetMapIndex(kv, vv.Elem()) } return nil - case config.KindNil: + case dyn.KindNil: dst.SetZero() return nil } @@ -129,9 +129,9 @@ func toTypedMap(dst reflect.Value, src config.Value) error { } } -func toTypedSlice(dst reflect.Value, src config.Value) error { +func toTypedSlice(dst reflect.Value, src dyn.Value) error { switch src.Kind() { - case config.KindSequence: + case dyn.KindSequence: seq := src.MustSequence() // Always overwrite. 
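Aside (not part of the patch): the complementary write path, convert.ToTyped, sketched with the overwrite and nil semantics asserted in the tests further down. The main() wrapper is illustrative only.

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
)

func main() {
	// Existing slice contents are overwritten, not appended to.
	out := []string{"stale"}
	v := dyn.V([]dyn.Value{dyn.V("foo"), dyn.V("bar")})
	if err := convert.ToTyped(&out, v); err != nil {
		panic(err)
	}
	fmt.Println(out) // [foo bar]

	// A nil value zeroes the destination.
	if err := convert.ToTyped(&out, dyn.NilValue); err != nil {
		panic(err)
	}
	fmt.Println(out == nil) // true
}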
@@ -143,7 +143,7 @@ func toTypedSlice(dst reflect.Value, src config.Value) error { } } return nil - case config.KindNil: + case dyn.KindNil: dst.SetZero() return nil } @@ -154,18 +154,18 @@ func toTypedSlice(dst reflect.Value, src config.Value) error { } } -func toTypedString(dst reflect.Value, src config.Value) error { +func toTypedString(dst reflect.Value, src dyn.Value) error { switch src.Kind() { - case config.KindString: + case dyn.KindString: dst.SetString(src.MustString()) return nil - case config.KindBool: + case dyn.KindBool: dst.SetString(strconv.FormatBool(src.MustBool())) return nil - case config.KindInt: + case dyn.KindInt: dst.SetString(strconv.FormatInt(src.MustInt(), 10)) return nil - case config.KindFloat: + case dyn.KindFloat: dst.SetString(strconv.FormatFloat(src.MustFloat(), 'f', -1, 64)) return nil } @@ -176,12 +176,12 @@ func toTypedString(dst reflect.Value, src config.Value) error { } } -func toTypedBool(dst reflect.Value, src config.Value) error { +func toTypedBool(dst reflect.Value, src dyn.Value) error { switch src.Kind() { - case config.KindBool: + case dyn.KindBool: dst.SetBool(src.MustBool()) return nil - case config.KindString: + case dyn.KindString: // See https://github.com/go-yaml/yaml/blob/f6f7691b1fdeb513f56608cd2c32c51f8194bf51/decode.go#L684-L693. switch src.MustString() { case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": @@ -199,12 +199,12 @@ func toTypedBool(dst reflect.Value, src config.Value) error { } } -func toTypedInt(dst reflect.Value, src config.Value) error { +func toTypedInt(dst reflect.Value, src dyn.Value) error { switch src.Kind() { - case config.KindInt: + case dyn.KindInt: dst.SetInt(src.MustInt()) return nil - case config.KindString: + case dyn.KindString: if i64, err := strconv.ParseInt(src.MustString(), 10, 64); err == nil { dst.SetInt(i64) return nil @@ -217,12 +217,12 @@ func toTypedInt(dst reflect.Value, src config.Value) error { } } -func toTypedFloat(dst reflect.Value, src config.Value) error { +func toTypedFloat(dst reflect.Value, src dyn.Value) error { switch src.Kind() { - case config.KindFloat: + case dyn.KindFloat: dst.SetFloat(src.MustFloat()) return nil - case config.KindString: + case dyn.KindString: if f64, err := strconv.ParseFloat(src.MustString(), 64); err == nil { dst.SetFloat(f64) return nil diff --git a/libs/config/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go similarity index 76% rename from libs/config/convert/to_typed_test.go rename to libs/dyn/convert/to_typed_test.go index 2845bdda..3adc94c7 100644 --- a/libs/config/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -3,7 +3,7 @@ package convert import ( "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -21,9 +21,9 @@ func TestToTypedStruct(t *testing.T) { } var out Tmp - v := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) err := ToTyped(&out, v) @@ -48,9 +48,9 @@ func TestToTypedStructOverwrite(t *testing.T) { Foo: "baz", Bar: "qux", } - v := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) err := ToTyped(&out, v) @@ -74,9 +74,9 @@ func TestToTypedStructAnonymousByValue(t *testing.T) { } var out Tmp - v := config.V(map[string]config.Value{ - "foo": config.V("bar"), - 
"bar": config.V("baz"), + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) err := ToTyped(&out, v) @@ -100,9 +100,9 @@ func TestToTypedStructAnonymousByPointer(t *testing.T) { } var out Tmp - v := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) err := ToTyped(&out, v) @@ -117,7 +117,7 @@ func TestToTypedStructNil(t *testing.T) { } var out = Tmp{} - err := ToTyped(&out, config.NilValue) + err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Equal(t, Tmp{}, out) } @@ -128,7 +128,7 @@ func TestToTypedStructNilOverwrite(t *testing.T) { } var out = Tmp{"bar"} - err := ToTyped(&out, config.NilValue) + err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Equal(t, Tmp{}, out) } @@ -137,12 +137,12 @@ func TestToTypedStructWithValueField(t *testing.T) { type Tmp struct { Foo string `json:"foo"` - ConfigValue config.Value + ConfigValue dyn.Value } var out Tmp - v := config.V(map[string]config.Value{ - "foo": config.V("bar"), + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), }) err := ToTyped(&out, v) @@ -154,8 +154,8 @@ func TestToTypedStructWithValueField(t *testing.T) { func TestToTypedMap(t *testing.T) { var out = map[string]string{} - v := config.V(map[string]config.Value{ - "key": config.V("value"), + v := dyn.V(map[string]dyn.Value{ + "key": dyn.V("value"), }) err := ToTyped(&out, v) @@ -169,8 +169,8 @@ func TestToTypedMapOverwrite(t *testing.T) { "foo": "bar", } - v := config.V(map[string]config.Value{ - "bar": config.V("qux"), + v := dyn.V(map[string]dyn.Value{ + "bar": dyn.V("qux"), }) err := ToTyped(&out, v) @@ -182,8 +182,8 @@ func TestToTypedMapOverwrite(t *testing.T) { func TestToTypedMapWithPointerElement(t *testing.T) { var out map[string]*string - v := config.V(map[string]config.Value{ - "key": config.V("value"), + v := dyn.V(map[string]dyn.Value{ + "key": dyn.V("value"), }) err := ToTyped(&out, v) @@ -194,7 +194,7 @@ func TestToTypedMapWithPointerElement(t *testing.T) { func TestToTypedMapNil(t *testing.T) { var out = map[string]string{} - err := ToTyped(&out, config.NilValue) + err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) } @@ -203,7 +203,7 @@ func TestToTypedMapNilOverwrite(t *testing.T) { var out = map[string]string{ "foo": "bar", } - err := ToTyped(&out, config.NilValue) + err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) } @@ -211,9 +211,9 @@ func TestToTypedMapNilOverwrite(t *testing.T) { func TestToTypedSlice(t *testing.T) { var out []string - v := config.V([]config.Value{ - config.V("foo"), - config.V("bar"), + v := dyn.V([]dyn.Value{ + dyn.V("foo"), + dyn.V("bar"), }) err := ToTyped(&out, v) @@ -226,9 +226,9 @@ func TestToTypedSlice(t *testing.T) { func TestToTypedSliceOverwrite(t *testing.T) { var out = []string{"qux"} - v := config.V([]config.Value{ - config.V("foo"), - config.V("bar"), + v := dyn.V([]dyn.Value{ + dyn.V("foo"), + dyn.V("bar"), }) err := ToTyped(&out, v) @@ -241,9 +241,9 @@ func TestToTypedSliceOverwrite(t *testing.T) { func TestToTypedSliceWithPointerElement(t *testing.T) { var out []*string - v := config.V([]config.Value{ - config.V("foo"), - config.V("bar"), + v := dyn.V([]dyn.Value{ + dyn.V("foo"), + dyn.V("bar"), }) err := ToTyped(&out, v) @@ -255,63 +255,63 @@ func TestToTypedSliceWithPointerElement(t *testing.T) { func TestToTypedSliceNil(t *testing.T) { var out []string - err := ToTyped(&out, 
config.NilValue) + err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) } func TestToTypedSliceNilOverwrite(t *testing.T) { var out = []string{"foo"} - err := ToTyped(&out, config.NilValue) + err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) } func TestToTypedString(t *testing.T) { var out string - err := ToTyped(&out, config.V("foo")) + err := ToTyped(&out, dyn.V("foo")) require.NoError(t, err) assert.Equal(t, "foo", out) } func TestToTypedStringOverwrite(t *testing.T) { var out string = "bar" - err := ToTyped(&out, config.V("foo")) + err := ToTyped(&out, dyn.V("foo")) require.NoError(t, err) assert.Equal(t, "foo", out) } func TestToTypedStringFromBool(t *testing.T) { var out string - err := ToTyped(&out, config.V(true)) + err := ToTyped(&out, dyn.V(true)) require.NoError(t, err) assert.Equal(t, "true", out) } func TestToTypedStringFromInt(t *testing.T) { var out string - err := ToTyped(&out, config.V(123)) + err := ToTyped(&out, dyn.V(123)) require.NoError(t, err) assert.Equal(t, "123", out) } func TestToTypedStringFromFloat(t *testing.T) { var out string - err := ToTyped(&out, config.V(1.2)) + err := ToTyped(&out, dyn.V(1.2)) require.NoError(t, err) assert.Equal(t, "1.2", out) } func TestToTypedBool(t *testing.T) { var out bool - err := ToTyped(&out, config.V(true)) + err := ToTyped(&out, dyn.V(true)) require.NoError(t, err) assert.Equal(t, true, out) } func TestToTypedBoolOverwrite(t *testing.T) { var out bool = true - err := ToTyped(&out, config.V(false)) + err := ToTyped(&out, dyn.V(false)) require.NoError(t, err) assert.Equal(t, false, out) } @@ -321,128 +321,128 @@ func TestToTypedBoolFromString(t *testing.T) { // True-ish for _, v := range []string{"y", "yes", "on"} { - err := ToTyped(&out, config.V(v)) + err := ToTyped(&out, dyn.V(v)) require.NoError(t, err) assert.Equal(t, true, out) } // False-ish for _, v := range []string{"n", "no", "off"} { - err := ToTyped(&out, config.V(v)) + err := ToTyped(&out, dyn.V(v)) require.NoError(t, err) assert.Equal(t, false, out) } // Other - err := ToTyped(&out, config.V("${var.foo}")) + err := ToTyped(&out, dyn.V("${var.foo}")) require.Error(t, err) } func TestToTypedInt(t *testing.T) { var out int - err := ToTyped(&out, config.V(1234)) + err := ToTyped(&out, dyn.V(1234)) require.NoError(t, err) assert.Equal(t, int(1234), out) } func TestToTypedInt32(t *testing.T) { var out32 int32 - err := ToTyped(&out32, config.V(1235)) + err := ToTyped(&out32, dyn.V(1235)) require.NoError(t, err) assert.Equal(t, int32(1235), out32) } func TestToTypedInt64(t *testing.T) { var out64 int64 - err := ToTyped(&out64, config.V(1236)) + err := ToTyped(&out64, dyn.V(1236)) require.NoError(t, err) assert.Equal(t, int64(1236), out64) } func TestToTypedIntOverwrite(t *testing.T) { var out int = 123 - err := ToTyped(&out, config.V(1234)) + err := ToTyped(&out, dyn.V(1234)) require.NoError(t, err) assert.Equal(t, int(1234), out) } func TestToTypedInt32Overwrite(t *testing.T) { var out32 int32 = 123 - err := ToTyped(&out32, config.V(1234)) + err := ToTyped(&out32, dyn.V(1234)) require.NoError(t, err) assert.Equal(t, int32(1234), out32) } func TestToTypedInt64Overwrite(t *testing.T) { var out64 int64 = 123 - err := ToTyped(&out64, config.V(1234)) + err := ToTyped(&out64, dyn.V(1234)) require.NoError(t, err) assert.Equal(t, int64(1234), out64) } func TestToTypedIntFromStringError(t *testing.T) { var out int - err := ToTyped(&out, config.V("abc")) + err := ToTyped(&out, dyn.V("abc")) require.Error(t, err) } func 
TestToTypedIntFromStringInt(t *testing.T) { var out int - err := ToTyped(&out, config.V("123")) + err := ToTyped(&out, dyn.V("123")) require.NoError(t, err) assert.Equal(t, int(123), out) } func TestToTypedFloat32(t *testing.T) { var out float32 - err := ToTyped(&out, config.V(float32(1.0))) + err := ToTyped(&out, dyn.V(float32(1.0))) require.NoError(t, err) assert.Equal(t, float32(1.0), out) } func TestToTypedFloat64(t *testing.T) { var out float64 - err := ToTyped(&out, config.V(float64(1.0))) + err := ToTyped(&out, dyn.V(float64(1.0))) require.NoError(t, err) assert.Equal(t, float64(1.0), out) } func TestToTypedFloat32Overwrite(t *testing.T) { var out float32 = 1.0 - err := ToTyped(&out, config.V(float32(2.0))) + err := ToTyped(&out, dyn.V(float32(2.0))) require.NoError(t, err) assert.Equal(t, float32(2.0), out) } func TestToTypedFloat64Overwrite(t *testing.T) { var out float64 = 1.0 - err := ToTyped(&out, config.V(float64(2.0))) + err := ToTyped(&out, dyn.V(float64(2.0))) require.NoError(t, err) assert.Equal(t, float64(2.0), out) } func TestToTypedFloat32FromStringError(t *testing.T) { var out float32 - err := ToTyped(&out, config.V("abc")) + err := ToTyped(&out, dyn.V("abc")) require.Error(t, err) } func TestToTypedFloat64FromStringError(t *testing.T) { var out float64 - err := ToTyped(&out, config.V("abc")) + err := ToTyped(&out, dyn.V("abc")) require.Error(t, err) } func TestToTypedFloat32FromString(t *testing.T) { var out float32 - err := ToTyped(&out, config.V("1.2")) + err := ToTyped(&out, dyn.V("1.2")) require.NoError(t, err) assert.Equal(t, float32(1.2), out) } func TestToTypedFloat64FromString(t *testing.T) { var out float64 - err := ToTyped(&out, config.V("1.2")) + err := ToTyped(&out, dyn.V("1.2")) require.NoError(t, err) assert.Equal(t, float64(1.2), out) } diff --git a/libs/config/kind.go b/libs/dyn/kind.go similarity index 98% rename from libs/config/kind.go rename to libs/dyn/kind.go index 5ed1a665..ba093341 100644 --- a/libs/config/kind.go +++ b/libs/dyn/kind.go @@ -1,4 +1,4 @@ -package config +package dyn import "time" diff --git a/libs/config/location.go b/libs/dyn/location.go similarity index 92% rename from libs/config/location.go rename to libs/dyn/location.go index 534b21c2..cd369193 100644 --- a/libs/config/location.go +++ b/libs/dyn/location.go @@ -1,4 +1,4 @@ -package config +package dyn import "fmt" diff --git a/libs/config/location_test.go b/libs/dyn/location_test.go similarity index 54% rename from libs/config/location_test.go rename to libs/dyn/location_test.go index 31013193..29226d73 100644 --- a/libs/config/location_test.go +++ b/libs/dyn/location_test.go @@ -1,13 +1,13 @@ -package config_test +package dyn_test import ( "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" ) func TestLocation(t *testing.T) { - loc := config.Location{File: "file", Line: 1, Column: 2} + loc := dyn.Location{File: "file", Line: 1, Column: 2} assert.Equal(t, "file:1:2", loc.String()) } diff --git a/libs/config/merge/merge.go b/libs/dyn/merge/merge.go similarity index 60% rename from libs/config/merge/merge.go rename to libs/dyn/merge/merge.go index 896e2129..1cadbea6 100644 --- a/libs/config/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -3,7 +3,7 @@ package merge import ( "fmt" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" ) // Merge recursively merges the specified values. @@ -12,46 +12,46 @@ import ( // * Merging x with nil or nil with x always yields x. 
// * Merging maps a and b means entries from map b take precedence. // * Merging sequences a and b means concatenating them. -func Merge(a, b config.Value) (config.Value, error) { +func Merge(a, b dyn.Value) (dyn.Value, error) { return merge(a, b) } -func merge(a, b config.Value) (config.Value, error) { +func merge(a, b dyn.Value) (dyn.Value, error) { ak := a.Kind() bk := b.Kind() // If a is nil, return b. - if ak == config.KindNil { + if ak == dyn.KindNil { return b, nil } // If b is nil, return a. - if bk == config.KindNil { + if bk == dyn.KindNil { return a, nil } // Call the appropriate merge function based on the kind of a and b. switch ak { - case config.KindMap: - if bk != config.KindMap { - return config.NilValue, fmt.Errorf("cannot merge map with %s", bk) + case dyn.KindMap: + if bk != dyn.KindMap { + return dyn.NilValue, fmt.Errorf("cannot merge map with %s", bk) } return mergeMap(a, b) - case config.KindSequence: - if bk != config.KindSequence { - return config.NilValue, fmt.Errorf("cannot merge sequence with %s", bk) + case dyn.KindSequence: + if bk != dyn.KindSequence { + return dyn.NilValue, fmt.Errorf("cannot merge sequence with %s", bk) } return mergeSequence(a, b) default: if ak != bk { - return config.NilValue, fmt.Errorf("cannot merge %s with %s", ak, bk) + return dyn.NilValue, fmt.Errorf("cannot merge %s with %s", ak, bk) } return mergePrimitive(a, b) } } -func mergeMap(a, b config.Value) (config.Value, error) { - out := make(map[string]config.Value) +func mergeMap(a, b dyn.Value) (dyn.Value, error) { + out := make(map[string]dyn.Value) am := a.MustMap() bm := b.MustMap() @@ -66,7 +66,7 @@ func mergeMap(a, b config.Value) (config.Value, error) { // If the key already exists, merge the values. merged, err := merge(out[k], v) if err != nil { - return config.NilValue, err + return dyn.NilValue, err } out[k] = merged } else { @@ -76,23 +76,23 @@ func mergeMap(a, b config.Value) (config.Value, error) { } // Preserve the location of the first value. - return config.NewValue(out, a.Location()), nil + return dyn.NewValue(out, a.Location()), nil } -func mergeSequence(a, b config.Value) (config.Value, error) { +func mergeSequence(a, b dyn.Value) (dyn.Value, error) { as := a.MustSequence() bs := b.MustSequence() // Merging sequences means concatenating them. - out := make([]config.Value, len(as)+len(bs)) + out := make([]dyn.Value, len(as)+len(bs)) copy(out[:], as) copy(out[len(as):], bs) // Preserve the location of the first value. - return config.NewValue(out, a.Location()), nil + return dyn.NewValue(out, a.Location()), nil } -func mergePrimitive(a, b config.Value) (config.Value, error) { +func mergePrimitive(a, b dyn.Value) (dyn.Value, error) { // Merging primitive values means using the incoming value. 
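	// For example, Merge(dyn.V("bar"), dyn.V("baz")) yields dyn.V("baz").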
return b, nil } diff --git a/libs/config/merge/merge_test.go b/libs/dyn/merge/merge_test.go similarity index 67% rename from libs/config/merge/merge_test.go rename to libs/dyn/merge/merge_test.go index c2e89f60..c4928e35 100644 --- a/libs/config/merge/merge_test.go +++ b/libs/dyn/merge/merge_test.go @@ -3,19 +3,19 @@ package merge import ( "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" ) func TestMergeMaps(t *testing.T) { - v1 := config.V(map[string]config.Value{ - "foo": config.V("bar"), - "bar": config.V("baz"), + v1 := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), }) - v2 := config.V(map[string]config.Value{ - "bar": config.V("qux"), - "qux": config.V("foo"), + v2 := dyn.V(map[string]dyn.Value{ + "bar": dyn.V("qux"), + "qux": dyn.V("foo"), }) // Merge v2 into v1. @@ -42,13 +42,13 @@ func TestMergeMaps(t *testing.T) { } func TestMergeMapsNil(t *testing.T) { - v := config.V(map[string]config.Value{ - "foo": config.V("bar"), + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), }) // Merge nil into v. { - out, err := Merge(v, config.NilValue) + out, err := Merge(v, dyn.NilValue) assert.NoError(t, err) assert.Equal(t, map[string]any{ "foo": "bar", @@ -57,7 +57,7 @@ func TestMergeMapsNil(t *testing.T) { // Merge v into nil. { - out, err := Merge(config.NilValue, v) + out, err := Merge(dyn.NilValue, v) assert.NoError(t, err) assert.Equal(t, map[string]any{ "foo": "bar", @@ -66,29 +66,29 @@ func TestMergeMapsNil(t *testing.T) { } func TestMergeMapsError(t *testing.T) { - v := config.V(map[string]config.Value{ - "foo": config.V("bar"), + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), }) - other := config.V("string") + other := dyn.V("string") // Merge a string into v. { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge map with string") - assert.Equal(t, config.NilValue, out) + assert.Equal(t, dyn.NilValue, out) } } func TestMergeSequences(t *testing.T) { - v1 := config.V([]config.Value{ - config.V("bar"), - config.V("baz"), + v1 := dyn.V([]dyn.Value{ + dyn.V("bar"), + dyn.V("baz"), }) - v2 := config.V([]config.Value{ - config.V("qux"), - config.V("foo"), + v2 := dyn.V([]dyn.Value{ + dyn.V("qux"), + dyn.V("foo"), }) // Merge v2 into v1. @@ -117,13 +117,13 @@ func TestMergeSequences(t *testing.T) { } func TestMergeSequencesNil(t *testing.T) { - v := config.V([]config.Value{ - config.V("bar"), + v := dyn.V([]dyn.Value{ + dyn.V("bar"), }) // Merge nil into v. { - out, err := Merge(v, config.NilValue) + out, err := Merge(v, dyn.NilValue) assert.NoError(t, err) assert.Equal(t, []any{ "bar", @@ -132,7 +132,7 @@ func TestMergeSequencesNil(t *testing.T) { // Merge v into nil. { - out, err := Merge(config.NilValue, v) + out, err := Merge(dyn.NilValue, v) assert.NoError(t, err) assert.Equal(t, []any{ "bar", @@ -141,23 +141,23 @@ func TestMergeSequencesNil(t *testing.T) { } func TestMergeSequencesError(t *testing.T) { - v := config.V([]config.Value{ - config.V("bar"), + v := dyn.V([]dyn.Value{ + dyn.V("bar"), }) - other := config.V("string") + other := dyn.V("string") // Merge a string into v. { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge sequence with string") - assert.Equal(t, config.NilValue, out) + assert.Equal(t, dyn.NilValue, out) } } func TestMergePrimitives(t *testing.T) { - v1 := config.V("bar") - v2 := config.V("baz") + v1 := dyn.V("bar") + v2 := dyn.V("baz") // Merge v2 into v1. 
{ @@ -175,33 +175,33 @@ func TestMergePrimitives(t *testing.T) { } func TestMergePrimitivesNil(t *testing.T) { - v := config.V("bar") + v := dyn.V("bar") // Merge nil into v. { - out, err := Merge(v, config.NilValue) + out, err := Merge(v, dyn.NilValue) assert.NoError(t, err) assert.Equal(t, "bar", out.AsAny()) } // Merge v into nil. { - out, err := Merge(config.NilValue, v) + out, err := Merge(dyn.NilValue, v) assert.NoError(t, err) assert.Equal(t, "bar", out.AsAny()) } } func TestMergePrimitivesError(t *testing.T) { - v := config.V("bar") - other := config.V(map[string]config.Value{ - "foo": config.V("bar"), + v := dyn.V("bar") + other := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), }) // Merge a map into v. { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge string with map") - assert.Equal(t, config.NilValue, out) + assert.Equal(t, dyn.NilValue, out) } } diff --git a/libs/config/path.go b/libs/dyn/path.go similarity index 99% rename from libs/config/path.go rename to libs/dyn/path.go index f1abf48c..bfd93dad 100644 --- a/libs/config/path.go +++ b/libs/dyn/path.go @@ -1,4 +1,4 @@ -package config +package dyn import ( "bytes" diff --git a/libs/config/path_string.go b/libs/dyn/path_string.go similarity index 99% rename from libs/config/path_string.go rename to libs/dyn/path_string.go index 9538ad27..0fa0c682 100644 --- a/libs/config/path_string.go +++ b/libs/dyn/path_string.go @@ -1,4 +1,4 @@ -package config +package dyn import ( "fmt" diff --git a/libs/config/path_string_test.go b/libs/dyn/path_string_test.go similarity index 96% rename from libs/config/path_string_test.go rename to libs/dyn/path_string_test.go index 89e64561..9af394c6 100644 --- a/libs/config/path_string_test.go +++ b/libs/dyn/path_string_test.go @@ -1,10 +1,10 @@ -package config_test +package dyn_test import ( "fmt" "testing" - . "github.com/databricks/cli/libs/config" + . "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" ) diff --git a/libs/dyn/path_test.go b/libs/dyn/path_test.go new file mode 100644 index 00000000..c4ea26c4 --- /dev/null +++ b/libs/dyn/path_test.go @@ -0,0 +1,76 @@ +package dyn_test + +import ( + "testing" + + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/assert" +) + +func TestPathAppend(t *testing.T) { + p := dyn.NewPath(dyn.Key("foo")) + + // Single arg. + p1 := p.Append(dyn.Key("bar")) + assert.True(t, p1.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar")))) + + // Multiple args. + p2 := p.Append(dyn.Key("bar"), dyn.Index(1)) + assert.True(t, p2.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar"), dyn.Index(1)))) +} + +func TestPathJoin(t *testing.T) { + p := dyn.NewPath(dyn.Key("foo")) + + // Single arg. + p1 := p.Join(dyn.NewPath(dyn.Key("bar"))) + assert.True(t, p1.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar")))) + + // Multiple args. 
+ p2 := p.Join(dyn.NewPath(dyn.Key("bar")), dyn.NewPath(dyn.Index(1))) + assert.True(t, p2.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar"), dyn.Index(1)))) +} + +func TestPathEqualEmpty(t *testing.T) { + assert.True(t, dyn.EmptyPath.Equal(dyn.EmptyPath)) +} + +func TestPathEqual(t *testing.T) { + p1 := dyn.NewPath(dyn.Key("foo"), dyn.Index(1)) + p2 := dyn.NewPath(dyn.Key("bar"), dyn.Index(2)) + assert.False(t, p1.Equal(p2), "expected %q to not equal %q", p1, p2) + + p3 := dyn.NewPath(dyn.Key("foo"), dyn.Index(1)) + assert.True(t, p1.Equal(p3), "expected %q to equal %q", p1, p3) + + p4 := dyn.NewPath(dyn.Key("foo"), dyn.Index(1), dyn.Key("bar"), dyn.Index(2)) + assert.False(t, p1.Equal(p4), "expected %q to not equal %q", p1, p4) +} + +func TestPathHasPrefixEmpty(t *testing.T) { + empty := dyn.EmptyPath + nonEmpty := dyn.NewPath(dyn.Key("foo")) + assert.True(t, empty.HasPrefix(empty)) + assert.True(t, nonEmpty.HasPrefix(empty)) + assert.False(t, empty.HasPrefix(nonEmpty)) +} + +func TestPathHasPrefix(t *testing.T) { + p1 := dyn.NewPath(dyn.Key("foo"), dyn.Index(1)) + p2 := dyn.NewPath(dyn.Key("bar"), dyn.Index(2)) + assert.False(t, p1.HasPrefix(p2), "expected %q to not have prefix %q", p1, p2) + + p3 := dyn.NewPath(dyn.Key("foo")) + assert.True(t, p1.HasPrefix(p3), "expected %q to have prefix %q", p1, p3) +} + +func TestPathString(t *testing.T) { + p1 := dyn.NewPath(dyn.Key("foo"), dyn.Index(1)) + assert.Equal(t, "foo[1]", p1.String()) + + p2 := dyn.NewPath(dyn.Key("bar"), dyn.Index(2), dyn.Key("baz")) + assert.Equal(t, "bar[2].baz", p2.String()) + + p3 := dyn.NewPath(dyn.Key("foo"), dyn.Index(1), dyn.Key("bar"), dyn.Index(2), dyn.Key("baz")) + assert.Equal(t, "foo[1].bar[2].baz", p3.String()) +} diff --git a/libs/config/value.go b/libs/dyn/value.go similarity index 99% rename from libs/config/value.go rename to libs/dyn/value.go index fe0ced9b..9ac738f9 100644 --- a/libs/config/value.go +++ b/libs/dyn/value.go @@ -1,4 +1,4 @@ -package config +package dyn import ( "fmt" diff --git a/libs/config/value_test.go b/libs/dyn/value_test.go similarity index 55% rename from libs/config/value_test.go rename to libs/dyn/value_test.go index 6c8befc7..5fa45f15 100644 --- a/libs/config/value_test.go +++ b/libs/dyn/value_test.go @@ -1,35 +1,35 @@ -package config_test +package dyn_test import ( "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" ) func TestValueIsAnchor(t *testing.T) { - var zero config.Value + var zero dyn.Value assert.False(t, zero.IsAnchor()) mark := zero.MarkAnchor() assert.True(t, mark.IsAnchor()) } func TestValueAsMap(t *testing.T) { - var zeroValue config.Value + var zeroValue dyn.Value m, ok := zeroValue.AsMap() assert.False(t, ok) assert.Nil(t, m) - var intValue = config.NewValue(1, config.Location{}) + var intValue = dyn.NewValue(1, dyn.Location{}) m, ok = intValue.AsMap() assert.False(t, ok) assert.Nil(t, m) - var mapValue = config.NewValue( - map[string]config.Value{ - "key": config.NewValue("value", config.Location{File: "file", Line: 1, Column: 2}), + var mapValue = dyn.NewValue( + map[string]dyn.Value{ + "key": dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), }, - config.Location{File: "file", Line: 1, Column: 2}, + dyn.Location{File: "file", Line: 1, Column: 2}, ) m, ok = mapValue.AsMap() assert.True(t, ok) @@ -37,8 +37,8 @@ func TestValueAsMap(t *testing.T) { } func TestValueIsValid(t *testing.T) { - var zeroValue config.Value + var zeroValue dyn.Value assert.False(t, 
zeroValue.IsValid()) - var intValue = config.NewValue(1, config.Location{}) + var intValue = dyn.NewValue(1, dyn.Location{}) assert.True(t, intValue.IsValid()) } diff --git a/libs/config/walk.go b/libs/dyn/walk.go similarity index 99% rename from libs/config/walk.go rename to libs/dyn/walk.go index ce058338..138816be 100644 --- a/libs/config/walk.go +++ b/libs/dyn/walk.go @@ -1,4 +1,4 @@ -package config +package dyn import "errors" diff --git a/libs/config/walk_test.go b/libs/dyn/walk_test.go similarity index 98% rename from libs/config/walk_test.go rename to libs/dyn/walk_test.go index 806ca256..1b94ad90 100644 --- a/libs/config/walk_test.go +++ b/libs/dyn/walk_test.go @@ -1,10 +1,10 @@ -package config_test +package dyn_test import ( "errors" "testing" - . "github.com/databricks/cli/libs/config" + . "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/libs/config/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go similarity index 61% rename from libs/config/yamlloader/loader.go rename to libs/dyn/yamlloader/loader.go index 6472c137..899e1d7b 100644 --- a/libs/config/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "gopkg.in/yaml.v3" ) @@ -15,7 +15,7 @@ type loader struct { path string } -func errorf(loc config.Location, format string, args ...interface{}) error { +func errorf(loc dyn.Location, format string, args ...interface{}) error { return fmt.Errorf("yaml (%s): %s", loc, fmt.Sprintf(format, args...)) } @@ -25,22 +25,22 @@ func newLoader(path string) *loader { } } -func (d *loader) location(node *yaml.Node) config.Location { - return config.Location{ +func (d *loader) location(node *yaml.Node) dyn.Location { + return dyn.Location{ File: d.path, Line: node.Line, Column: node.Column, } } -func (d *loader) load(node *yaml.Node) (config.Value, error) { - loc := config.Location{ +func (d *loader) load(node *yaml.Node) (dyn.Value, error) { + loc := dyn.Location{ File: d.path, Line: node.Line, Column: node.Column, } - var value config.Value + var value dyn.Value var err error switch node.Kind { @@ -55,7 +55,7 @@ func (d *loader) load(node *yaml.Node) (config.Value, error) { case yaml.AliasNode: value, err = d.loadAlias(node, loc) default: - return config.NilValue, errorf(loc, "unknown node kind: %v", node.Kind) + return dyn.NilValue, errorf(loc, "unknown node kind: %v", node.Kind) } if err != nil { @@ -71,35 +71,35 @@ func (d *loader) load(node *yaml.Node) (config.Value, error) { return value, nil } -func (d *loader) loadDocument(node *yaml.Node, loc config.Location) (config.Value, error) { +func (d *loader) loadDocument(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { return d.load(node.Content[0]) } -func (d *loader) loadSequence(node *yaml.Node, loc config.Location) (config.Value, error) { - acc := make([]config.Value, len(node.Content)) +func (d *loader) loadSequence(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { + acc := make([]dyn.Value, len(node.Content)) for i, n := range node.Content { v, err := d.load(n) if err != nil { - return config.NilValue, err + return dyn.NilValue, err } acc[i] = v } - return config.NewValue(acc, loc), nil + return dyn.NewValue(acc, loc), nil } -func (d *loader) loadMapping(node *yaml.Node, loc config.Location) (config.Value, error) { +func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { var merge *yaml.Node 
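	// "merge" captures a YAML merge key ("<<") if present; it is resolved
	// after the loop, with explicitly set keys taking precedence.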
- acc := make(map[string]config.Value) + acc := make(map[string]dyn.Value) for i := 0; i < len(node.Content); i += 2 { key := node.Content[i] val := node.Content[i+1] // Assert that keys are strings if key.Kind != yaml.ScalarNode { - return config.NilValue, errorf(loc, "key is not a scalar") + return dyn.NilValue, errorf(loc, "key is not a scalar") } st := key.ShortTag() @@ -113,19 +113,19 @@ func (d *loader) loadMapping(node *yaml.Node, loc config.Location) (config.Value merge = val continue default: - return config.NilValue, errorf(loc, "invalid key tag: %v", st) + return dyn.NilValue, errorf(loc, "invalid key tag: %v", st) } v, err := d.load(val) if err != nil { - return config.NilValue, err + return dyn.NilValue, err } acc[key.Value] = v } if merge == nil { - return config.NewValue(acc, loc), nil + return dyn.NewValue(acc, loc), nil } // Build location for the merge node. @@ -141,68 +141,68 @@ func (d *loader) loadMapping(node *yaml.Node, loc config.Location) (config.Value case yaml.AliasNode: mnodes = []*yaml.Node{merge} default: - return config.NilValue, merr + return dyn.NilValue, merr } // Build a sequence of values to merge. // The entries that we already accumulated have precedence. - var seq []map[string]config.Value + var seq []map[string]dyn.Value for _, n := range mnodes { v, err := d.load(n) if err != nil { - return config.NilValue, err + return dyn.NilValue, err } m, ok := v.AsMap() if !ok { - return config.NilValue, merr + return dyn.NilValue, merr } seq = append(seq, m) } // Append the accumulated entries to the sequence. seq = append(seq, acc) - out := make(map[string]config.Value) + out := make(map[string]dyn.Value) for _, m := range seq { for k, v := range m { out[k] = v } } - return config.NewValue(out, loc), nil + return dyn.NewValue(out, loc), nil } -func (d *loader) loadScalar(node *yaml.Node, loc config.Location) (config.Value, error) { +func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { st := node.ShortTag() switch st { case "!!str": - return config.NewValue(node.Value, loc), nil + return dyn.NewValue(node.Value, loc), nil case "!!bool": switch strings.ToLower(node.Value) { case "true": - return config.NewValue(true, loc), nil + return dyn.NewValue(true, loc), nil case "false": - return config.NewValue(false, loc), nil + return dyn.NewValue(false, loc), nil default: - return config.NilValue, errorf(loc, "invalid bool value: %v", node.Value) + return dyn.NilValue, errorf(loc, "invalid bool value: %v", node.Value) } case "!!int": i64, err := strconv.ParseInt(node.Value, 10, 64) if err != nil { - return config.NilValue, errorf(loc, "invalid int value: %v", node.Value) + return dyn.NilValue, errorf(loc, "invalid int value: %v", node.Value) } // Use regular int type instead of int64 if possible. 
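		// (Values within the int32 range come back as int; larger values
		// keep the explicit int64 type.)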
if i64 >= math.MinInt32 && i64 <= math.MaxInt32 { - return config.NewValue(int(i64), loc), nil + return dyn.NewValue(int(i64), loc), nil } - return config.NewValue(i64, loc), nil + return dyn.NewValue(i64, loc), nil case "!!float": f64, err := strconv.ParseFloat(node.Value, 64) if err != nil { - return config.NilValue, errorf(loc, "invalid float value: %v", node.Value) + return dyn.NilValue, errorf(loc, "invalid float value: %v", node.Value) } - return config.NewValue(f64, loc), nil + return dyn.NewValue(f64, loc), nil case "!!null": - return config.NewValue(nil, loc), nil + return dyn.NewValue(nil, loc), nil case "!!timestamp": // Try a couple of layouts for _, layout := range []string{ @@ -213,15 +213,15 @@ func (d *loader) loadScalar(node *yaml.Node, loc config.Location) (config.Value, } { t, terr := time.Parse(layout, node.Value) if terr == nil { - return config.NewValue(t, loc), nil + return dyn.NewValue(t, loc), nil } } - return config.NilValue, errorf(loc, "invalid timestamp value: %v", node.Value) + return dyn.NilValue, errorf(loc, "invalid timestamp value: %v", node.Value) default: - return config.NilValue, errorf(loc, "unknown tag: %v", st) + return dyn.NilValue, errorf(loc, "unknown tag: %v", st) } } -func (d *loader) loadAlias(node *yaml.Node, loc config.Location) (config.Value, error) { +func (d *loader) loadAlias(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { return d.load(node.Alias) } diff --git a/libs/config/yamlloader/testdata/anchor_01.yml b/libs/dyn/yamlloader/testdata/anchor_01.yml similarity index 100% rename from libs/config/yamlloader/testdata/anchor_01.yml rename to libs/dyn/yamlloader/testdata/anchor_01.yml diff --git a/libs/config/yamlloader/testdata/anchor_02.yml b/libs/dyn/yamlloader/testdata/anchor_02.yml similarity index 100% rename from libs/config/yamlloader/testdata/anchor_02.yml rename to libs/dyn/yamlloader/testdata/anchor_02.yml diff --git a/libs/config/yamlloader/testdata/anchor_03.yml b/libs/dyn/yamlloader/testdata/anchor_03.yml similarity index 100% rename from libs/config/yamlloader/testdata/anchor_03.yml rename to libs/dyn/yamlloader/testdata/anchor_03.yml diff --git a/libs/config/yamlloader/testdata/anchor_04.yml b/libs/dyn/yamlloader/testdata/anchor_04.yml similarity index 100% rename from libs/config/yamlloader/testdata/anchor_04.yml rename to libs/dyn/yamlloader/testdata/anchor_04.yml diff --git a/libs/config/yamlloader/testdata/anchor_05.yml b/libs/dyn/yamlloader/testdata/anchor_05.yml similarity index 100% rename from libs/config/yamlloader/testdata/anchor_05.yml rename to libs/dyn/yamlloader/testdata/anchor_05.yml diff --git a/libs/config/yamlloader/testdata/anchor_06.yml b/libs/dyn/yamlloader/testdata/anchor_06.yml similarity index 100% rename from libs/config/yamlloader/testdata/anchor_06.yml rename to libs/dyn/yamlloader/testdata/anchor_06.yml diff --git a/libs/config/yamlloader/testdata/anchor_07.yml b/libs/dyn/yamlloader/testdata/anchor_07.yml similarity index 100% rename from libs/config/yamlloader/testdata/anchor_07.yml rename to libs/dyn/yamlloader/testdata/anchor_07.yml diff --git a/libs/config/yamlloader/testdata/anchor_08.yml b/libs/dyn/yamlloader/testdata/anchor_08.yml similarity index 100% rename from libs/config/yamlloader/testdata/anchor_08.yml rename to libs/dyn/yamlloader/testdata/anchor_08.yml diff --git a/libs/config/yamlloader/testdata/empty.yml b/libs/dyn/yamlloader/testdata/empty.yml similarity index 100% rename from libs/config/yamlloader/testdata/empty.yml rename to libs/dyn/yamlloader/testdata/empty.yml diff 
--git a/libs/config/yamlloader/testdata/error_01.yml b/libs/dyn/yamlloader/testdata/error_01.yml similarity index 100% rename from libs/config/yamlloader/testdata/error_01.yml rename to libs/dyn/yamlloader/testdata/error_01.yml diff --git a/libs/config/yamlloader/testdata/error_02.yml b/libs/dyn/yamlloader/testdata/error_02.yml similarity index 100% rename from libs/config/yamlloader/testdata/error_02.yml rename to libs/dyn/yamlloader/testdata/error_02.yml diff --git a/libs/config/yamlloader/testdata/error_03.yml b/libs/dyn/yamlloader/testdata/error_03.yml similarity index 100% rename from libs/config/yamlloader/testdata/error_03.yml rename to libs/dyn/yamlloader/testdata/error_03.yml diff --git a/libs/config/yamlloader/testdata/mix_01.yml b/libs/dyn/yamlloader/testdata/mix_01.yml similarity index 100% rename from libs/config/yamlloader/testdata/mix_01.yml rename to libs/dyn/yamlloader/testdata/mix_01.yml diff --git a/libs/config/yamlloader/testdata/mix_02.yml b/libs/dyn/yamlloader/testdata/mix_02.yml similarity index 100% rename from libs/config/yamlloader/testdata/mix_02.yml rename to libs/dyn/yamlloader/testdata/mix_02.yml diff --git a/libs/config/yamlloader/yaml.go b/libs/dyn/yamlloader/yaml.go similarity index 56% rename from libs/config/yamlloader/yaml.go rename to libs/dyn/yamlloader/yaml.go index a3cc7284..a18324ff 100644 --- a/libs/config/yamlloader/yaml.go +++ b/libs/dyn/yamlloader/yaml.go @@ -3,19 +3,19 @@ package yamlloader import ( "io" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "gopkg.in/yaml.v3" ) -func LoadYAML(path string, r io.Reader) (config.Value, error) { +func LoadYAML(path string, r io.Reader) (dyn.Value, error) { var node yaml.Node dec := yaml.NewDecoder(r) err := dec.Decode(&node) if err != nil { if err == io.EOF { - return config.NilValue, nil + return dyn.NilValue, nil } - return config.NilValue, err + return dyn.NilValue, err } return newLoader(path).load(&node) diff --git a/libs/config/yamlloader/yaml_anchor_test.go b/libs/dyn/yamlloader/yaml_anchor_test.go similarity index 61% rename from libs/config/yamlloader/yaml_anchor_test.go rename to libs/dyn/yamlloader/yaml_anchor_test.go index a8b66686..05beb540 100644 --- a/libs/config/yamlloader/yaml_anchor_test.go +++ b/libs/dyn/yamlloader/yaml_anchor_test.go @@ -3,14 +3,14 @@ package yamlloader_test import ( "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" ) func TestYAMLAnchor01(t *testing.T) { file := "testdata/anchor_01.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) assert.True(t, self.Get("defaults").IsAnchor()) assert.False(t, self.Get("shirt1").IsAnchor()) @@ -18,31 +18,31 @@ func TestYAMLAnchor01(t *testing.T) { pattern := self.Get("shirt1").Get("pattern") assert.Equal(t, "striped", pattern.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 8, Column: 12}, pattern.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 8, Column: 12}, pattern.Location()) } func TestYAMLAnchor02(t *testing.T) { file := "testdata/anchor_02.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) color := self.Get("shirt").Get("color") assert.Equal(t, "red", color.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 4, Column: 10}, color.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 4, Column: 10}, color.Location()) primary := 
self.Get("shirt").Get("primary") assert.Equal(t, "cotton", primary.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 8, Column: 12}, primary.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 8, Column: 12}, primary.Location()) pattern := self.Get("shirt").Get("pattern") assert.Equal(t, "striped", pattern.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 13, Column: 12}, pattern.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 13, Column: 12}, pattern.Location()) } func TestYAMLAnchor03(t *testing.T) { file := "testdata/anchor_03.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) // Assert the override took place. blue := self.Get("shirt").Get("color") @@ -55,63 +55,63 @@ func TestYAMLAnchor03(t *testing.T) { func TestYAMLAnchor04(t *testing.T) { file := "testdata/anchor_04.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) p1 := self.Get("person1").Get("address").Get("city") assert.Equal(t, "San Francisco", p1.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 4, Column: 9}, p1.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 4, Column: 9}, p1.Location()) p2 := self.Get("person2").Get("address").Get("city") assert.Equal(t, "Los Angeles", p2.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 16, Column: 11}, p2.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 16, Column: 11}, p2.Location()) } func TestYAMLAnchor05(t *testing.T) { file := "testdata/anchor_05.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) features := self.Get("phone1").Get("features") assert.Equal(t, "wifi", features.Index(0).AsAny()) - assert.Equal(t, config.Location{File: file, Line: 4, Column: 5}, features.Index(0).Location()) + assert.Equal(t, dyn.Location{File: file, Line: 4, Column: 5}, features.Index(0).Location()) assert.Equal(t, "bluetooth", features.Index(1).AsAny()) - assert.Equal(t, config.Location{File: file, Line: 5, Column: 5}, features.Index(1).Location()) + assert.Equal(t, dyn.Location{File: file, Line: 5, Column: 5}, features.Index(1).Location()) } func TestYAMLAnchor06(t *testing.T) { file := "testdata/anchor_06.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) greeting := self.Get("greeting1") assert.Equal(t, "Hello, World!", greeting.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 2, Column: 16}, greeting.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 2, Column: 16}, greeting.Location()) } func TestYAMLAnchor07(t *testing.T) { file := "testdata/anchor_07.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) name := self.Get("person1").Get("name") assert.Equal(t, "Alice", name.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 5, Column: 9}, name.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 5, Column: 9}, name.Location()) age := self.Get("person1").Get("age") assert.Equal(t, 25, age.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 2, Column: 13}, age.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 2, Column: 13}, age.Location()) } func TestYAMLAnchor08(t *testing.T) { file := "testdata/anchor_08.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) username := 
self.Get("user1").Get("username") assert.Equal(t, "user1", username.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 5, Column: 13}, username.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 5, Column: 13}, username.Location()) active := self.Get("user1").Get("active") assert.Equal(t, true, active.AsAny()) - assert.Equal(t, config.Location{File: file, Line: 2, Column: 11}, active.Location()) + assert.Equal(t, dyn.Location{File: file, Line: 2, Column: 11}, active.Location()) } diff --git a/libs/config/yamlloader/yaml_error_test.go b/libs/dyn/yamlloader/yaml_error_test.go similarity index 94% rename from libs/config/yamlloader/yaml_error_test.go rename to libs/dyn/yamlloader/yaml_error_test.go index 2685042f..11c444ad 100644 --- a/libs/config/yamlloader/yaml_error_test.go +++ b/libs/dyn/yamlloader/yaml_error_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/databricks/cli/libs/config/yamlloader" + "github.com/databricks/cli/libs/dyn/yamlloader" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" diff --git a/libs/config/yamlloader/yaml_mix_test.go b/libs/dyn/yamlloader/yaml_mix_test.go similarity index 79% rename from libs/config/yamlloader/yaml_mix_test.go rename to libs/dyn/yamlloader/yaml_mix_test.go index 9cd0753d..307b93db 100644 --- a/libs/config/yamlloader/yaml_mix_test.go +++ b/libs/dyn/yamlloader/yaml_mix_test.go @@ -3,14 +3,14 @@ package yamlloader_test import ( "testing" - "github.com/databricks/cli/libs/config" + "github.com/databricks/cli/libs/dyn" "github.com/stretchr/testify/assert" ) func TestYAMLMix01(t *testing.T) { file := "testdata/mix_01.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) assert.True(t, self.Get("base_address").IsAnchor()) assert.False(t, self.Get("office_address").IsAnchor()) @@ -19,7 +19,7 @@ func TestYAMLMix01(t *testing.T) { func TestYAMLMix02(t *testing.T) { file := "testdata/mix_02.yml" self := loadYAML(t, file) - assert.NotEqual(t, config.NilValue, self) + assert.NotEqual(t, dyn.NilValue, self) assert.True(t, self.Get("base_colors").IsAnchor()) assert.False(t, self.Get("theme").IsAnchor()) diff --git a/libs/config/yamlloader/yaml_test.go b/libs/dyn/yamlloader/yaml_test.go similarity index 76% rename from libs/config/yamlloader/yaml_test.go rename to libs/dyn/yamlloader/yaml_test.go index ab61f071..14269fee 100644 --- a/libs/config/yamlloader/yaml_test.go +++ b/libs/dyn/yamlloader/yaml_test.go @@ -5,14 +5,14 @@ import ( "os" "testing" - "github.com/databricks/cli/libs/config" - "github.com/databricks/cli/libs/config/yamlloader" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/yamlloader" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) -func loadYAML(t *testing.T, path string) config.Value { +func loadYAML(t *testing.T, path string) dyn.Value { input, err := os.ReadFile(path) require.NoError(t, err) @@ -31,5 +31,5 @@ func loadYAML(t *testing.T, path string) config.Value { func TestYAMLEmpty(t *testing.T) { self := loadYAML(t, "testdata/empty.yml") - assert.Equal(t, config.NilValue, self) + assert.Equal(t, dyn.NilValue, self) } From fa3c8b1017aad050757fa1074fa32f2602cb33d0 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 22 Dec 2023 15:45:53 +0100 Subject: [PATCH 303/310] Use resource key as name in permissions code (#1087) ## Changes The code relied on the `Name` property being accessible for every resource. 
This is generally true, but because these property structs are embedded as pointers, they can be nil. This is also why the tests had to initialize the embedded struct to pass. This changes the approach to use the keys from the resource map instead, so that we no longer rely on the non-nil embedded struct. Note: we should evaluate whether we should turn these into values instead of pointers. I don't recall if we get value from them being pointers. ## Tests Unit tests pass. --- bundle/permissions/mutator.go | 20 ++++++++++---------- bundle/permissions/mutator_test.go | 26 ++++++++++---------------- 2 files changed, 20 insertions(+), 26 deletions(-) diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index 025556f3..54925d1c 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -72,60 +72,60 @@ func validate(b *bundle.Bundle) error { } func applyForJobs(ctx context.Context, b *bundle.Bundle) { - for _, job := range b.Config.Resources.Jobs { + for key, job := range b.Config.Resources.Jobs { job.Permissions = append(job.Permissions, convert( ctx, b.Config.Permissions, job.Permissions, - job.Name, + key, levelsMap["jobs"], )...) } } func applyForPipelines(ctx context.Context, b *bundle.Bundle) { - for _, pipeline := range b.Config.Resources.Pipelines { + for key, pipeline := range b.Config.Resources.Pipelines { pipeline.Permissions = append(pipeline.Permissions, convert( ctx, b.Config.Permissions, pipeline.Permissions, - pipeline.Name, + key, levelsMap["pipelines"], )...) } } func applyForMlExperiments(ctx context.Context, b *bundle.Bundle) { - for _, experiment := range b.Config.Resources.Experiments { + for key, experiment := range b.Config.Resources.Experiments { experiment.Permissions = append(experiment.Permissions, convert( ctx, b.Config.Permissions, experiment.Permissions, - experiment.Name, + key, levelsMap["mlflow_experiments"], )...) } } func applyForMlModels(ctx context.Context, b *bundle.Bundle) { - for _, model := range b.Config.Resources.Models { + for key, model := range b.Config.Resources.Models { model.Permissions = append(model.Permissions, convert( ctx, b.Config.Permissions, model.Permissions, - model.Name, + key, levelsMap["mlflow_models"], )...) } } func applyForModelServiceEndpoints(ctx context.Context, b *bundle.Bundle) { - for _, model := range b.Config.Resources.ModelServingEndpoints { + for key, model := range b.Config.Resources.ModelServingEndpoints { model.Permissions = append(model.Permissions, convert( ctx, b.Config.Permissions, model.Permissions, - model.Name, + key, levelsMap["model_serving_endpoints"], )...)
} diff --git a/bundle/permissions/mutator_test.go b/bundle/permissions/mutator_test.go index d9bf3efe..62c0589d 100644 --- a/bundle/permissions/mutator_test.go +++ b/bundle/permissions/mutator_test.go @@ -7,10 +7,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/databricks/databricks-sdk-go/service/ml" - "github.com/databricks/databricks-sdk-go/service/pipelines" - "github.com/databricks/databricks-sdk-go/service/serving" "github.com/stretchr/testify/require" ) @@ -27,24 +23,24 @@ func TestApplyBundlePermissions(t *testing.T) { }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "job_1": {JobSettings: &jobs.JobSettings{}}, - "job_2": {JobSettings: &jobs.JobSettings{}}, + "job_1": {}, + "job_2": {}, }, Pipelines: map[string]*resources.Pipeline{ - "pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}}, - "pipeline_2": {PipelineSpec: &pipelines.PipelineSpec{}}, + "pipeline_1": {}, + "pipeline_2": {}, }, Models: map[string]*resources.MlflowModel{ - "model_1": {Model: &ml.Model{}}, - "model_2": {Model: &ml.Model{}}, + "model_1": {}, + "model_2": {}, }, Experiments: map[string]*resources.MlflowExperiment{ - "experiment_1": {Experiment: &ml.Experiment{}}, - "experiment_2": {Experiment: &ml.Experiment{}}, + "experiment_1": {}, + "experiment_2": {}, }, ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ - "endpoint_1": {CreateServingEndpoint: &serving.CreateServingEndpoint{}}, - "endpoint_2": {CreateServingEndpoint: &serving.CreateServingEndpoint{}}, + "endpoint_1": {}, + "endpoint_2": {}, }, }, }, @@ -116,13 +112,11 @@ func TestWarningOnOverlapPermission(t *testing.T) { Permissions: []resources.Permission{ {Level: CAN_VIEW, UserName: "TestUser"}, }, - JobSettings: &jobs.JobSettings{}, }, "job_2": { Permissions: []resources.Permission{ {Level: CAN_VIEW, UserName: "TestUser2"}, }, - JobSettings: &jobs.JobSettings{}, }, }, }, From f2408eda6285fb26b72676756b4be897f2afe2e7 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 22 Dec 2023 21:13:08 +0530 Subject: [PATCH 304/310] Add support for reprompts if user input does not match template schema (#946) ## Changes This PR adds retry logic to user input prompts, prompting users again if the value does not match the requirements specified in the bundle template schema. ## Tests Manually. Here's an example UX. The first prompt expects an integer and the second one a string made only from the letters "defg" ``` shreyas.goenka@THW32HFW6T cli % cli bundle init ~/mlops-stack Please enter an integer [123]: abc Validation failed: "abc" is not a integer Please enter an integer [123]: 123 Please enter a string [dddd]: apple Validation failed: invalid value for input_root_dir: "apple". Only characters the 'd', 'e', 'f', 'g' are allowed ``` --- libs/jsonschema/utils.go | 54 ++++++++++++++++++++--- libs/jsonschema/utils_test.go | 4 +- libs/template/config.go | 83 ++++++++++++++++++++++++----------- 3 files changed, 107 insertions(+), 34 deletions(-) diff --git a/libs/jsonschema/utils.go b/libs/jsonschema/utils.go index 9e65ed06..ff9b8831 100644 --- a/libs/jsonschema/utils.go +++ b/libs/jsonschema/utils.go @@ -7,6 +7,19 @@ import ( "strconv" ) +// This error indicates an failure to parse a string as a particular JSON schema type. 
+type parseStringError struct { + // Expected JSON schema type for the value + ExpectedType Type + + // The string value that failed to parse + Value string +} + +func (e parseStringError) Error() string { + return fmt.Sprintf("%q is not a %s", e.Value, e.ExpectedType) +} + // function to check whether a float value represents an integer func isIntegerValue(v float64) bool { return v == float64(int64(v)) @@ -108,11 +121,40 @@ func fromString(s string, T Type) (any, error) { // Return more readable error incase of a syntax error if errors.Is(err, strconv.ErrSyntax) { - return nil, fmt.Errorf("could not parse %q as a %s: %w", s, T, err) + return nil, parseStringError{ + ExpectedType: T, + Value: s, + } } return v, err } +// Error indicates a value entered by the user failed to match the pattern specified +// in the template schema. +type patternMatchError struct { + // The name of the property that failed to match the pattern + PropertyName string + + // The value of the property that failed to match the pattern + PropertyValue any + + // The regex pattern that the property value failed to match + Pattern string + + // Failure message to display to the user, if specified in the template + // schema + FailureMessage string +} + +func (e patternMatchError) Error() string { + // If custom user error message is defined, return error with the custom message + msg := e.FailureMessage + if msg == "" { + msg = fmt.Sprintf("Expected to match regex pattern: %s", e.Pattern) + } + return fmt.Sprintf("invalid value for %s: %q. %s", e.PropertyName, e.PropertyValue, msg) +} + func validatePatternMatch(name string, value any, propertySchema *Schema) error { if propertySchema.Pattern == "" { // Return early if no pattern is specified @@ -134,10 +176,10 @@ func validatePatternMatch(name string, value any, propertySchema *Schema) error return nil } - // If custom user error message is defined, return error with the custom message - msg := propertySchema.PatternMatchFailureMessage - if msg == "" { - msg = fmt.Sprintf("Expected to match regex pattern: %s", propertySchema.Pattern) + return patternMatchError{ + PropertyName: name, + PropertyValue: value, + Pattern: propertySchema.Pattern, + FailureMessage: propertySchema.PatternMatchFailureMessage, } - return fmt.Errorf("invalid value for %s: %q. %s", name, value, msg) } diff --git a/libs/jsonschema/utils_test.go b/libs/jsonschema/utils_test.go index b036a23f..89200dae 100644 --- a/libs/jsonschema/utils_test.go +++ b/libs/jsonschema/utils_test.go @@ -110,10 +110,10 @@ func TestTemplateFromString(t *testing.T) { assert.EqualError(t, err, "cannot parse string as object of type array. 
Value of string: \"qrt\"") _, err = fromString("abc", IntegerType) - assert.EqualError(t, err, "could not parse \"abc\" as a integer: strconv.ParseInt: parsing \"abc\": invalid syntax") + assert.EqualError(t, err, "\"abc\" is not a integer") _, err = fromString("1.0", IntegerType) - assert.EqualError(t, err, "could not parse \"1.0\" as a integer: strconv.ParseInt: parsing \"1.0\": invalid syntax") + assert.EqualError(t, err, "\"1.0\" is not a integer") _, err = fromString("1.0", "foobar") assert.EqualError(t, err, "unknown json schema type: \"foobar\"") diff --git a/libs/template/config.go b/libs/template/config.go index 85fa2265..6b6e7ed3 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -2,6 +2,7 @@ package template import ( "context" + "errors" "fmt" "github.com/databricks/cli/libs/cmdio" @@ -12,6 +13,14 @@ import ( // The latest template schema version supported by the CLI const latestSchemaVersion = 1 +type retriableError struct { + err error +} + +func (e retriableError) Error() string { + return e.err.Error() +} + type config struct { ctx context.Context values map[string]any @@ -143,6 +152,45 @@ func (c *config) skipPrompt(p jsonschema.Property, r *renderer) (bool, error) { return true, nil } +func (c *config) promptOnce(property *jsonschema.Schema, name, defaultVal, description string) error { + var userInput string + if property.Enum != nil { + // List options for the user to select from + options, err := property.EnumStringSlice() + if err != nil { + return err + } + userInput, err = cmdio.AskSelect(c.ctx, description, options) + if err != nil { + return err + } + } else { + var err error + userInput, err = cmdio.Ask(c.ctx, description, defaultVal) + if err != nil { + return err + } + } + + // Convert user input string back to a Go value + var err error + c.values[name], err = property.ParseString(userInput) + if err != nil { + // Show error and retry if validation fails + cmdio.LogString(c.ctx, fmt.Sprintf("Validation failed: %s", err.Error())) + return retriableError{err: err} + } + + // Validate the partial config which includes the new value + err = c.schema.ValidateInstance(c.values) + if err != nil { + // Show error and retry if validation fails + cmdio.LogString(c.ctx, fmt.Sprintf("Validation failed: %s", err.Error())) + return retriableError{err: err} + } + return nil +} + // Prompts user for values for properties that do not have a value set yet func (c *config) promptForValues(r *renderer) error { for _, p := range c.schema.OrderedProperties() { @@ -171,39 +219,22 @@ func (c *config) promptForValues(r *renderer) error { } } + // Compute description for the prompt description, err := r.executeTemplate(property.Description) if err != nil { return err } - // Get user input by running the prompt - var userInput string - if property.Enum != nil { - // convert list of enums to string slice - enums, err := property.EnumStringSlice() - if err != nil { + // We wrap this function in a retry loop to allow retries when the user + // entered value is invalid. 
+ for { + err = c.promptOnce(property, name, defaultVal, description) + if err == nil { + break + } + if !errors.As(err, &retriableError{}) { return err } - userInput, err = cmdio.AskSelect(c.ctx, description, enums) - if err != nil { - return err - } - } else { - userInput, err = cmdio.Ask(c.ctx, description, defaultVal) - if err != nil { - return err - } - } - - // Convert user input string back to a value - c.values[name], err = property.ParseString(userInput) - if err != nil { - return err - } - - // Validate the partial config based on this update - if err := c.schema.ValidateInstance(c.values); err != nil { - return err } } return nil From 10a8ce4562e3046cecbc55dca8980adfbe6d4307 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 27 Dec 2023 13:03:08 +0100 Subject: [PATCH 305/310] Improve experience for multiple builtin templates (#1052) ## Changes This enhances the template selection experience a bit as we add more and more built-in templates (like https://github.com/databricks/cli/pull/1051 and https://github.com/databricks/cli/pull/1059): ### New experience: [screenshot] ### Current experience: [screenshot] --------- Co-authored-by: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> --- cmd/bundle/init.go | 48 +++++++++++++++++++++++++++++++++-------- cmd/bundle/init_test.go | 14 +++++++++--- libs/cmdio/io.go | 30 +++++++++++++++----------- 3 files changed, 68 insertions(+), 24 deletions(-) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 18d76db1..db8250d0 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -27,35 +27,58 @@ type nativeTemplate struct { aliases []string } +const customTemplate = "custom..." + var nativeTemplates = []nativeTemplate{ { name: "default-python", - description: "The default Python template", + description: "The default Python template for Notebooks / Delta Live Tables / Workflows", }, { name: "mlops-stacks", gitUrl: "https://github.com/databricks/mlops-stacks", - description: "The Databricks MLOps Stacks template (https://github.com/databricks/mlops-stacks)", + description: "The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)", aliases: []string{"mlops-stack"}, }, + { + name: customTemplate, + description: "Bring your own template", + }, } -func nativeTemplateDescriptions() string { +// Return template descriptions for command-line help +func nativeTemplateHelpDescriptions() string { var lines []string for _, template := range nativeTemplates { - lines = append(lines, fmt.Sprintf("- %s: %s", template.name, template.description)) + if template.name != customTemplate { + lines = append(lines, fmt.Sprintf("- %s: %s", template.name, template.description)) + } } return strings.Join(lines, "\n") } -func nativeTemplateOptions() []string { - names := make([]string, 0, len(nativeTemplates)) +// Return template options for an interactive prompt +func nativeTemplateOptions() []cmdio.Tuple { + names := make([]cmdio.Tuple, 0, len(nativeTemplates)) for _, template := range nativeTemplates { - names = append(names, template.name) + tuple := cmdio.Tuple{ + Name: template.name, + Id: template.description, + } + names = append(names, tuple) } return names } +func getNativeTemplateByDescription(description string) string { + for _, template := range nativeTemplates { + if template.description == description { + return template.name + } + } + return "" +} + func getUrlForNativeTemplate(name string) string { for _, template := range nativeTemplates { if template.name == name { @@ -99,7 +122,7 @@ TEMPLATE_PATH 
optionally specifies which template to use. It can be one of the f - a local file system path with a template directory - a Git repository URL, e.g. https://github.com/my/repository -See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more information on templates.`, nativeTemplateDescriptions()), +See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more information on templates.`, nativeTemplateHelpDescriptions()), } var configFile string @@ -134,10 +157,17 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf if !cmdio.IsPromptSupported(ctx) { return errors.New("please specify a template") } - templatePath, err = cmdio.AskSelect(ctx, "Template to use", nativeTemplateOptions()) + description, err := cmdio.SelectOrdered(ctx, nativeTemplateOptions(), "Template to use") if err != nil { return err } + templatePath = getNativeTemplateByDescription(description) + } + + if templatePath == customTemplate { + cmdio.LogString(ctx, "Please specify a path or Git repository to use a custom template.") + cmdio.LogString(ctx, "See https://docs.databricks.com/en/dev-tools/bundles/templates.html to learn more about custom templates.") + return nil } // Expand templatePath to a git URL if it's an alias for a known native template diff --git a/cmd/bundle/init_test.go b/cmd/bundle/init_test.go index db4446bb..aa899159 100644 --- a/cmd/bundle/init_test.go +++ b/cmd/bundle/init_test.go @@ -3,6 +3,7 @@ package bundle import ( "testing" + "github.com/databricks/cli/libs/cmdio" "github.com/stretchr/testify/assert" ) @@ -27,11 +28,18 @@ func TestBundleInitRepoName(t *testing.T) { } func TestNativeTemplateOptions(t *testing.T) { - assert.Equal(t, []string{"default-python", "mlops-stacks"}, nativeTemplateOptions()) + expected := []cmdio.Tuple{ + {Name: "default-python", Id: "The default Python template for Notebooks / Delta Live Tables / Workflows"}, + {Name: "mlops-stacks", Id: "The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)"}, + {Name: "custom...", Id: "Bring your own template"}, + } + assert.Equal(t, expected, nativeTemplateOptions()) } -func TestNativeTemplateDescriptions(t *testing.T) { - assert.Equal(t, "- default-python: The default Python template\n- mlops-stacks: The Databricks MLOps Stacks template (https://github.com/databricks/mlops-stacks)", nativeTemplateDescriptions()) +func TestNativeTemplateHelpDescriptions(t *testing.T) { + expected := `- default-python: The default Python template for Notebooks / Delta Live Tables / Workflows +- mlops-stacks: The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)` + assert.Equal(t, expected, nativeTemplateHelpDescriptions()) } func TestGetUrlForNativeTemplate(t *testing.T) { diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index 8b421ef5..d20991a7 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -155,19 +155,13 @@ func RenderReader(ctx context.Context, r io.Reader) error { } } -type tuple struct{ Name, Id string } +type Tuple struct{ Name, Id string } -func (c *cmdIO) Select(names map[string]string, label string) (id string, err error) { +func (c *cmdIO) Select(items []Tuple, label string) (id string, err error) { if !c.interactive { return "", fmt.Errorf("expected to have %s", label) } - var items []tuple - for k, v := range names { - items = append(items, tuple{k, v}) - } - slices.SortFunc(items, func(a, b tuple) int { - return strings.Compare(a.Name, b.Name) - }) + idx, _, err := (&promptui.Select{ Label: label, Items: items, @@ -190,13 
+184,25 @@ func (c *cmdIO) Select(names map[string]string, label string) (id string, err er return } +// Show a selection prompt where the user can pick one of the name/id items. +// The items are sorted alphabetically by name. func Select[V any](ctx context.Context, names map[string]V, label string) (id string, err error) { c := fromContext(ctx) - stringNames := map[string]string{} + var items []Tuple for k, v := range names { - stringNames[k] = fmt.Sprint(v) + items = append(items, Tuple{k, fmt.Sprint(v)}) } - return c.Select(stringNames, label) + slices.SortFunc(items, func(a, b Tuple) int { + return strings.Compare(a.Name, b.Name) + }) + return c.Select(items, label) +} + +// Show a selection prompt where the user can pick one of the name/id items. +// The items appear in the order specified in the "items" argument. +func SelectOrdered(ctx context.Context, items []Tuple, label string) (id string, err error) { + c := fromContext(ctx) + return c.Select(items, label) } func (c *cmdIO) Secret(label string) (value string, err error) { From c6a2ce6ea8374f9298928e3cea479fd53d0a8ace Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Dec 2023 16:46:24 +0100 Subject: [PATCH 306/310] Bump github.com/hashicorp/terraform-exec from 0.19.0 to 0.20.0 (#1088) Bumps [github.com/hashicorp/terraform-exec](https://github.com/hashicorp/terraform-exec) from 0.19.0 to 0.20.0.
Release notes (sourced from github.com/hashicorp/terraform-exec's releases):

v0.20.0

ENHANCEMENTS:

- Add JSONNumber option to Show to enable json.Number representation of numerical values in returned tfjson.Plan and tfjson.State values (hashicorp/terraform-exec#427)

Commits:

- e37dda6 v0.20.0 [skip ci]
- 324f556 Update CHANGELOG.md
- ff2b3b9 Update CHANGELOG.md
- 7975ca6 Add ShowOption for configuring JSON decoding (#427)
- ba030d6 build(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 (#430)
- 922f644 Result of tsccr-helper -log-level=info gha update -latest . (#428)
- 1469cf8 build(deps): bump github.com/hashicorp/hc-install from 0.6.1 to 0.6.2 (#425)
- a2bb72f graph: update format for v1.7 (#424)
- ccb7b50 Result of tsccr-helper -log-level=info -pin-all-workflows . (#422)
- 961bd50 build(deps): bump github.com/hashicorp/terraform-json (#421)
- Additional commits viewable in the compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 70224949..6fbea4ed 100644 --- a/go.mod +++ b/go.mod @@ -10,8 +10,8 @@ require ( github.com/google/uuid v1.5.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.2 // MPL 2.0 - github.com/hashicorp/terraform-exec v0.19.0 // MPL 2.0 - github.com/hashicorp/terraform-json v0.18.0 // MPL 2.0 + github.com/hashicorp/terraform-exec v0.20.0 // MPL 2.0 + github.com/hashicorp/terraform-json v0.19.0 // MPL 2.0 github.com/imdario/mergo v0.3.15 // BSD-3-Clause github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT diff --git a/go.sum b/go.sum index 4256acbd..c37af7e2 100644 --- a/go.sum +++ b/go.sum @@ -96,10 +96,10 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= -github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= -github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= -github.com/hashicorp/terraform-json v0.18.0 h1:pCjgJEqqDESv4y0Tzdqfxr/edOIGkjs8keY42xfNBwU= -github.com/hashicorp/terraform-json v0.18.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= +github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= +github.com/hashicorp/terraform-json v0.19.0 h1:e9DBKC5sxDfiJT7Zoi+yRIwqLVtFur/fwK/FuE6AWsA= +github.com/hashicorp/terraform-json v0.19.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= From 0c850ddab59ab1c40f7a7cf5c176821d626321eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Dec 2023 16:46:36 +0100 Subject: [PATCH 307/310] Bump go.uber.org/mock from 0.3.0 to 0.4.0 (#1089) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [go.uber.org/mock](https://github.com/uber/mock) from 0.3.0 to 0.4.0.
Release notes (sourced from go.uber.org/mock's releases):

v0.4.0 (20 Dec 2023)

Added

- uber-go/mock#63: AnyOf matcher for values that satisfy at least one matcher.
- uber-go/mock#114: Regex matcher.

Fixed

- uber-go/mock#93: Ignore interfaces that contain type constraints.
- uber-go/mock#101: Fix race condition on Controller.Satisfied.
- uber-go/mock#121: Fix paths for windows.
- uber-go/mock#127: Propagate -mock_names to Recorder and ReturnCall types.
- uber-go/mock#132: Sanitize "any" package names.

Thanks to @​favonia @​hoonmin @​pshopper @​davidharrigan @​dlsniper @​merrett010 @​craig65535 @​chemidy @​tulzke @​UnAfraid @​JacobOaks @​sywhang for their contributions this release.

Commits:

- 74a29c6 Release v0.4.0 (#131)
- 37f6db3 mockgen: Sanitize the "any" package name (#132)
- 6dd8fe5 Fixed -mockNames not propagating to Recorder and ReturnCall types (#127)
- 7bf3d91 added a description of the exclude_interfaces flag to the README (#123)
- 5b48f95 MockGen header comment: trim .exe suffix on Windows (#119)
- 857e269 Update model.go (#115)
- 7fb6390 Use path.Join for expected package paths (#121)
- 94a7ac3 feat: add Regex() matcher (#114)
- b233940 Fix 111: Package comments are now split correctly from the rest of the commen...
- 892b665 fix: race condition on Controller.Satisfied (#101)
- Additional commits viewable in the compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6fbea4ed..9bab8917 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( ) require ( - go.uber.org/mock v0.3.0 + go.uber.org/mock v0.4.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index c37af7e2..30c92aef 100644 --- a/go.sum +++ b/go.sum @@ -154,8 +154,8 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= -go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= From 3b6681c30100477b35b248a32204f986a840050b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Dec 2023 17:26:54 +0100 Subject: [PATCH 308/310] Bump github.com/hashicorp/terraform-json from 0.18.0 to 0.20.0 (#1090) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/terraform-json](https://github.com/hashicorp/terraform-json) from 0.18.0 to 0.20.0.
Release notes (sourced from github.com/hashicorp/terraform-json's releases):

v0.20.0

ENHANCEMENTS:

Full Changelog: https://github.com/hashicorp/terraform-json/compare/v0.19.0...v0.20.0

v0.19.0

ENHANCEMENTS:

INTERNAL:

New Contributors

Full Changelog: https://github.com/hashicorp/terraform-json/compare/v0.18.0...v0.19.0

Commits:

- 884568c Merge pull request #117 from sebasslash/add-replace-paths-field
- e112901 Add ReplacePaths to Change struct
- 41fa8df Merge pull request #113 from hashicorp/bendbennett/issues-111
- c00e873 Merge pull request #116 from hashicorp/tsccr-auto-pinning/trusted/2023-12-18
- 15b3438 Result of tsccr-helper -log-level=info gha update -latest .
- 9afe2fc Result of tsccr-helper -log-level=info gha update -latest . (#115)
- daf8e04 github: Disable dependabot for GHA (#114)
- da9b6fa Unformat plan.json (#111)
- a2ced5d Add test coverage for handling of numeric values by Plan.UnmarshalJSON (#111)
- 970acde Add useJSONNumber field to Plan (#111)
- See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9bab8917..53c581b3 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/hashicorp/go-version v1.6.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.2 // MPL 2.0 github.com/hashicorp/terraform-exec v0.20.0 // MPL 2.0 - github.com/hashicorp/terraform-json v0.19.0 // MPL 2.0 + github.com/hashicorp/terraform-json v0.20.0 // MPL 2.0 github.com/imdario/mergo v0.3.15 // BSD-3-Clause github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT diff --git a/go.sum b/go.sum index 30c92aef..1f18fd34 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= -github.com/hashicorp/terraform-json v0.19.0 h1:e9DBKC5sxDfiJT7Zoi+yRIwqLVtFur/fwK/FuE6AWsA= -github.com/hashicorp/terraform-json v0.19.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-json v0.20.0 h1:cJcvn4gIOTi0SD7pIy+xiofV1zFA3hza+6K+fo52IX8= +github.com/hashicorp/terraform-json v0.20.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= From 206b1bf198d7f1f557e0fda6a7733a7e45770a85 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Thu, 28 Dec 2023 14:14:55 +0100 Subject: [PATCH 309/310] Tweak command groups in CLI help (#1092) ## Changes This tweaks the help output shown when using `databricks help`: * make `jobs` appear under `Workflows` (as done in baseline OpenAPI). 
* move `bundle` and `sync` under a new group called `Developer Tools` (similar to what we have in docs) * minor wording changes --- cmd/bundle/bundle.go | 7 ++++--- cmd/fs/fs.go | 7 ++++--- cmd/sync/sync.go | 7 ++++--- cmd/workspace/groups.go | 6 +++++- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go index 128c8302..3206b94e 100644 --- a/cmd/bundle/bundle.go +++ b/cmd/bundle/bundle.go @@ -6,9 +6,10 @@ import ( func New() *cobra.Command { cmd := &cobra.Command{ - Use: "bundle", - Short: "Databricks Asset Bundles", - Long: "Databricks Asset Bundles\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles", + Use: "bundle", + Short: "Databricks Asset Bundles let you express data/AI/analytics projects as code.", + Long: "Databricks Asset Bundles let you express data/AI/analytics projects as code.\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles", + GroupID: "development", } initVariableFlag(cmd) diff --git a/cmd/fs/fs.go b/cmd/fs/fs.go index 190220f4..01d8a745 100644 --- a/cmd/fs/fs.go +++ b/cmd/fs/fs.go @@ -6,9 +6,10 @@ import ( func New() *cobra.Command { cmd := &cobra.Command{ - Use: "fs", - Short: "Filesystem related commands", - Long: `Commands to do DBFS operations.`, + Use: "fs", + Short: "Filesystem related commands", + Long: `Commands to do DBFS operations.`, + GroupID: "workspace", } cmd.AddCommand( diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index f00c02a8..c613e8ca 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -76,9 +76,10 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn func New() *cobra.Command { cmd := &cobra.Command{ - Use: "sync [flags] SRC DST", - Short: "Synchronize a local directory to a workspace directory", - Args: cobra.MaximumNArgs(2), + Use: "sync [flags] SRC DST", + Short: "Synchronize a local directory to a workspace directory", + Args: cobra.MaximumNArgs(2), + GroupID: "development", } f := syncFlags{ diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 43159d18..b1505e77 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -18,7 +18,7 @@ func Groups() []cobra.Group { }, { ID: "jobs", - Title: "Jobs", + Title: "Workflows", }, { ID: "pipelines", @@ -52,5 +52,9 @@ func Groups() []cobra.Group { ID: "settings", Title: "Settings", }, + { + ID: "development", + Title: "Developer Tools", + }, } } From 9a1f078bd9fae6fb5a12e7fc0f26c70e32b70906 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Thu, 28 Dec 2023 14:15:21 +0100 Subject: [PATCH 310/310] Improve error when bundle root is not writable (#1093) ## Changes This improves the error when deploying to a bundle root that the current user doesn't have write access to. This can come up slightly more often since the change of https://github.com/databricks/cli/pull/1091. Before this change: ``` $ databricks bundle deploy --target prod Building my_project... Error: no such directory: /Users/lennart.kats@databricks.com/.bundle/my_project/prod/state ``` After this change: ``` $ databricks bundle deploy --target prod Building my_project... Error: cannot write to deployment root (this can indicate a previous deploy was done with a different identity): /Users/lennart.kats@databricks.com/.bundle/my_project/prod ``` Note that this change uses the "no such directory" error returned from the filer. 
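For reference, matching a wrapped, struct-typed error with `errors.As` looks roughly like this (a self-contained sketch with a stand-in for `filer.NoSuchDirectoryError` and an illustrative path, not the CLI's actual wiring):

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for filer.NoSuchDirectoryError.
type NoSuchDirectoryError struct{ Path string }

func (e NoSuchDirectoryError) Error() string {
	return "no such directory: " + e.Path
}

func main() {
	// The lock acquisition wraps the filer error, so errors.As is
	// needed to detect it anywhere in the error chain.
	err := fmt.Errorf("acquire lock: %w", NoSuchDirectoryError{Path: "/Users/someone/.bundle/prod/state"})

	var notExists NoSuchDirectoryError
	if errors.As(err, &notExists) {
		fmt.Println("cannot write to deployment root:", notExists.Path)
	}
}
```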
--- bundle/deploy/lock/acquire.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/bundle/deploy/lock/acquire.go b/bundle/deploy/lock/acquire.go index 18778aa5..1335f780 100644 --- a/bundle/deploy/lock/acquire.go +++ b/bundle/deploy/lock/acquire.go @@ -2,8 +2,11 @@ package lock import ( "context" + "errors" + "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" ) @@ -47,6 +50,13 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error { err = b.Locker.Lock(ctx, force) if err != nil { log.Errorf(ctx, "Failed to acquire deployment lock: %v", err) + + notExistsError := filer.NoSuchDirectoryError{} + if errors.As(err, &notExistsError) { + // If we get a "doesn't exist" error from the API this indicates + // we either don't have permissions or the path is invalid. + return fmt.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath) + } return err }